code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration file for the Sphinx documentation builder.

This file does only contain a selection of the most common options. For a full
list see the documentation:
http://www.sphinx-doc.org/en/master/config
"""

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, u'/tmp/lingvo/lingvo')

# -- Project information -----------------------------------------------------

project = u'Lingvo'
copyright = u'2018'
author = u''

# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax',
    'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode'
]

# NOTE(review): autodoc_default_flags was deprecated in Sphinx 1.8 in favor of
# autodoc_default_options -- confirm the Sphinx version pinned for this build
# before modernizing.
autodoc_default_flags = [
    'members', 'undoc-members', 'private-members', 'show-inheritance'
]
# Document members in source-code order rather than alphabetically.
autodoc_member_order = 'bysource'
napoleon_google_docstring = True
# Bare `name` in docs resolves as a Python object cross-reference.
default_role = 'py:obj'
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.7', None),
    'numpy': ('http://numpy.org/doc/stable/', None),
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
from docutils.transforms import Transform
from recommonmark.parser import CommonMarkParser

# NOTE(review): source_parsers was removed in Sphinx 3.0 (recommonmark now
# registers itself via `extensions`) -- verify against the pinned Sphinx.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'lingvodoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'lingvo.tex', u'Lingvo Documentation', u'', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'lingvo', u'Lingvo Documentation', [author], 1)]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'lingvo', u'Lingvo Documentation', author, 'Lingvo',
     'One line description of project.', 'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


class ResetFlags(Transform):
  """Docutils transform that clears absl flags after each document.

  Autodoc imports lingvo modules, which define absl flags; repeated imports
  would otherwise raise duplicate-flag errors. Running late (priority 999),
  this deletes every registered flag except 'showprefixforinfo'.
  """
  default_priority = 999

  def apply(self):
    from absl import flags  # pylint: disable=g-import-not-at-top
    for flag in list(flags.FLAGS):
      if flag not in ('showprefixforinfo',):
        delattr(flags.FLAGS, flag)


def setup(app):
  """Sphinx extension hook: register the flag-resetting transform."""
  app.add_transform(ResetFlags)
tensorflow/lingvo
docs/apidoc/conf.py
Python
apache-2.0
6,969
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import errno
import logging
import os
import signal
import socket
import sys

from pants.java.nailgun_io import NailgunStreamWriter
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
from pants.util.socket import RecvBufferedSocket


logger = logging.getLogger(__name__)


class NailgunClientSession(NailgunProtocol):
  """Handles a single nailgun client session.

  Owns one connected socket for the duration of a request: streams stdin to
  the server via a NailgunStreamWriter (if an input fd was given) and demuxes
  the server's chunked output back onto the provided stdout/stderr streams.
  """

  def __init__(self, sock, in_fd, out_fd, err_fd, exit_on_broken_pipe=False):
    self._sock = sock
    # Only spin up a stdin-forwarding thread when the caller supplied input.
    if in_fd:
      self._input_writer = NailgunStreamWriter(in_fd, self._sock, ChunkType.STDIN,
                                               ChunkType.STDIN_EOF)
    else:
      self._input_writer = None
    self._stdout = out_fd
    self._stderr = err_fd
    self._exit_on_broken_pipe = exit_on_broken_pipe
    # Populated from the server's PID chunk; used by callers to signal the nail.
    self.remote_pid = None

  def _maybe_start_input_writer(self):
    if self._input_writer:
      self._input_writer.start()

  def _maybe_stop_input_writer(self):
    if self._input_writer:
      self._input_writer.stop()

  def _write_flush(self, fd, payload=None):
    """Write a payload to a given fd (if provided) and flush the fd."""
    try:
      if payload:
        fd.write(payload)
      fd.flush()
    except (IOError, OSError) as e:
      # If a `Broken Pipe` is encountered during a stdio fd write, we're headless - bail.
      if e.errno == errno.EPIPE and self._exit_on_broken_pipe:
        sys.exit()

      # Otherwise, re-raise.
      raise

  def _process_session(self):
    """Process the outputs of the nailgun session.

    Reads chunks off the socket until an EXIT chunk arrives, forwarding
    STDOUT/STDERR payloads as they come. Returns the remote exit code.
    """
    try:
      for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True):
        if chunk_type == ChunkType.STDOUT:
          self._write_flush(self._stdout, payload)
        elif chunk_type == ChunkType.STDERR:
          self._write_flush(self._stderr, payload)
        elif chunk_type == ChunkType.EXIT:
          # Flush both streams before returning so output ordering is preserved.
          self._write_flush(self._stdout)
          self._write_flush(self._stderr)
          return int(payload)
        elif chunk_type == ChunkType.PID:
          self.remote_pid = int(payload)
        elif chunk_type == ChunkType.START_READING_INPUT:
          self._maybe_start_input_writer()
        else:
          raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload))
    finally:
      # Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in
      # NailgunProtocol.iter_chunks(). This ensures the NailgunStreamWriter is always stopped.
      self._maybe_stop_input_writer()

  def execute(self, working_dir, main_class, *arguments, **environment):
    """Send the nailgun request, then process the remainder of the session.

    Returns the remote exit code from _process_session().
    """
    # Send the nailgun request.
    self.send_request(self._sock, working_dir, main_class, *arguments, **environment)

    # Process the remainder of the nailgun session.
    return self._process_session()


class NailgunClient(object):
  """A python nailgun client (see http://martiansoftware.com/nailgun for more info)."""

  class NailgunError(Exception):
    """Indicates an error interacting with a nailgun server."""

  class NailgunConnectionError(NailgunError):
    """Indicates an error upon initial connect to the nailgun server."""

  # For backwards compatibility with nails expecting the ng c client special env vars.
  ENV_DEFAULTS = dict(NAILGUN_FILESEPARATOR=os.sep, NAILGUN_PATHSEPARATOR=os.pathsep)
  DEFAULT_NG_HOST = '127.0.0.1'
  DEFAULT_NG_PORT = 2113

  def __init__(self, host=DEFAULT_NG_HOST, port=DEFAULT_NG_PORT, ins=sys.stdin, out=None, err=None,
               workdir=None, exit_on_broken_pipe=False):
    """Creates a nailgun client that can be used to issue zero or more nailgun commands.

    :param string host: the nailgun server to contact (defaults to '127.0.0.1')
    :param int port: the port the nailgun server is listening on (defaults to the default nailgun
                     port: 2113)
    :param file ins: a file to read command standard input from (defaults to stdin) - can be None
                     in which case no input is read
    :param file out: a stream to write command standard output to (defaults to stdout)
    :param file err: a stream to write command standard error to (defaults to stderr)
    :param string workdir: the default working directory for all nailgun commands (defaults to CWD)
    :param bool exit_on_broken_pipe: whether or not to exit when `Broken Pipe` errors are
                                     encountered.
    """
    self._host = host
    self._port = port
    self._stdin = ins
    self._stdout = out or sys.stdout
    self._stderr = err or sys.stderr
    self._workdir = workdir or os.path.abspath(os.path.curdir)
    self._exit_on_broken_pipe = exit_on_broken_pipe
    # The currently-active session, if any; cleared after each execute().
    self._session = None

  def try_connect(self):
    """Creates a socket, connects it to the nailgun and returns the connected socket.

    :returns: a connected `socket.socket`.
    :raises: `NailgunClient.NailgunConnectionError` on failure to connect.
    """
    sock = RecvBufferedSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    try:
      sock.connect((self._host, self._port))
    except (socket.error, socket.gaierror) as e:
      logger.debug('Encountered socket exception {!r} when attempting connect to nailgun'.format(e))
      sock.close()
      raise self.NailgunConnectionError(
        'Problem connecting to nailgun server at {}:{}: {!r}'.format(self._host, self._port, e))
    else:
      return sock

  def send_control_c(self):
    """Sends SIGINT to a nailgun server using pid information from the active session."""
    if self._session and self._session.remote_pid is not None:
      os.kill(self._session.remote_pid, signal.SIGINT)

  def execute(self, main_class, cwd=None, *args, **environment):
    """Executes the given main_class with any supplied args in the given environment.

    :param string main_class: the fully qualified class name of the main entrypoint
    :param string cwd: Set the working directory for this command
    :param list args: any arguments to pass to the main entrypoint
    :param dict environment: an env mapping made available to native nails via the nail context
    :returns: the exit code of the main_class.
    """
    # NOTE(review): `dict.items() + dict.items()` is Python 2 only (items() is a
    # view, not a list, on Python 3) -- this module predates a py3 port.
    environment = dict(self.ENV_DEFAULTS.items() + environment.items())
    cwd = cwd or self._workdir

    # N.B. This can throw NailgunConnectionError (catchable via NailgunError).
    sock = self.try_connect()

    self._session = NailgunClientSession(sock,
                                         self._stdin,
                                         self._stdout,
                                         self._stderr,
                                         self._exit_on_broken_pipe)
    try:
      return self._session.execute(cwd, main_class, *args, **environment)
    except socket.error as e:
      raise self.NailgunError('Problem communicating with nailgun server at {}:{}: {!r}'
                              .format(self._host, self._port, e))
    except NailgunProtocol.ProtocolError as e:
      raise self.NailgunError('Problem in nailgun protocol with nailgun server at {}:{}: {!r}'
                              .format(self._host, self._port, e))
    finally:
      # Always release the socket and session, even on protocol failure.
      sock.close()
      self._session = None

  def __repr__(self):
    return 'NailgunClient(host={!r}, port={!r}, workdir={!r})'.format(self._host,
                                                                      self._port,
                                                                      self._workdir)
fkorotkov/pants
src/python/pants/java/nailgun_client.py
Python
apache-2.0
7,704
# Horizon plugin registration for the "Help/About" dashboard.

# Slug of the dashboard appended to HORIZON['dashboards']. Required.
DASHBOARD = 'help_about'

# Set to True to keep this dashboard registered but hidden from the UI.
DISABLED = False

# Django applications that Horizon must add to INSTALLED_APPS so the
# dashboard's templates and static assets are discoverable.
ADD_INSTALLED_APPS = [
    'openstack_dashboard.dashboards.help_about',
]
ging/horizon
openstack_dashboard/enabled/_35_help_about.py
Python
apache-2.0
250
"""Convert a raw, tab-separated route/operation listing into route rows.

Reads ``routesRaw.txt`` (columns: <ignored>, route id, operation id, step
number) and writes ``routes.txt`` with one ``RTE\t<id>\t,op1,op2,...`` row
per route, operations in input order.

Fixes over the original flat script:
  * files are opened with context managers so they are closed on error;
  * the redundant if/else around dict insertion is replaced by setdefault;
  * the logic is factored into testable helpers behind a __main__ guard
    (importing the module no longer performs file I/O).
The output format -- including the leading comma after the second tab,
which the original produced -- is preserved byte-for-byte.
"""


def parse_routes(lines):
    """Build {route_id: [operation_id, ...]} from raw tab-separated lines.

    Each line: column 1 is the route id, column 2 the operation id, column 3
    the expected 1-based step number for that operation within its route.
    Pauses for operator input when a step number disagrees with the count
    accumulated so far (the original script's sanity check, kept as-is).
    """
    routes = {}
    for line in lines:
        fields = line.split("\t")
        route_id = fields[1]
        operation_id = fields[2]
        step_number = int(fields[3].replace("\n", ""))
        routes.setdefault(route_id, []).append(operation_id)
        if len(routes[route_id]) != step_number:
            # Interactive pause so a human can inspect the inconsistency.
            input("err!")
    return routes


def format_route(route_id, operation_ids):
    """Render one output row: 'RTE\t<id>\t' followed by ',op' per operation."""
    return "RTE\t" + route_id + "\t" + "".join("," + op for op in operation_ids)


def main():
    """Read routesRaw.txt and write the aggregated routes to routes.txt."""
    with open("routesRaw.txt", "r") as fin:
        routes = parse_routes(fin)
    with open("routes.txt", "w") as fout:
        for route_id in routes:
            fout.write(format_route(route_id, routes[route_id]) + "\n")


if __name__ == "__main__":
    main()
JoelBondurant/RandomCodeSamples
python/routes.py
Python
apache-2.0
575
from __future__ import print_function

import os
import ssl
import sys

import cassandra
from cassandra import auth
from cassandra.cluster import Cluster
import yaml

from cdeploy import cqlexecutor


class Migrator:
    """Applies and rolls back versioned CQL schema migrations from a directory."""

    def __init__(self, migrations_path, session):
        print('Reading migrations from {0}'.format(migrations_path))
        self.migrations_path = migrations_path
        self.session = session

    def run_migrations(self):
        """Apply, in version order, every migration newer than the recorded top version."""
        cqlexecutor.CQLExecutor.init_table(self.session)
        top_version = self.get_top_version()

        def new_migration_filter(f):
            # Keep plain files whose numeric filename prefix is newer than the
            # schema's current version.
            return (
                os.path.isfile(os.path.join(self.migrations_path, f)) and
                self.migration_version(f) > top_version
            )

        new_migrations = self.filter_migrations(new_migration_filter)
        # Side-effecting list comprehension: applies each migration in order.
        [self.apply_migration(file_name) for file_name in new_migrations]

    def undo(self):
        """Roll back the single most recent migration, if any was applied."""
        top_version = self.get_top_version()
        if top_version == 0:
            return

        def top_version_filter(f):
            return (
                os.path.isfile(os.path.join(self.migrations_path, f)) and
                self.migration_version(f) == top_version
            )

        top_migration = list(self.filter_migrations(top_version_filter))[0]
        cqlexecutor.CQLExecutor.execute_undo(
            self.session, self.read_migration(top_migration)
        )
        cqlexecutor.CQLExecutor.rollback_schema_migration(self.session)
        print(' -> Migration {0} undone ({1})\n'.format(top_version, top_migration))

    def get_top_version(self):
        """Return the highest applied migration version, or 0 when none exist."""
        result = cqlexecutor.CQLExecutor.get_top_version(self.session)
        top_version = result[0].version if len(result) > 0 else 0
        print('Current version is {0}'.format(top_version))
        return top_version

    def filter_migrations(self, filter_func):
        """Return migration filenames matching filter_func, sorted by version."""
        dir_list = os.listdir(self.migrations_path)
        # The 'config' entry lives inside the migrations dir but is not a migration.
        if 'config' in dir_list:
            dir_list.remove('config')
        migration_dir_listing = sorted(dir_list, key=self.migration_version)

        return filter(
            filter_func,
            migration_dir_listing)

    def migration_version(self, file_name):
        # Filenames look like "<version>_description.<ext>"; the leading
        # integer before the first '_' (and '.') is the version number.
        return int(file_name.split('.')[0].split('_')[0])

    def apply_migration(self, file_name):
        """Execute one migration script and record its version in the schema table."""
        migration_script = self.read_migration(file_name)
        version = self.migration_version(file_name)
        cqlexecutor.CQLExecutor.execute(self.session, migration_script)
        cqlexecutor.CQLExecutor.add_schema_migration(self.session, version)
        print(' -> Migration {0} applied ({1})\n'.format(version, file_name))

    def read_migration(self, file_name):
        # NOTE(review): file handle is never explicitly closed; harmless for a
        # short-lived CLI but a `with` block would be tidier.
        migration_file = open(os.path.join(self.migrations_path, file_name))
        return migration_file.read()


DEFAULT_MIGRATIONS_PATH = './migrations'
CONFIG_FILE_PATH = 'config/cassandra.yml'


def main():
    """CLI entry point: ``cdeploy [path/to/migrations] [--undo]``."""
    if '--help' in sys.argv or '-h' in sys.argv:
        print('Usage: cdeploy [path/to/migrations] [--undo]')
        return

    undo = False
    if '--undo' in sys.argv:
        undo = True
        # Remove the flag so positional handling below sees only the path.
        sys.argv.remove('--undo')

    migrations_path = (
        DEFAULT_MIGRATIONS_PATH if len(sys.argv) == 1 else sys.argv[1]
    )
    if (invalid_migrations_dir(migrations_path) or
            missing_config(migrations_path)):
        return

    config = load_config(migrations_path, os.getenv('ENV'))
    session = get_session(config)
    migrator = Migrator(migrations_path, session)

    if undo:
        migrator.undo()
    else:
        migrator.run_migrations()


def get_session(config):
    """Build a Cassandra session from the config dict.

    Honors optional auth, SSL, keyspace auto-creation, and a default
    consistency level, all driven by keys in ``config``.
    """
    auth_provider = None
    if 'auth_enabled' in config and config['auth_enabled']:
        auth_provider = auth.PlainTextAuthProvider(
            username=config['auth_username'],
            password=config['auth_password'],
        )

    ssl_options = None
    if 'ssl_enabled' in config and config['ssl_enabled']:
        ssl_options = {
            'ca_certs': config['ssl_ca_certs'],
            'ssl_version': ssl.PROTOCOL_TLSv1,  # pylint: disable=E1101
        }

    cluster = Cluster(
        config['hosts'],
        auth_provider=auth_provider,
        ssl_options=ssl_options,
    )
    session = cluster.connect()
    try:
        session.set_keyspace(config['keyspace'])
    except cassandra.InvalidRequest:
        # Keyspace doesn't exist yet
        if 'create_keyspace' in config and config['create_keyspace']:
            create_keyspace(config, session)
        else:
            raise

    if 'consistency_level' in config:
        consistency_level = getattr(
            cassandra.ConsistencyLevel,
            config['consistency_level'],
        )
        session.default_consistency_level = consistency_level

    return session


def create_keyspace(config, session):
    """Create the configured keyspace and switch the session to it."""
    # NOTE(review): keyspace name and replication strategy are spliced into the
    # CQL via string formatting. Acceptable only because both come from the
    # operator's own config file, never from untrusted input.
    session.execute(
        "CREATE KEYSPACE {0} WITH REPLICATION = {1};".format(
            config['keyspace'], config['replication_strategy']
        )
    )
    session.set_keyspace(config['keyspace'])


def invalid_migrations_dir(migrations_path):
    """Return True (after printing a message) when the path is not a directory."""
    if not os.path.isdir(migrations_path):
        print('"{0}" is not a directory'.format(migrations_path))
        return True
    else:
        return False


def missing_config(migrations_path):
    """Return True (after printing a message) when cassandra.yml is absent."""
    config_path = config_file_path(migrations_path)
    if not os.path.exists(os.path.join(config_path)):
        print('Missing configuration file "{0}"'.format(config_path))
        return True
    else:
        return False


def config_file_path(migrations_path):
    return os.path.join(migrations_path, CONFIG_FILE_PATH)


def load_config(migrations_path, env):
    """Load the YAML config section for `env` (default 'development')."""
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input. safe_load is the usual fix, but it would
    # change behavior if the config relies on custom YAML tags -- confirm the
    # pinned PyYAML version before switching.
    config_file = open(config_file_path(migrations_path))
    config = yaml.load(config_file)
    return config[env or 'development']


if __name__ == '__main__':
    main()
Kuwagata/cdeploy
cdeploy/migrator.py
Python
apache-2.0
5,863
"""Print all prime numbers in a user-supplied inclusive range.

Improvements over the original flat script:
  * fixed the ``_author__`` typo (missing leading underscore);
  * trial division now stops at sqrt(n) instead of n-1 (O(sqrt n) per check);
  * the logic lives in testable functions behind a __main__ guard, so
    importing the module no longer blocks on input().
Printed output for any given range is identical to the original.
"""

__author__ = "Dilipbobby"


def is_prime(n):
    """Return True iff n is a prime number (n < 2 is never prime)."""
    if n < 2:
        return False
    # A composite n must have a divisor no larger than its square root.
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True


def primes_in_range(lower, upper):
    """Return all primes in the inclusive range [lower, upper], ascending."""
    return [num for num in range(lower, upper + 1) if is_prime(num)]


def main():
    # Take the input from the user.
    lower = int(input("Enter lower range: "))
    upper = int(input("Enter upper range: "))
    # Print every prime between the given limits, one per line.
    for num in primes_in_range(lower, upper):
        print(num)


if __name__ == "__main__":
    main()
dilipbobby/DataScience
Python3/Level-1/allprimes.py
Python
apache-2.0
457
#!/usr/bin/env python
"""Galaxy wrapper around the pindel structural-variant caller.

Python 2 script (``except Exception, e``, ``0L``/``long``, print statement).
Indexes the input BAMs, estimates insert sizes, runs pindel (optionally one
process per chromosome), merges the raw outputs, and converts to VCF via
pindel2vcf; optionally produces a somatic-filtered VCF.
"""

import logging
import argparse, os, shutil, subprocess, sys, tempfile, time, shlex, re
import datetime
from multiprocessing import Pool

import vcf


def execute(cmd, output=None):
    # Run a shell command, optionally capturing its stdout to a file, and
    # report any error that occurs.
    import subprocess, sys, shlex
    print(cmd)
    try:
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = process.communicate()
    except Exception, e:
        # Failure launching/running the command itself: report on stderr.
        sys.stderr.write("problem doing : %s\n%s\n" %(cmd, e))
        return
    if output:
        output = open(output, 'w')
        output.write(stdout)
        output.close()
    if stderr != '':
        # Tool-internal messages go to stdout (often just warnings; many of
        # these programs write progress chatter to stderr).
        sys.stdout.write("warning or error while doing : %s\n-----\n%s-----\n\n" %(cmd, stderr))


def indexBam(workdir, inputFastaFile, inputBamFile, bam_number, inputBamFileIndex=None):
    """Symlink reference + BAM into workdir and ensure both are indexed.

    Returns (reference_link, bam_link). The reference is faidx-indexed only
    the first time the symlink is created; the BAM is samtools-indexed unless
    an existing .bai was supplied.
    """
    inputFastaLink = os.path.join(os.path.abspath(workdir), "reference.fa" )
    if not os.path.exists(inputFastaLink):
        os.symlink(inputFastaFile, inputFastaLink)
        # NOTE(review): indentation reconstructed -- faidx is assumed to run
        # only when the reference symlink is freshly created; confirm against
        # the original file.
        cmd = "samtools faidx %s" %(inputFastaLink)
        execute(cmd)
    inputBamLink = os.path.join(os.path.abspath(workdir), "sample_%d.bam" % (bam_number) )
    os.symlink(inputBamFile, inputBamLink)
    if inputBamFileIndex is None:
        cmd = "samtools index %s" %(inputBamLink)
        execute(cmd)
    else:
        os.symlink(inputBamFileIndex, inputBamLink + ".bai")
    return inputFastaLink, inputBamLink


def config(inputBamFiles, meanInsertSizes, tags, tempDir):
    """Write pindel's tab-separated config file (bam, insert size, tag) and return its path."""
    print("Creating Config File.")
    configFile = tempDir+"/pindel_configFile"
    fil = open(configFile, 'w')
    for inputBamFile, meanInsertSize, tag in zip(inputBamFiles, meanInsertSizes, tags):
        fil.write("%s\t%s\t%s\n" %(inputBamFile, meanInsertSize, tag))
    fil.close()
    return configFile


def pindel(reference, configFile, args, tempDir, chrome=None):
    """Assemble the pindel command line from parsed args.

    When `chrome` is given the run is restricted to that chromosome and the
    output prefix is suffixed with its name. Returns (cmd, output_prefix).
    """
    if chrome is None:
        pindel_file_base = tempDir + "/pindel"
    else:
        pindel_file_base = tempDir + "/pindel_" + chrome
    cmd = "pindel -f %s -i %s -o %s " %(reference, configFile, pindel_file_base )

    # NOTE(review): -z/--input_SV_Calls_for_assembly is parsed as store_true
    # but interpolated here as a %s value -- when set, this emits
    # "--input_SV_Calls_for_assembly True", which may not be what pindel
    # expects; confirm against the pindel CLI.
    if args.input_SV_Calls_for_assembly:
        cmd += ' --input_SV_Calls_for_assembly %s ' %(args.input_SV_Calls_for_assembly)
    if args.breakdancer:
        cmd += ' --breakdancer %s ' %(args.breakdancer)
    if args.exclude is not None:
        cmd += ' --exclude %s' % (args.exclude)
    if args.include is not None:
        cmd += ' --include %s' % (args.include)

    # Optional valued flags: (arg name, printf format) pairs, emitted only
    # when the user supplied a value.
    opt_list = [
        ["number_of_threads", "%d"],
        ["max_range_index", "%d"],
        ["window_size", "%d"],
        ["sequencing_error_rate", "%f"],
        ["sensitivity", "%f"],
        ["maximum_allowed_mismatch_rate", "%f"],
        ["NM", "%d"],
        ["additional_mismatch", "%d"],
        ["min_perfect_match_around_BP", "%d"],
        ["min_inversion_size", "%d"],
        ["min_num_matched_bases", "%d"],
        ["balance_cutoff", "%d"],
        ["anchor_quality", "%d"],
        ["minimum_support_for_event", "%d"]
    ]
    for o, f in opt_list:
        if getattr(args, o) is not None:
            cmd += (" --%s %s" % (o, f)) % (getattr(args,o))

    if chrome is not None:
        cmd += " -c '%s' " % (chrome)

    # Boolean flags passed straight through by name when set.
    flag_list = [
        "report_long_insertions", "report_duplications", "report_inversions",
        "report_breakpoints", "report_close_mapped_reads",
        "report_only_close_mapped_reads", "report_interchromosomal_events",
        "IndelCorrection", "NormalSamples", "DD_REPORT_DUPLICATION_READS"
    ]
    for f in flag_list:
        if getattr(args, f):
            cmd += (" --%s" % (f))

    if args.detect_DD:
        # Dispersed-duplication detection plus its tuning knobs.
        cmd += ' -q '
        cmd += ' --MAX_DD_BREAKPOINT_DISTANCE '+str(args.MAX_DD_BREAKPOINT_DISTANCE)
        cmd += ' --MAX_DISTANCE_CLUSTER_READS '+str(args.MAX_DISTANCE_CLUSTER_READS)
        cmd += ' --MIN_DD_CLUSTER_SIZE '+str(args.MIN_DD_CLUSTER_SIZE)
        cmd += ' --MIN_DD_BREAKPOINT_SUPPORT '+str(args.MIN_DD_BREAKPOINT_SUPPORT)
        cmd += ' --MIN_DD_MAP_DISTANCE '+str(args.MIN_DD_MAP_DISTANCE)
    return (cmd, pindel_file_base )


def move(avant, apres):
    # Move a file if it exists (no-op otherwise).
    if os.path.exists(avant):
        execute("mv %s %s" %(avant, apres))


def pindel2vcf(inputFastaFile, refName, pindel_file, vcf_file):
    """Build the pindel2vcf conversion command (not executed here)."""
    date = str(time.strftime('%d/%m/%y',time.localtime()))
    cmd = "pindel2vcf -p %s -r %s -R %s -d %s -v %s -he 0.05 -ho 0.95 -G" % (pindel_file, inputFastaFile, refName, date, vcf_file)  # Added hard-coded parameters. JHL
    return cmd


def which(cmd):
    """Resolve a program name to its path via `which`; None when not found."""
    cmd = ["which",cmd]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    res = p.stdout.readline().rstrip()
    if len(res) == 0:
        return None
    return res


def get_bam_seq(inputBamFile, min_size):
    """Return reference sequence names from `samtools idxstats` with length >= min_size."""
    ### Changed min_size to 40mil. JHL
    samtools = which("samtools")
    cmd = [samtools, "idxstats", inputBamFile]
    process = subprocess.Popen(args=cmd, stdout=subprocess.PIPE)
    stdout, stderr = process.communicate()
    seqs = []
    for line in stdout.split("\n"):
        tmp = line.split("\t")
        if len(tmp) == 4 and int(tmp[1]) >= min_size:
            seqs.append(tmp[0])
    return seqs


def getMeanInsertSize(bamFile):
    """Estimate the mean insert size from up to 1M properly-paired reads.

    Template lengths >= 10000 are discarded as outliers; falls back to 200
    when no usable pairs are seen.
    """
    logging.info("Getting insert size of %s" % (bamFile))
    cmd = "samtools view -f66 %s | head -n 1000000" % (bamFile)
    process = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE)
    b_sum = 0L
    b_count = 0L
    while True:
        line = process.stdout.readline()
        if not line:
            break
        tmp = line.split("\t")
        # Column 9 (0-based 8) is TLEN; negative for the rightmost mate.
        if abs(long(tmp[8])) < 10000:
            b_sum += abs(long(tmp[8]))
            b_count +=1
    process.wait()
    if b_count == 0:
        mean = 200
    else:
        mean = b_sum / b_count
    print "Using insert size: %d" % (mean)
    return mean


def __main__():
    logging.basicConfig(level=logging.INFO)
    time.sleep(1)  #small hack, sometimes it seems like docker file systems aren't avalible instantly
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-r', dest='inputFastaFile', required=True, help='the reference file')
    parser.add_argument('-R', dest='inputFastaName', default="genome", help='the reference name')
    parser.add_argument('-b', dest='inputBamFiles', default=[], action="append", help='the bam file')
    parser.add_argument('-bi', dest='inputBamFileIndexes', default=[], action="append", help='the bam file')
    parser.add_argument('-s', dest='insert_sizes', type=int, default=[], action="append", required=False, help='the insert size')
    parser.add_argument('-t', dest='sampleTags', default=[], action="append", help='the sample tag')
    parser.add_argument('-o1', dest='outputRaw', help='the output raw', default=None)
    parser.add_argument('-o2', dest='outputVcfFile', help='the output vcf', default=None)
    parser.add_argument('-o3', dest='outputSomaticVcfFile', help='the output somatic filtered vcf', default=None)
    parser.add_argument('--number_of_threads', dest='number_of_threads', type=int, default=2)
    parser.add_argument('--number_of_procs', dest='procs', type=int, default=1)
    parser.add_argument('--breakdancer', dest='breakdancer')
    parser.add_argument('-x', '--max_range_index', dest='max_range_index', type=int, default=None)
    parser.add_argument('--window_size', dest='window_size', type=int, default=None)
    parser.add_argument('--sequencing_error_rate', dest='sequencing_error_rate', type=float, default=None)
    parser.add_argument('--sensitivity', dest='sensitivity', default=None, type=float)
    parser.add_argument('--report_long_insertions', dest='report_long_insertions', action='store_true', default=False)
    parser.add_argument('--report_duplications', dest='report_duplications', action='store_true', default=False)
    parser.add_argument('--report_inversions', dest='report_inversions', action='store_true', default=False)
    parser.add_argument('--report_breakpoints', dest='report_breakpoints', action='store_true', default=False)
    parser.add_argument('-u', '--maximum_allowed_mismatch_rate', dest='maximum_allowed_mismatch_rate', type=float, default=None)
    parser.add_argument('--report_close_mapped_reads', dest='report_close_mapped_reads', action='store_true', default=False)
    parser.add_argument('--report_only_close_mapped_reads', dest='report_only_close_mapped_reads', action='store_true', default=False)
    parser.add_argument('--report_interchromosomal_events', dest='report_interchromosomal_events', action='store_true', default=False)
    parser.add_argument('--IndelCorrection', dest='IndelCorrection', action='store_true', default=False)
    parser.add_argument('--NormalSamples', dest='NormalSamples', action='store_true', default=False)
    parser.add_argument('-a', '--additional_mismatch', dest='additional_mismatch', type=int, default=None)
    parser.add_argument('-m', '--min_perfect_match_around_BP', dest='min_perfect_match_around_BP', type=int, default=None)
    parser.add_argument('-v', '--min_inversion_size', dest='min_inversion_size', type=int, default=None)
    parser.add_argument('-d', '--min_num_matched_bases', dest='min_num_matched_bases', type=int, default=None)
    parser.add_argument('-B', '--balance_cutoff', dest='balance_cutoff', type=int, default=None)
    parser.add_argument('-A', '--anchor_quality', dest='anchor_quality', type=int, default=None)
    parser.add_argument('-M', '--minimum_support_for_event', dest='minimum_support_for_event', type=int, default=None)
    parser.add_argument('-n', '--NM', dest='NM', type=int, default=None)
    parser.add_argument('--detect_DD', dest='detect_DD', action='store_true', default=False)
    # NOTE(review): the DD defaults below are string literals combined with
    # type=int -- argparse applies `type` only to command-line values, so the
    # defaults remain str ('350', etc.) unless the flag is passed; str() in
    # pindel() masks this, but it's fragile.
    parser.add_argument('--MAX_DD_BREAKPOINT_DISTANCE', dest='MAX_DD_BREAKPOINT_DISTANCE', type=int, default='350')
    parser.add_argument('--MAX_DISTANCE_CLUSTER_READS', dest='MAX_DISTANCE_CLUSTER_READS', type=int, default='100')
    parser.add_argument('--MIN_DD_CLUSTER_SIZE', dest='MIN_DD_CLUSTER_SIZE', type=int, default='3')
    parser.add_argument('--MIN_DD_BREAKPOINT_SUPPORT', dest='MIN_DD_BREAKPOINT_SUPPORT', type=int, default='3')
    parser.add_argument('--MIN_DD_MAP_DISTANCE', dest='MIN_DD_MAP_DISTANCE', type=int, default='8000')
    parser.add_argument('--DD_REPORT_DUPLICATION_READS', dest='DD_REPORT_DUPLICATION_READS', action='store_true', default=False)
    parser.add_argument('--somatic_vaf', type=float, default=0.08)
    parser.add_argument('--somatic_cov', type=int, default=20)
    parser.add_argument('--somatic_hom', type=int, default=6)
    parser.add_argument("-J", "--exclude", dest="exclude", default=None)
    parser.add_argument("-j", "--include", dest="include", default=None)
    parser.add_argument('--min_chrom_size', dest='min_chrom_size', type=int, default='1')
    parser.add_argument('-z', '--input_SV_Calls_for_assembly', dest='input_SV_Calls_for_assembly', action='store_true', default=False)
    parser.add_argument('--workdir', default="./")
    parser.add_argument('--no_clean', action="store_true", default=False)

    args = parser.parse_args()

    # Validate the parallel input lists: indexes and insert sizes may be
    # omitted entirely (padded with None), but when given must match the
    # number of BAMs; sample tags are always required per-BAM.
    inputBamFiles = list( os.path.abspath(a) for a in args.inputBamFiles )
    if len(inputBamFiles) == 0:
        logging.error("Need input files")
        sys.exit(1)
    inputBamFileIndexes = list( os.path.abspath(a) for a in args.inputBamFileIndexes )
    if len(inputBamFileIndexes) == 0:
        inputBamFileIndexes = [None] * len(inputBamFiles)
    if len(inputBamFileIndexes) != len(inputBamFiles):
        logging.error("Index file count needs to undefined or match input file count")
        sys.exit(1)
    insertSizes = args.insert_sizes
    if len(insertSizes) == 0:
        insertSizes = [None] * len(inputBamFiles)
    if len(insertSizes) != len(inputBamFiles):
        logging.error("Insert Sizes needs to undefined or match input file count")
        sys.exit(1)
    sampleTags = args.sampleTags
    if len(sampleTags) != len(inputBamFiles):
        logging.error("Sample Tags need to match input file count")
        sys.exit(1)

    tempDir = tempfile.mkdtemp(dir=args.workdir, prefix="pindel_work_")
    print(tempDir)
    try:
        meanInsertSizes = []
        seq_hash = {}
        newInputFiles = []
        i = 0
        #make sure the BAMs are indexed and get the mean insert sizes
        for inputBamFile, inputBamIndex, insertSize, sampleTag in zip(inputBamFiles, inputBamFileIndexes, insertSizes, sampleTags ):
            inputFastaFile, inputBamFile = indexBam(args.workdir, args.inputFastaFile, inputBamFile, i, inputBamIndex)
            i += 1
            newInputFiles.append(inputBamFile)
            if insertSize==None:
                meanInsertSize = getMeanInsertSize(inputBamFile)
            else:
                meanInsertSize=insertSize
            meanInsertSizes.append( meanInsertSize )
            # Union of sufficiently-large sequences across all BAMs.
            for seq in get_bam_seq(inputBamFile, args.min_chrom_size):
                seq_hash[seq] = True
        seqs = seq_hash.keys()
        configFile = config(newInputFiles, meanInsertSizes, sampleTags, tempDir)

        #run pindel
        pindel_files = []
        if args.procs == 1:
            # Single process: one whole-genome pindel run.
            cmd, pindelFileBase = pindel(inputFastaFile, configFile, args, tempDir)
            execute(cmd)
            for suffix in ["_D", "_SI", "_LI", "_INV", "_TD"]:
                if os.path.exists(pindelFileBase + suffix):
                    pindel_files.append( pindelFileBase + suffix )
        else:
            # Multi-process: one pindel run per chromosome, fanned out via Pool.
            cmds = []
            runs = []
            for a in seqs:
                cmd, pindelFileBase = pindel(inputFastaFile, configFile, args, tempDir, a)
                cmds.append(cmd)
                runs.append(pindelFileBase)
            p = Pool(args.procs)
            values = p.map(execute, cmds, 1)
            for pindelFileBase in runs:
                for suffix in ["_D", "_SI", "_LI", "_INV", "_TD"]:
                    if os.path.exists(pindelFileBase + suffix):
                        pindel_files.append( pindelFileBase + suffix )

        #run pindel2vcf
        # Concatenate all per-type (and per-chromosome) raw outputs.
        with open(os.path.join(args.workdir, "pindel_all"), "w") as handle:
            for p in pindel_files:
                with open(p) as ihandle:
                    for line in ihandle:
                        handle.write(line)
        if args.outputRaw is not None:
            shutil.copy(os.path.join(args.workdir, "pindel_all"), args.outputRaw)
        if args.outputVcfFile is not None:
            cmd = pindel2vcf(inputFastaFile, args.inputFastaName, os.path.join(args.workdir, "pindel_all"), args.outputVcfFile)
            execute(cmd)
        if args.outputSomaticVcfFile is not None:
            # Somatic filtering: keep only deletion (_D) then short-insertion
            # (_SI) records that carry read-level evidence ("ChrID" lines).
            with open(os.path.join(args.workdir, "pindel_somatic"), "w") as handle:
                for p in pindel_files:
                    if p.endswith("_D"):
                        with open(p) as ihandle:
                            for line in ihandle:
                                if re.search("ChrID", line):
                                    handle.write(line)
                for p in pindel_files:
                    if p.endswith("_SI"):
                        with open(p) as ihandle:
                            for line in ihandle:
                                if re.search("ChrID", line):
                                    handle.write(line)
            with open(os.path.join(args.workdir, "somatic.indel.filter.config"), "w") as handle:
                handle.write("indel.filter.input = %s\n" % os.path.join(args.workdir, "pindel_somatic"))
                handle.write("indel.filter.vaf = %s\n" % (args.somatic_vaf))
                handle.write("indel.filter.cov = %s\n" % (args.somatic_cov))
                handle.write("indel.filter.hom = %s\n" % (args.somatic_hom))
                handle.write("indel.filter.pindel2vcf = %s\n" % (which("pindel2vcf")))
                handle.write("indel.filter.reference = %s\n" % (inputFastaFile))
                handle.write("indel.filter.referencename = %s\n" % (args.inputFastaName))
                handle.write("indel.filter.referencedate = %s\n" % (datetime.datetime.now().strftime("%Y%m%d")) )
                handle.write("indel.filter.output = %s\n" % (args.outputSomaticVcfFile))
            # The hard-coded paths need to be removed.
            execute("%s ~/bin/somatic_indelfilter.pl %s" % (which("perl"), os.path.join(args.workdir, "somatic.indel.filter.config")) )
    finally:
        if not args.no_clean and os.path.exists(tempDir):
            shutil.rmtree(tempDir)


if __name__=="__main__":
    __main__()
jhl667/galaxy_tools
tools/pindel/pindel.py
Python
apache-2.0
16,775
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for CreateService # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-service-management # [START servicemanagement_v1_generated_ServiceManager_CreateService_sync] from google.cloud import servicemanagement_v1 def sample_create_service(): # Create a client client = servicemanagement_v1.ServiceManagerClient() # Initialize request argument(s) request = servicemanagement_v1.CreateServiceRequest( ) # Make the request operation = client.create_service(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) # [END servicemanagement_v1_generated_ServiceManager_CreateService_sync]
googleapis/python-service-management
samples/generated_samples/servicemanagement_v1_generated_service_manager_create_service_sync.py
Python
apache-2.0
1,564
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import StringIO import allura import json import PIL from nose.tools import assert_true, assert_equal, assert_in, assert_not_equal, assert_not_in from ming.orm.ormsession import ThreadLocalORMSession from mock import patch from tg import config from allura import model as M from allura.lib import helpers as h from allura.tests import decorators as td from alluratest.controller import TestController from forgewiki import model class TestRootController(TestController): def setUp(self): super(TestRootController, self).setUp() self.setup_with_tools() @td.with_wiki def setup_with_tools(self): pass def _find_edit_form(self, resp): def cond(f): return f.id == 'page_edit_form' return self.find_form(resp, cond) def test_root_index(self): page_url = h.urlquote(u'/wiki/tést/') r = self.app.get(page_url).follow() assert u'tést' in r assert 'Create Page' in r # No 'Create Page' button if user doesn't have 'create' perm r = self.app.get('/wiki/Home', extra_environ=dict(username='*anonymous')) assert 'Create Page' not in r, r @td.with_wiki def test_create_wiki_page(self): url = u"/p/test/wiki/create_wiki_page/" r = self.app.get(url) assert u'test' in r assert u'Create page' in 
r.body def test_root_markdown_syntax(self): response = self.app.get('/wiki/markdown_syntax/') assert 'Markdown Syntax' in response def test_root_browse_tags(self): response = self.app.get('/wiki/browse_tags/') assert 'Browse Labels' in response def test_root_browse_pages(self): response = self.app.get('/wiki/browse_pages/') assert 'Browse Pages' in response def test_root_new_page(self): response = self.app.get('/wiki/new_page?title=' + h.urlquote(u'tést')) assert u'tést' in response def test_root_new_search(self): self.app.get(h.urlquote(u'/wiki/tést/')) response = self.app.get('/wiki/search/?q=' + h.urlquote(u'tést')) assert u'Search wiki: tést' in response def test_feed(self): for ext in ['', '.rss', '.atom']: self.app.get('/wiki/feed%s' % ext, status=200) @patch('allura.lib.search.search') def test_search(self, search): r = self.app.get('/wiki/search/?q=test') assert_in( '<a href="/wiki/search/?q=test&amp;sort=score+asc" class="strong">relevance</a>', r) assert_in( '<a href="/wiki/search/?q=test&amp;sort=mod_date_dt+desc" class="">date</a>', r) p = M.Project.query.get(shortname='test') r = self.app.get('/wiki/search/?q=test&sort=score+asc') solr_query = { 'short_timeout': True, 'ignore_errors': False, 'rows': 25, 'start': 0, 'qt': 'dismax', 'qf': 'title^2 text', 'pf': 'title^2 text', 'fq': [ 'project_id_s:%s' % p._id, 'mount_point_s:wiki', '-deleted_b:true', 'type_s:("WikiPage" OR "WikiPage Snapshot")', 'is_history_b:False', ], 'hl': 'true', 'hl.simple.pre': '#ALLURA-HIGHLIGHT-START#', 'hl.simple.post': '#ALLURA-HIGHLIGHT-END#', 'sort': 'score asc', } search.assert_called_with('test', **solr_query) r = self.app.get( '/wiki/search/?q=test&search_comments=on&history=on&sort=mod_date_dt+desc') solr_query['fq'][ 3] = 'type_s:("WikiPage" OR "WikiPage Snapshot" OR "Post")' solr_query['fq'].remove('is_history_b:False') solr_query['sort'] = 'mod_date_dt desc' search.assert_called_with('test', **solr_query) r = self.app.get('/wiki/search/?q=test&parser=standard') 
solr_query['sort'] = 'score desc' solr_query['fq'][3] = 'type_s:("WikiPage" OR "WikiPage Snapshot")' solr_query['fq'].append('is_history_b:False') solr_query.pop('qt') solr_query.pop('qf') solr_query.pop('pf') search.assert_called_with('test', **solr_query) def test_search_help(self): r = self.app.get('/wiki/search/?q=test') btn = r.html.find('a', attrs={'class': 'icon btn search_help_modal'}) assert btn is not None, "Can't find a help button" div = r.html.find('div', attrs={'id': 'lightbox_search_help_modal'}) assert div is not None, "Can't find help text" assert_in('To search for an exact phrase', div.text) def test_nonexistent_page_edit(self): resp = self.app.get('/wiki/tést/') assert resp.location.endswith(h.urlquote(u'/wiki/tést/edit')), resp.location resp = resp.follow() assert 'tést' in resp def test_nonexistent_page_noedit(self): self.app.get('/wiki/tést/', extra_environ=dict(username='*anonymous'), status=404) self.app.get('/wiki/tést/', extra_environ=dict(username='test-user'), status=404) @patch('forgewiki.wiki_main.g.director.create_activity') def test_activity(self, create_activity): d = dict(title='foo', text='footext') self.app.post('/wiki/foo/update', params=d) assert create_activity.call_count == 1 assert create_activity.call_args[0][1] == 'created' create_activity.reset_mock() d = dict(title='foo', text='new footext') self.app.post('/wiki/foo/update', params=d) assert create_activity.call_count == 1 assert create_activity.call_args[0][1] == 'modified' create_activity.reset_mock() d = dict(title='new foo', text='footext') self.app.post('/wiki/foo/update', params=d) assert create_activity.call_count == 1 assert create_activity.call_args[0][1] == 'renamed' def test_labels(self): response = self.app.post( '/wiki/foo-bar/update', params={ 'title': 'foo', 'text': 'sometext', 'labels': 'test label', 'viewable_by-0.id': 'all'}).follow() assert_in('<a href="/p/test/wiki/search/?q=labels_t:%22test label%22&parser=standard">test label (1)</a>', response) def 
test_title_slashes(self): # forward slash not allowed in wiki page title - converted to dash response = self.app.post( '/wiki/foo-bar/update', params={ 'title': 'foo/bar', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}).follow() assert 'foo-bar' in response assert 'foo-bar' in response.request.url def test_dotted_page_name(self): r = self.app.post( '/wiki/page.dot/update', params={ 'title': 'page.dot', 'text': 'text1', 'labels': '', 'viewable_by-0.id': 'all'}).follow() assert 'page.dot' in r def test_subpage_attempt(self): self.app.get('/wiki/tést/') self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'text1', 'labels': '', 'viewable_by-0.id': 'all'}) assert '/p/test/wiki/Home/' in self.app.get('/wiki/tést/Home/') self.app.get('/wiki/tést/notthere/', status=404) def test_page_history(self): self.app.get('/wiki/tést/') self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'text1', 'labels': '', 'viewable_by-0.id': 'all'}) self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'text2', 'labels': '', 'viewable_by-0.id': 'all'}) response = self.app.get('/wiki/tést/history') assert 'tést' in response # two revisions are shown assert '2 by Test Admin' in response assert '1 by Test Admin' in response # you can revert to an old revison, but not the current one assert response.html.find('a', {'data-dialog-id': '1'}), response.html assert not response.html.find('a', {'data-dialog-id': '2'}) response = self.app.get('/wiki/tést/history', extra_environ=dict(username='*anonymous')) # two revisions are shown assert '2 by Test Admin' in response assert '1 by Test Admin' in response # you cannot revert to any revision assert not response.html.find('a', {'data-dialog-id': '1'}) assert not response.html.find('a', {'data-dialog-id': '2'}) def test_page_diff(self): self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) self.app.post('/wiki/tést/revert', 
params=dict(version='1')) response = self.app.get('/wiki/tést/diff?v1=0&v2=0') assert 'tést' in response d = dict(title='testdiff', text="""**Optionally**, you may also want to remove all the unused accounts that have accumulated (one was created for *every* logged in SF-user who has visited your MediaWiki hosted app): ~~~~~ php removeUnusedAccounts.php --delete ~~~~~ #### 6) Import image (and other) files into your Mediawiki install #### Upload the backup of your data files to the project web. ~~~~~ scp projectname_mediawiki_files.tar.gz USERNAME@web.domain.net: ~~~~~ In the project web shell, unpack the files to the images directory of you wiki installation. In the backup, the images are in a subfolder *projectname*, so follow these steps: ~~~~~ cd wiki mkdir oldimages cd oldimages tar -xvzf ../../../projectname_mediawiki_files.tar.gz mv projectname/* ../images/ cd .. rm -r oldimages # Now fix permissons. Wrong permissions may cause massive slowdown! chown yournick:apache images/ --recursive chmod 775 images/ --recursive ~~~~~ **TODO: FIXME:** The following can't be quite correct: Now hit your wiki a few times from a browser. Initially, it will be dead slow, as it is trying to build thumbnails for the images. And it will time out, a lot. Keep hitting reload, until it works. **Note:** The logo shown in the sidebar is no longer stored as an object in the wiki (as it was in the Hosted App installation). Rather save it as a regular file, then edit LocalSettings.php, adding""") self.app.post('/wiki/testdiff/update', params=d) d = dict(title='testdiff', text="""**Optionally**, you may also want to remove all the unused accounts that have accumulated (one was created for *every* logged in SF-user who has visited your MediaWiki hosted app): ~~~~~ php removeUnusedAccounts.php --delete ~~~~~ #### 6) Import image (and other) files into your Mediawiki install #### Upload the backup of your data files to the project web. 
~~~~~ scp projectname_mediawiki_files.tar.gz USERNAME@web.domain.net: ~~~~~ In the project web shell, unpack the files to the images directory of you wiki installation. In the backup, the images are in a subfolder *projectname*, so follow these steps: ~~~~~ cd wiki mkdir oldimages cd oldimages tar -xvzf ../../../projectname_mediawiki_files.tar.gz mv projectname/* ../images/ cd .. rm -r oldimages # Now fix permissions. Wrong permissions may cause a massive slowdown! chown yournick:apache images/ --recursive chmod 775 images/ --recursive ~~~~~ **TODO: FIXME:** The following can't be quite correct: Now hit your wiki a few times from a browser. Initially, it will be dead slow, as it is trying to build thumbnails for the images. And it will time out, a lot. Keep hitting reload, until it works. **Note:** The logo shown in the sidebar is no longer stored as an object in the wiki (as it was in the Hosted App installation). Rather save it as a regular file, then edit LocalSettings.php, adding""") self.app.post('/wiki/testdiff/update', params=d) response = self.app.get('/wiki/testdiff/diff?v1=1&v2=2') assert_in('# Now fix <del> permissons. </del> <ins> permissions. </ins> ' 'Wrong permissions may cause <ins> a </ins> massive slowdown!', response) response = self.app.get('/wiki/testdiff/diff?v1=2&v2=1') assert_in('# Now fix <del> permissions. </del> <ins> permissons. </ins> ' 'Wrong permissions may cause <del> a </del> massive slowdown!', response) def test_page_raw(self): self.app.post( '/wiki/TEST/update', params={ 'title': 'TEST', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) response = self.app.get('/wiki/TEST/raw') assert 'TEST' in response def test_page_revert_no_text(self): self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': '', 'labels': '', 'viewable_by-0.id': 'all'}) response = self.app.post('/wiki/tést/revert', params=dict(version='1')) assert '.' 
in response.json['location'] response = self.app.get('/wiki/tést/') assert 'tést' in response def test_page_revert_with_text(self): self.app.get('/wiki/tést/') self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) response = self.app.post('/wiki/tést/revert', params=dict(version='1')) assert '.' in response.json['location'] response = self.app.get('/wiki/tést/') assert 'tést' in response @patch('forgewiki.wiki_main.g.spam_checker') def test_page_update(self, spam_checker): self.app.get('/wiki/tést/') response = self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) assert_equal(spam_checker.check.call_args[0][0], u'tést\nsometext') assert 'tést' in response def test_page_label_unlabel(self): self.app.get('/wiki/tést/') response = self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': 'yellow,green', 'viewable_by-0.id': 'all'}) assert 'tést' in response response = self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': 'yellow', 'viewable_by-0.id': 'all'}) assert 'tést' in response def test_page_label_count(self): labels = "label" for i in range(1, 100): labels += ',label%s' % i self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': labels, 'viewable_by-0.id': 'all'}) r = self.app.get('/wiki/browse_tags/') assert 'results of 100 ' in r assert '<div class="page_list">' in r assert '(Page 1 of 4)' in r assert '<td>label30</td>' in r assert '<td>label1</td>' in r r = self.app.get('/wiki/browse_tags/?page=3') assert '<td>label77</td>' in r assert '<td>label99</td>' in r def test_new_attachment(self): self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) content = file(__file__).read() self.app.post('/wiki/tést/attach', upload_files=[('file_info', 
'test_root.py', content)]) response = self.app.get('/wiki/tést/') assert 'test_root.py' in response def test_attach_two_files(self): self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) content = file(__file__).read() self.app.post('/wiki/tést/attach', upload_files=[('file_info', 'test1.py', content), ('file_info', 'test2.py', content)]) response = self.app.get('/wiki/tést/') assert 'test1.py' in response assert 'test2.py' in response def test_new_text_attachment_content(self): self.app.post( '/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) file_name = 'test_root.py' file_data = file(__file__).read() upload = ('file_info', file_name, file_data) self.app.post('/wiki/tést/attach', upload_files=[upload]) page_editor = self.app.get('/wiki/tést/edit') download = page_editor.click(description=file_name) assert_true(download.body == file_data) def test_new_image_attachment_content(self): self.app.post('/wiki/TEST/update', params={ 'title': 'TEST', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) file_name = 'neo-icon-set-454545-256x350.png' file_path = os.path.join( allura.__path__[0], 'nf', 'allura', 'images', file_name) file_data = file(file_path).read() upload = ('file_info', file_name, file_data) self.app.post('/wiki/TEST/attach', upload_files=[upload]) h.set_context('test', 'wiki', neighborhood='Projects') page = model.Page.query.find(dict(title='TEST')).first() filename = page.attachments[0].filename uploaded = PIL.Image.open(file_path) r = self.app.get('/wiki/TEST/attachment/' + filename) downloaded = PIL.Image.open(StringIO.StringIO(r.body)) assert uploaded.size == downloaded.size r = self.app.get('/wiki/TEST/attachment/' + filename + '/thumb') thumbnail = PIL.Image.open(StringIO.StringIO(r.body)) assert thumbnail.size == (255, 255) # Make sure thumbnail is absent r = self.app.get('/wiki/TEST/') img_srcs = [i['src'] for i 
in r.html.findAll('img')] assert ('/p/test/wiki/TEST/attachment/' + filename) not in img_srcs, img_srcs def test_sidebar_static_page(self): response = self.app.get('/wiki/tést/') assert 'Edit this page' not in response assert 'Related Pages' not in response def test_related_links(self): response = self.app.get('/wiki/TEST/').follow() assert 'Edit TEST' in response assert 'Related' not in response self.app.post('/wiki/TEST/update', params={ 'title': 'TEST', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) self.app.post('/wiki/aaa/update', params={ 'title': 'aaa', 'text': '', 'labels': '', 'viewable_by-0.id': 'all'}) self.app.post('/wiki/bbb/update', params={ 'title': 'bbb', 'text': '', 'labels': '', 'viewable_by-0.id': 'all'}) h.set_context('test', 'wiki', neighborhood='Projects') a = model.Page.query.find(dict(title='aaa')).first() a.text = '\n[TEST]\n' b = model.Page.query.find(dict(title='TEST')).first() b.text = '\n[bbb]\n' ThreadLocalORMSession.flush_all() M.MonQTask.run_ready() ThreadLocalORMSession.flush_all() ThreadLocalORMSession.close_all() response = self.app.get('/wiki/TEST/') assert 'Related' in response assert 'aaa' in response assert 'bbb' in response def test_show_discussion(self): self.app.post('/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) wiki_page = self.app.get('/wiki/tést/') assert wiki_page.html.find('div', {'id': 'new_post_holder'}) options_admin = self.app.get( '/admin/wiki/options', validate_chunk=True) assert options_admin.form['show_discussion'].checked options_admin.form['show_discussion'].checked = False options_admin.form.submit() options_admin2 = self.app.get( '/admin/wiki/options', validate_chunk=True) assert not options_admin2.form['show_discussion'].checked wiki_page2 = self.app.get('/wiki/tést/') assert not wiki_page2.html.find('div', {'id': 'new_post_holder'}) def test_show_left_bar(self): self.app.post('/wiki/tést/update', params={ 'title': 'tést', 
'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) wiki_page = self.app.get('/wiki/tést/') assert wiki_page.html.find('ul', {'class': 'sidebarmenu'}) options_admin = self.app.get( '/admin/wiki/options', validate_chunk=True) assert options_admin.form['show_left_bar'].checked options_admin.form['show_left_bar'].checked = False options_admin.form.submit() options_admin2 = self.app.get( '/admin/wiki/options', validate_chunk=True) assert not options_admin2.form['show_left_bar'].checked wiki_page2 = self.app.get( '/wiki/tést/', extra_environ=dict(username='*anonymous')) assert not wiki_page2.html.find('ul', {'class': 'sidebarmenu'}) wiki_page3 = self.app.get('/wiki/tést/') assert not wiki_page3.html.find('ul', {'class': 'sidebarmenu'}) def test_show_metadata(self): self.app.post('/wiki/tést/update', params={ 'title': 'tést', 'text': 'sometext', 'labels': '', 'viewable_by-0.id': 'all'}) wiki_page = self.app.get('/wiki/tést/') assert wiki_page.html.find('div', {'class': 'editbox'}) options_admin = self.app.get( '/admin/wiki/options', validate_chunk=True) assert options_admin.form['show_right_bar'].checked options_admin.form['show_right_bar'].checked = False options_admin.form.submit() options_admin2 = self.app.get( '/admin/wiki/options', validate_chunk=True) assert not options_admin2.form['show_right_bar'].checked wiki_page2 = self.app.get('/wiki/tést/') assert not wiki_page2.html.find('div', {'class': 'editbox'}) def test_edit_mount_label(self): r = self.app.get('/admin/wiki/edit_label', validate_chunk=True) assert r.form['mount_label'].value == 'Wiki' r = self.app.post('/admin/wiki/update_label', params=dict( mount_label='Tricky Wiki')) assert M.MonQTask.query.find({ 'task_name': 'allura.tasks.event_tasks.event', 'args': 'project_menu_updated' }).all() r = self.app.get('/admin/wiki/edit_label', validate_chunk=True) assert r.form['mount_label'].value == 'Tricky Wiki' def test_page_links_are_colored(self): self.app.get('/wiki/space%20page/') params = { 'title': 
'space page', 'text': '''There is a space in the title!''', 'labels': '', 'viewable_by-0.id': 'all'} self.app.post('/wiki/space%20page/update', params=params) self.app.get('/wiki/TEST/') params = { 'title': 'TEST', 'text': ''' * Here is a link to [this page](TEST) * Here is a link to [another page](Some page which does not exist) * Here is a link to [space page space](space page) * Here is a link to [space page escape](space%20page) * Here is a link to [TEST] * Here is a link to [Some page which does not exist] * Here is a link to [space page] * Here is a link to [space%20page] * Here is a link to [another attach](TEST/attachment/attach.txt) * Here is a link to [attach](TEST/attachment/test_root.py) ''', 'labels': '', 'viewable_by-0.id': 'all'} self.app.post('/wiki/TEST/update', params=params) content = file(__file__).read() self.app.post('/wiki/TEST/attach', upload_files=[('file_info', 'test_root.py', content)]) r = self.app.get('/wiki/TEST/') found_links = 0 for link in r.html.findAll('a'): if link.contents == ['this page']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['another page']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['space page space']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['space page escape']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['[TEST]']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['[Some page which does not exist]']: assert 'notfound' in link.get('class', '') found_links += 1 if link.contents == ['[space page]']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['[space%20page]']: assert 'notfound' not in link.get('class', '') found_links += 1 if link.contents == ['another attach']: assert 'notfound' in link.get('class', '') found_links += 1 if link.contents == ['attach']: assert 'notfound' not in 
link.get('class', '') found_links += 1 assert found_links == 10, 'Wrong number of links found' def test_home_rename(self): assert 'The resource was found at http://localhost/p/test/wiki/Home/;' in self.app.get( '/p/test/wiki/') req = self.app.get('/p/test/wiki/Home/edit') form = self._find_edit_form(req) form['title'].value = 'new_title' form.submit() assert 'The resource was found at http://localhost/p/test/wiki/new_title/;' in self.app.get( '/p/test/wiki/') @patch.dict('allura.lib.app_globals.config', markdown_cache_threshold='0') def test_cached_html(self): """Ensure cached html is not escaped.""" html = '<p><span>My Html</span></p>' self.app.post('/wiki/cache/update', params={ 'title': 'cache', 'text': html, 'labels': '', 'viewable_by-0.id': 'all'}) # first request caches html, second serves from cache r = self.app.get('/wiki/cache/') r = self.app.get('/wiki/cache/') assert_true(html in r) def test_page_delete(self): self.app.post('/wiki/aaa/update', params={ 'title': 'aaa', 'text': '111', 'labels': '', 'viewable_by-0.id': 'all'}) self.app.post('/wiki/bbb/update', params={ 'title': 'bbb', 'text': '222', 'labels': '', 'viewable_by-0.id': 'all'}) response = self.app.get('/wiki/browse_pages/') assert 'aaa' in response assert 'bbb' in response self.app.post('/wiki/bbb/delete') response = self.app.get('/wiki/browse_pages/') assert 'aaa' in response assert '?deleted=True">bbb' in response n = M.Notification.query.get(subject="[test:wiki] test-admin removed page bbb") assert '222' in n.text def test_mailto_links(self): self.app.get('/wiki/test_mailto/') params = { 'title': 'test_mailto', 'text': ''' * Automatic mailto #1 <darth.vader@deathstar.org> * Automatic mailto #2 <mailto:luke.skywalker@tatooine.org> * Handmaid mailto <a href="mailto:yoda@jedi.org">Email Yoda</a> ''', 'labels': '', 'viewable_by-0.id': 'all'} self.app.post('/wiki/test_mailto/update', params=params) r = self.app.get('/wiki/test_mailto/') mailto_links = 0 for link in r.html.findAll('a'): if 
link.get('href') == 'mailto:darth.vader@deathstar.org': assert 'notfound' not in link.get('class', '') mailto_links += 1 if link.get('href') == 'mailto:luke.skywalker@tatooine.org': assert 'notfound' not in link.get('class', '') mailto_links += 1 if link.get('href') == 'mailto:yoda@jedi.org': assert link.contents == ['Email Yoda'] assert 'notfound' not in link.get('class', '') mailto_links += 1 assert mailto_links == 3, 'Wrong number of mailto links' def test_user_browse_page(self): r = self.app.get('/wiki/browse_pages/') assert '<td>Test Admin (test-admin)</td>' in r def test_subscribe(self): user = M.User.query.get(username='test-user') # user is not subscribed assert not M.Mailbox.subscribed(user_id=user._id) r = self.app.get('/p/test/wiki/Home/', extra_environ={'username': str(user.username)}) sidebar_menu = r.html.find('div', attrs={'id': 'sidebar'}) assert 'Subscribe to wiki' in str(sidebar_menu) # subscribe self.app.post('/p/test/wiki/subscribe', {'subscribe': True}, extra_environ={'username': str(user.username)}).follow() # user is subscribed assert M.Mailbox.subscribed(user_id=user._id) r = self.app.get('/p/test/wiki/Home/', extra_environ={'username': str(user.username)}) sidebar_menu = r.html.find('div', attrs={'id': 'sidebar'}) assert 'Unsubscribe' in str(sidebar_menu) # unsubscribe self.app.post('/p/test/wiki/subscribe', {'unsubscribe': True}, extra_environ={'username': str(user.username)}).follow() # user is not subscribed assert not M.Mailbox.subscribed(user_id=user._id) r = self.app.get('/p/test/wiki/Home/', extra_environ={'username': str(user.username)}) sidebar_menu = r.html.find('div', attrs={'id': 'sidebar'}) assert 'Subscribe to wiki' in str(sidebar_menu) def test_rate_limit_new_page(self): # Set rate limit to unlimit with h.push_config(config, **{'forgewiki.rate_limits': '{}'}): r = self.app.get('/p/test/wiki/new-page-title/') assert_equal(r.status_int, 302) assert_equal( r.location, 'http://localhost/p/test/wiki/new-page-title/edit') 
assert_equal(self.webflash(r), '') # Set rate limit to 1 in first hour of project with h.push_config(config, **{'forgewiki.rate_limits': '{"3600": 1}'}): r = self.app.get('/p/test/wiki/new-page-title/') assert_equal(r.status_int, 302) assert_equal(r.location, 'http://localhost/p/test/wiki/') wf = json.loads(self.webflash(r)) assert_equal(wf['status'], 'error') assert_equal( wf['message'], 'Page create/edit rate limit exceeded. Please try again later.') def test_rate_limit_update(self): # Set rate limit to unlimit with h.push_config(config, **{'forgewiki.rate_limits': '{}'}): r = self.app.post( '/p/test/wiki/page1/update', dict(text='Some text', title='page1')).follow() assert_in('Some text', r) p = model.Page.query.get(title='page1') assert_not_equal(p, None) # Set rate limit to 1 in first hour of project with h.push_config(config, **{'forgewiki.rate_limits': '{"3600": 1}'}): r = self.app.post( '/p/test/wiki/page2/update', dict(text='Some text', title='page2')) assert_equal(r.status_int, 302) assert_equal(r.location, 'http://localhost/p/test/wiki/') wf = json.loads(self.webflash(r)) assert_equal(wf['status'], 'error') assert_equal( wf['message'], 'Page create/edit rate limit exceeded. 
Please try again later.') p = model.Page.query.get(title='page2') assert_equal(p, None) def test_rate_limit_by_user(self): # also test that multiple edits to a page counts as one page towards the limit # test/wiki/Home and test/sub1/wiki already were created by this user # and proactively get the user-project wiki created (otherwise it'll be created during the subsequent edits) self.app.get('/u/test-admin/wiki/') with h.push_config(config, **{'forgewiki.rate_limits_per_user': '{"3600": 5}'}): r = self.app.post('/p/test/wiki/page123/update', # page 4 (remember, 3 other projects' wiki pages) dict(text='Starting a new page, ok', title='page123')) assert_equal(self.webflash(r), '') r = self.app.post('/p/test/wiki/page123/update', dict(text='Editing some', title='page123')) assert_equal(self.webflash(r), '') r = self.app.post('/p/test/wiki/page123/update', dict(text='Still editing', title='page123')) assert_equal(self.webflash(r), '') r = self.app.post('/p/test/wiki/pageABC/update', # page 5 dict(text='Another new page', title='pageABC')) assert_equal(self.webflash(r), '') r = self.app.post('/p/test/wiki/pageZZZZZ/update', # page 6 dict(text='This new page hits the limit', title='pageZZZZZ')) wf = json.loads(self.webflash(r)) assert_equal(wf['status'], 'error') assert_equal(wf['message'], 'Page create/edit rate limit exceeded. 
Please try again later.') def test_sidebar_admin_menu(self): r = self.app.get('/p/test/wiki/Home/') menu = r.html.find('div', {'id': 'sidebar-admin-menu'}) assert_equal(menu.attrMap['class'], 'hidden') # (not expanded) menu = [li.find('span').getText() for li in menu.findAll('li')] assert_equal( menu, ['Set Home', 'Permissions', 'Options', 'Rename', 'Delete Everything']) def test_sidebar_admin_menu_is_expanded(self): r = self.app.get('/p/test/admin/wiki/permissions') menu = r.html.find('div', {'id': 'sidebar-admin-menu'}) assert_not_in('hidden', menu.attrMap.get('class', '')) # expanded def test_sidebar_admin_menu_invisible_to_not_admin(self): def assert_invisible_for(username): env = {'username': username} r = self.app.get('/p/test/wiki/Home/', extra_environ=env) menu = r.html.find('div', {'id': 'sidebar-admin-menu'}) assert_equal(menu, None) assert_invisible_for('*anonymous') assert_invisible_for('test-user')
heiths/allura
ForgeWiki/forgewiki/tests/functional/test_root.py
Python
apache-2.0
38,959
# Copyright 2020 The SQLFlow Authors. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import six import xgboost as xgb from runtime.model import collect_metadata from runtime.model import oss as pai_model_store from runtime.model import save_metadata from runtime.pai.pai_distributed import make_distributed_info_without_evaluator from runtime.step.xgboost.save import save_model_to_local_file from runtime.xgboost.dataset import xgb_dataset from runtime.xgboost.pai_rabit import PaiXGBoostTracker, PaiXGBoostWorker def dist_train(flags, datasource, select, model_params, train_params, feature_metas, feature_column_names, label_meta, validation_select, disk_cache=False, batch_size=None, epoch=1, load_pretrained_model=False, is_pai=False, pai_train_table="", pai_validate_table="", oss_model_dir="", transform_fn=None, feature_column_code="", model_repo_image="", original_sql=""): if not is_pai: raise Exception( "XGBoost distributed training is only supported on PAI") num_workers = len(flags.worker_hosts.split(",")) cluster, node, task_id = make_distributed_info_without_evaluator(flags) master_addr = cluster["ps"][0].split(":") master_host = master_addr[0] master_port = int(master_addr[1]) + 1 tracker = None print("node={}, task_id={}, cluster={}".format(node, task_id, cluster)) try: if node == 'ps': if task_id == 0: tracker = PaiXGBoostTracker(host=master_host, nworkers=num_workers, port=master_port) else: if node != 'chief': task_id += 1 envs = 
PaiXGBoostWorker.gen_envs(host=master_host, port=master_port, ttl=200, nworkers=num_workers, task_id=task_id) xgb.rabit.init(envs) rank = xgb.rabit.get_rank() train(datasource, select, model_params, train_params, feature_metas, feature_column_names, label_meta, validation_select, disk_cache, batch_size, epoch, load_pretrained_model, is_pai, pai_train_table, pai_validate_table, rank, nworkers=num_workers, oss_model_dir=oss_model_dir, transform_fn=transform_fn, feature_column_code=feature_column_code, model_repo_image=model_repo_image, original_sql=original_sql) except Exception as e: print("node={}, id={}, exception={}".format(node, task_id, e)) six.reraise(*sys.exc_info()) # For better backtrace finally: if tracker is not None: tracker.join() if node != 'ps': xgb.rabit.finalize() def train(datasource, select, model_params, train_params, feature_metas, feature_column_names, label_meta, validation_select, disk_cache=False, batch_size=None, epoch=1, load_pretrained_model=False, is_pai=False, pai_train_table="", pai_validate_table="", rank=0, nworkers=1, oss_model_dir="", transform_fn=None, feature_column_code="", model_repo_image="", original_sql=""): if batch_size == -1: batch_size = None print("Start training XGBoost model...") dtrain = xgb_dataset(datasource, 'train.txt', select, feature_metas, feature_column_names, label_meta, is_pai, pai_train_table, cache=disk_cache, batch_size=batch_size, epoch=epoch, rank=rank, nworkers=nworkers, transform_fn=transform_fn, feature_column_code=feature_column_code) if len(validation_select.strip()) > 0: dvalidate = list( xgb_dataset(datasource, 'validate.txt', validation_select, feature_metas, feature_column_names, label_meta, is_pai, pai_validate_table, rank=rank, nworkers=nworkers, transform_fn=transform_fn, feature_column_code=feature_column_code))[0] filename = "my_model" if load_pretrained_model: bst = xgb.Booster() bst.load_model(filename) else: bst = None re = None for per_batch_dmatrix in dtrain: watchlist = 
[(per_batch_dmatrix, "train")] if len(validation_select.strip()) > 0: watchlist.append((dvalidate, "validate")) re = dict() bst = xgb.train(model_params, per_batch_dmatrix, evals=watchlist, evals_result=re, xgb_model=bst, **train_params) print("Evaluation result: %s" % re) if rank == 0: # TODO(sneaxiy): collect features and label metadata = collect_metadata(original_sql=original_sql, select=select, validation_select=validation_select, model_repo_image=model_repo_image, class_name=model_params.get("booster"), attributes=model_params, features=None, label=None, evaluation=re) save_model_to_local_file(bst, model_params, filename) save_metadata("model_meta.json", metadata) if is_pai and len(oss_model_dir) > 0: save_model(oss_model_dir, filename, model_params, train_params, feature_metas, feature_column_names, label_meta, feature_column_code) def save_model(model_dir, filename, model_params, train_params, feature_metas, feature_column_names, label_meta, feature_column_code): pai_model_store.save_file(model_dir, filename) pai_model_store.save_file(model_dir, "{}.pmml".format(filename)) pai_model_store.save_file(model_dir, "model_meta.json") # (TODO:lhw) remove this function call, use the new metadata in load_metas pai_model_store.save_metas( model_dir, 1, "xgboost_model_desc", "", # estimator = "" model_params, train_params, feature_metas, feature_column_names, label_meta, feature_column_code)
sql-machine-learning/sqlflow
python/runtime/xgboost/train.py
Python
apache-2.0
8,174
# -*- coding: utf-8 -*- from collections import defaultdict import json import logging from ._compat import ElementTree, urlopen MDN_SITEMAP = 'https://developer.mozilla.org/sitemaps/en-US/sitemap.xml' SITEMAP_NS = 'http://www.sitemaps.org/schemas/sitemap/0.9' log = logging.getLogger(__name__) def parse(): """ Generate a cross-reference dictionary for the MDN JavaScript Reference. :rtype: dict """ with urlopen(MDN_SITEMAP) as f: xml = ElementTree.parse(f) refs = defaultdict(dict) for loc in xml.iterfind('{{{ns}}}url/{{{ns}}}loc'.format(ns=SITEMAP_NS)): url = loc.text if 'JavaScript/Reference/Global_Objects/' not in url: continue url_suffix = url[81:] parts = url_suffix.split('/') if len(parts) == 1: name = parts[0] if name[0].isupper(): ref_type = 'class' else: ref_type = 'data' elif len(parts) == 2: cls, attr = parts with urlopen('{url}$json'.format(url=url)) as f: metadata = json.loads(f.read().decode('utf-8')) name = '{0}.{1}'.format(cls, attr) if 'Method' in metadata['tags']: ref_type = 'function' elif 'Property' in metadata['tags']: ref_type = 'attribute' else: fmt = 'Unknown ref_type for {0}. Tags: {1}' log.warning(fmt.format(url, ', '.join(metadata['tags']))) continue else: log.warning('Skipping URL (too many parts): {0}'.format(url)) continue refs[ref_type][name] = url_suffix return dict(refs)
malept/js-sphinx-inventory
sphinx_inventory/js/mdn.py
Python
apache-2.0
1,705
# coding=utf-8 # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for initializers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tf_slim from tf_slim.layers import initializers from tf_slim.layers import regularizers # pylint: disable=g-direct-tensorflow-import from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test class InitializerTest(test.TestCase): def test_xavier_wrong_dtype(self): with self.assertRaisesRegexp( TypeError, 'Cannot create initializer for non-floating point type.'): initializers.xavier_initializer(dtype=dtypes.int32) self.assertIsNone(regularizers.l1_regularizer(0.)(None)) def _test_xavier(self, initializer, shape, variance, uniform): with session.Session() as sess: var = variable_scope.get_variable( name='test', shape=shape, dtype=dtypes.float32, initializer=initializer( uniform=uniform, seed=1)) sess.run(variables.global_variables_initializer()) values = var.eval() self.assertAllClose(np.var(values), variance, 1e-3, 1e-3) def test_xavier_uniform(self): self._test_xavier(initializers.xavier_initializer, [100, 
40], 2. / (100. + 40.), True) def test_xavier_normal(self): self._test_xavier(initializers.xavier_initializer, [100, 40], 2. / (100. + 40.), False) def test_xavier_scalar(self): self._test_xavier(initializers.xavier_initializer, [], 0.0, True) def test_xavier_conv2d_uniform(self): self._test_xavier(tf_slim.xavier_initializer_conv2d, [100, 40, 5, 7], 2. / (100. * 40 * (5 + 7)), True) def test_xavier_conv2d_normal(self): self._test_xavier(tf_slim.xavier_initializer_conv2d, [100, 40, 5, 7], 2. / (100. * 40 * (5 + 7)), False) class VarianceScalingInitializerTest(test.TestCase): def test_wrong_dtype(self): with self.assertRaisesRegexp( TypeError, 'Cannot create initializer for non-floating point type.'): initializers.variance_scaling_initializer(dtype=dtypes.int32) initializer = initializers.variance_scaling_initializer() with self.assertRaisesRegexp( TypeError, 'Cannot create initializer for non-floating point type.'): initializer([], dtype=dtypes.int32) def _test_variance(self, initializer, shape, variance, factor, mode, uniform): with ops.Graph().as_default() as g: with self.session(graph=g) as sess: var = variable_scope.get_variable( name='test', shape=shape, dtype=dtypes.float32, initializer=initializer( factor=factor, mode=mode, uniform=uniform, seed=1)) sess.run(variables.global_variables_initializer()) values = var.eval() self.assertAllClose(np.var(values), variance, 1e-3, 1e-3) def test_fan_in(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40], variance=2. / 100., factor=2.0, mode='FAN_IN', uniform=uniform) def test_fan_out(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40], variance=2. / 40., factor=2.0, mode='FAN_OUT', uniform=uniform) def test_fan_avg(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40], variance=4. / (100. 
+ 40.), factor=2.0, mode='FAN_AVG', uniform=uniform) def test_conv2d_fan_in(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40, 5, 7], variance=2. / (100. * 40. * 5.), factor=2.0, mode='FAN_IN', uniform=uniform) def test_conv2d_fan_out(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40, 5, 7], variance=2. / (100. * 40. * 7.), factor=2.0, mode='FAN_OUT', uniform=uniform) def test_conv2d_fan_avg(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40, 5, 7], variance=2. / (100. * 40. * (5. + 7.)), factor=2.0, mode='FAN_AVG', uniform=uniform) def test_xavier_uniform(self): self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40], variance=2. / (100. + 40.), factor=1.0, mode='FAN_AVG', uniform=True) def test_xavier_normal(self): self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40], variance=2. / (100. + 40.), factor=1.0, mode='FAN_AVG', uniform=False) def test_xavier_scalar(self): self._test_variance( initializers.variance_scaling_initializer, shape=[], variance=0.0, factor=1.0, mode='FAN_AVG', uniform=False) def test_xavier_conv2d_uniform(self): self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40, 5, 7], variance=2. / (100. * 40. * (5. + 7.)), factor=1.0, mode='FAN_AVG', uniform=True) def test_xavier_conv2d_normal(self): self._test_variance( initializers.variance_scaling_initializer, shape=[100, 40, 5, 7], variance=2. / (100. * 40. * (5. + 7.)), factor=1.0, mode='FAN_AVG', uniform=True) def test_1d_shape_fan_in(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100], variance=2. 
/ 100., factor=2.0, mode='FAN_IN', uniform=uniform) def test_1d_shape_fan_out(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100], variance=2. / 100., factor=2.0, mode='FAN_OUT', uniform=uniform) def test_1d_shape_fan_avg(self): for uniform in [False, True]: self._test_variance( initializers.variance_scaling_initializer, shape=[100], variance=4. / (100. + 100.), factor=2.0, mode='FAN_AVG', uniform=uniform) if __name__ == '__main__': test.main()
google-research/tf-slim
tf_slim/layers/initializers_test.py
Python
apache-2.0
7,625
#!/usr/bin/python2.7 # Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from google.appengine.api import datastore_errors from model import * from photo import create_photo, PhotoError from utils import * from detect_spam import SpamDetector import extend import reveal import subscribe from django.utils.translation import ugettext as _ from urlparse import urlparse # TODO(jessien): Clean up duplicate code here and in create.py. # https://github.com/google/personfinder/issues/157 # how many days left before we warn about imminent expiration. # Make this at least 1. EXPIRY_WARNING_THRESHOLD = 7 class Handler(BaseHandler): def get(self): # Check the request parameters. if not self.params.id: return self.error(404, _('No person id was specified.')) try: person = Person.get(self.repo, self.params.id) # TODO(ichikawa) Consider removing this "except" clause. # I don't think ValueError is thrown here. except ValueError: return self.error(404, _("This person's entry does not exist or has been deleted.")) if not person: return self.error(404, _("This person's entry does not exist or has been deleted.")) standalone = self.request.get('standalone') # Render the page. enable_notes_url = self.get_url('/enable_notes', id=self.params.id) self.render('add_note.html', person=person, standalone=standalone, enable_notes_url=enable_notes_url) def post(self): """Post a note in person's record view page""" if not self.params.text: return self.error( 200, _('Message is required. 
Please go back and try again.')) if not self.params.author_name: return self.error( 200, _('Your name is required in the "About you" section. ' 'Please go back and try again.')) if (self.params.status == 'is_note_author' and not self.params.author_made_contact): return self.error( 200, _('Please check that you have been in contact with ' 'the person after the disaster, or change the ' '"Status of this person" field.')) if (self.params.status == 'believed_dead' and not self.config.allow_believed_dead_via_ui): return self.error( 200, _('Not authorized to post notes with the status ' '"believed_dead".')) person = Person.get(self.repo, self.params.id) if person.notes_disabled: return self.error( 200, _('The author has disabled status updates ' 'on this record.')) # If a photo was uploaded, create and store a new Photo entry and get # the URL where it's served; otherwise, use the note_photo_url provided. photo, photo_url = (None, self.params.note_photo_url) if self.params.note_photo is not None: try: photo, photo_url = create_photo(self.params.note_photo, self) except PhotoError, e: return self.error(400, e.message) photo.put() spam_detector = SpamDetector(self.config.bad_words) spam_score = spam_detector.estimate_spam_score(self.params.text) if (spam_score > 0): note = NoteWithBadWords.create_original( self.repo, entry_date=get_utcnow(), person_record_id=self.params.id, author_name=self.params.author_name, author_email=self.params.author_email, author_phone=self.params.author_phone, source_date=get_utcnow(), author_made_contact=bool(self.params.author_made_contact), status=self.params.status, email_of_found_person=self.params.email_of_found_person, phone_of_found_person=self.params.phone_of_found_person, last_known_location=self.params.last_known_location, text=self.params.text, photo=photo, photo_url=photo_url, spam_score=spam_score, confirmed=False) # Write the new NoteWithBadWords to the datastore note.put_new() # When the note is detected as spam, we do not update 
person record # or log action. We ask the note author for confirmation first. return self.redirect('/post_flagged_note', id=note.get_record_id(), author_email=note.author_email, repo=self.repo) else: note = Note.create_original( self.repo, entry_date=get_utcnow(), person_record_id=self.params.id, author_name=self.params.author_name, author_email=self.params.author_email, author_phone=self.params.author_phone, source_date=get_utcnow(), author_made_contact=bool(self.params.author_made_contact), status=self.params.status, email_of_found_person=self.params.email_of_found_person, phone_of_found_person=self.params.phone_of_found_person, last_known_location=self.params.last_known_location, text=self.params.text, photo=photo, photo_url=photo_url) # Write the new regular Note to the datastore note.put_new() # Specially log 'believed_dead'. if note.status == 'believed_dead': UserActionLog.put_new( 'mark_dead', note, person.primary_full_name, self.request.remote_addr) # Specially log a switch to an alive status. if (note.status in ['believed_alive', 'is_note_author'] and person.latest_status not in ['believed_alive', 'is_note_author']): UserActionLog.put_new('mark_alive', note, person.primary_full_name) # Update the Person based on the Note. if person: person.update_from_note(note) # Send notification to all people # who subscribed to updates on this person subscribe.send_notifications(self, person, [note]) # write the updated person record to datastore db.put(person) # If user wants to subscribe to updates, redirect to the subscribe page if self.params.subscribe: return self.redirect('/subscribe', id=person.record_id, subscribe_email=self.params.author_email, context='add_note') # Redirect to view page so the browser's back button works properly. self.redirect('/view', id=self.params.id, query=self.params.query)
AwesomeTurtle/personfinder
app/add_note.py
Python
apache-2.0
7,586
#!/usr/bin/python2.5 # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Google App Engine Pipeline API for complex, asynchronous workflows.""" __all__ = [ # Public API. 'Error', 'PipelineSetupError', 'PipelineExistsError', 'PipelineRuntimeError', 'SlotNotFilledError', 'SlotNotDeclaredError', 'UnexpectedPipelineError', 'PipelineStatusError', 'Slot', 'Pipeline', 'PipelineFuture', 'After', 'InOrder', 'Retry', 'Abort', 'get_status_tree', 'create_handlers_map', 'set_enforce_auth', ] import datetime import itertools import logging import os import re import sys import threading import time import traceback import urllib import uuid from google.appengine.api import mail from google.appengine.api import files from google.appengine.api import users from google.appengine.api import taskqueue from google.appengine.ext import db from google.appengine.ext import webapp # Relative imports import models import simplejson import util as mr_util # For convenience _PipelineRecord = models._PipelineRecord _SlotRecord = models._SlotRecord _BarrierRecord = models._BarrierRecord _StatusRecord = models._StatusRecord # Overall TODOs: # - Add a human readable name for start() # - Consider using sha1 of the UUID for user-supplied pipeline keys to ensure # that they keys are definitely not sequential or guessable (Python's uuid1 # method generates roughly sequential IDs). # - Ability to list all root pipelines that are live on simple page. 
# Potential TODOs: # - Add support for ANY N barriers. # - Add a global 'flags' value passed in to start() that all pipelines have # access to; makes it easy to pass along Channel API IDs and such. # - Allow Pipelines to declare they are "short" and optimize the evaluate() # function to run as many of them in quick succession. # - Add support in all Pipelines for hold/release where up-stream # barriers will fire but do nothing because the Pipeline is not ready. ################################################################################ class Error(Exception): """Base class for exceptions in this module.""" class PipelineSetupError(Error): """Base class for exceptions that happen before Pipeline execution.""" class PipelineExistsError(PipelineSetupError): """A new Pipeline with an assigned idempotence_key cannot be overwritten.""" class PipelineRuntimeError(Error): """Base class for exceptions that happen during Pipeline execution.""" class SlotNotFilledError(PipelineRuntimeError): """A slot that should have been filled already was not yet filled.""" class SlotNotDeclaredError(PipelineRuntimeError): """A slot that was filled or passed along was not previously declared.""" class UnexpectedPipelineError(PipelineRuntimeError): """An assertion failed, potentially leaving the pipeline unable to proceed.""" class PipelineUserError(Error): """Exceptions raised indirectly by developers to cause certain behaviors.""" class Retry(PipelineUserError): """The currently running pipeline should be retried at a later time.""" class Abort(PipelineUserError): """The currently running pipeline should be aborted up to the root.""" class PipelineStatusError(Error): """Exceptions raised when trying to collect pipeline status.""" ################################################################################ _MAX_BARRIERS_TO_NOTIFY = 10 _MAX_ABORTS_TO_BEGIN = 10 _TEST_MODE = False _TEST_ROOT_PIPELINE_KEY = None _DEFAULT_BACKOFF_SECONDS = 15 _DEFAULT_BACKOFF_FACTOR = 2 
_DEFAULT_MAX_ATTEMPTS = 3 _RETRY_WIGGLE_TIMEDELTA = datetime.timedelta(seconds=20) _DEBUG = False _MAX_JSON_SIZE = 900000 _ENFORCE_AUTH = True ################################################################################ class Slot(object): """An output that is filled by a Pipeline as it executes.""" def __init__(self, name=None, slot_key=None, strict=False): """Initializer. Args: name: The name of this slot. slot_key: The db.Key for this slot's _SlotRecord if it's already been allocated by an up-stream pipeline. strict: If this Slot was created as an output of a strictly defined pipeline. """ if name is None: raise UnexpectedPipelineError('Slot with key "%s" missing a name.' % slot_key) if slot_key is None: slot_key = db.Key.from_path(_SlotRecord.kind(), uuid.uuid1().hex) self._exists = _TEST_MODE else: self._exists = True self._touched = False self._strict = strict self.name = name self.key = slot_key self.filled = False self._filler_pipeline_key = None self._fill_datetime = None self._value = None @property def value(self): """Returns the current value of this slot. Returns: The value of the slot (a serializable Python type). Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._value @property def filler(self): """Returns the pipeline ID that filled this slot's value. Returns: A string that is the pipeline ID. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._filler_pipeline_key.name() @property def fill_datetime(self): """Returns when the slot was filled. Returns: A datetime.datetime. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' 
% (self.name, self.key)) return self._fill_datetime def _set_value(self, slot_record): """Sets the value of this slot based on its corresponding _SlotRecord. Does nothing if the slot has not yet been filled. Args: slot_record: The _SlotRecord containing this Slot's value. """ if slot_record.status == _SlotRecord.FILLED: self.filled = True self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore( slot_record) self._fill_datetime = slot_record.fill_time self._value = slot_record.value def _set_value_test(self, filler_pipeline_key, value): """Sets the value of this slot for use in testing. Args: filler_pipeline_key: The db.Key of the _PipelineRecord that filled this slot. value: The serializable value set for this slot. """ self.filled = True self._filler_pipeline_key = filler_pipeline_key self._fill_datetime = datetime.datetime.utcnow() # Convert to JSON and back again, to simulate the behavior of production. self._value = simplejson.loads(simplejson.dumps(value)) def __repr__(self): """Returns a string representation of this slot.""" if self.filled: return repr(self._value) else: return 'Slot(name="%s", slot_key="%s")' % (self.name, self.key) class PipelineFuture(object): """A future for accessing the outputs of a Pipeline.""" # NOTE: Do not, ever, add a names() method to this class. Callers cannot do # introspection on their context of being called. Even though the runtime # environment of the Pipeline can allow for that to happen, such behavior # would prevent synchronous simulation and verification, whic is an # unacceptable tradeoff. def __init__(self, output_names, force_strict=False): """Initializer. Args: output_names: The list of require output names that will be strictly enforced by this class. force_strict: If True, force this future to be in strict mode. 
""" self._after_all_pipelines = set() self._output_dict = { 'default': Slot(name='default'), } self._strict = len(output_names) > 0 or force_strict if self._strict: for name in output_names: if name in self._output_dict: raise UnexpectedPipelineError('Output name reserved: "%s"' % name) self._output_dict[name] = Slot(name=name, strict=True) def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False): """Inherits outputs from a calling Pipeline. Args: pipeline_name: The Pipeline class name (used for debugging). already_defined: Maps output name to stringified db.Key (of _SlotRecords) of any exiting output slots to be inherited by this future. resolve_outputs: When True, this method will dereference all output slots before returning back to the caller, making those output slots' values available. Raises: UnexpectedPipelineError when resolve_outputs is True and any of the output slots could not be retrived from the Datastore. """ for name, slot_key in already_defined.iteritems(): if not isinstance(slot_key, db.Key): slot_key = db.Key(slot_key) slot = self._output_dict.get(name) if slot is None: if self._strict: raise UnexpectedPipelineError( 'Inherited output named "%s" must be filled but ' 'not declared for pipeline class "%s"' % (name, pipeline_name)) else: self._output_dict[name] = Slot(name=name, slot_key=slot_key) else: slot.key = slot_key slot._exists = True if resolve_outputs: slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues()) all_slots = db.get(slot_key_dict.keys()) for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots): if slot_record is None: raise UnexpectedPipelineError( 'Inherited output named "%s" for pipeline class "%s" is ' 'missing its Slot in the datastore: "%s"' % (slot.name, pipeline_name, slot.key)) slot = slot_key_dict[slot_record.key()] slot._set_value(slot_record) def __getattr__(self, name): """Provides an output Slot instance with the given name if allowed.""" if name not in 
self._output_dict: if self._strict: raise SlotNotDeclaredError('Undeclared output with name "%s"' % name) self._output_dict[name] = Slot(name=name) slot = self._output_dict[name] return slot class Pipeline(object): """A Pipeline function-object that performs operations and has a life cycle. Class properties (to be overridden by sub-classes): async: When True, this Pipeline will execute asynchronously and fill the default output slot itself using the complete() method. output_names: List of named outputs (in addition to the default slot) that this Pipeline must output to (no more, no less). public_callbacks: If the callback URLs generated for this class should be accessible by all external requests regardless of login or task queue. admin_callbacks: If the callback URLs generated for this class should be accessible by the task queue ane externally by users logged in as admins. Modifiable instance properties: backoff_seconds: How many seconds to use as the constant factor in exponential backoff; may be changed by the user backoff_factor: Base factor to use for exponential backoff. The formula followed is (backoff_seconds * backoff_factor^current_attempt). max_attempts: Maximum number of retry attempts to make before failing completely and aborting the entire pipeline up to the root. target: The application version to use for processing this Pipeline. This can be set to the name of a backend to direct Pipelines to run there. Instance properties: pipeline_id: The ID of this pipeline. root_pipeline_id: The ID of the root of this pipeline. queue_name: The queue this pipeline runs on or None if unknown. current_attempt: The current attempt being tried for this pipeline. """ async = False output_names = [] public_callbacks = False admin_callbacks = False # Internal only. _class_path = None # Set for each class _send_mail = mail.send_mail_to_admins # For testing def __init__(self, *args, **kwargs): """Initializer. 
    Args:
      *args: The positional arguments for this function-object.
      **kwargs: The keyword arguments for this function-object.
    """
    self.args = args
    self.kwargs = kwargs
    # PipelineFuture of outputs; assigned once the pipeline is scheduled.
    self.outputs = None
    # Retry/queueing knobs; may be overridden via with_params().
    self.backoff_seconds = _DEFAULT_BACKOFF_SECONDS
    self.backoff_factor = _DEFAULT_BACKOFF_FACTOR
    self.max_attempts = _DEFAULT_MAX_ATTEMPTS
    self.target = None
    self.task_retry = False
    self._current_attempt = 0
    # Internal state; populated by _set_values_internal() once known.
    self._root_pipeline_key = None
    self._pipeline_key = None
    self._context = None
    self._result_status = None
    self._set_class_path()

    if _TEST_MODE:
      # In test mode the pipeline is evaluated eagerly, in-process,
      # at construction time.
      self._context = _PipelineContext('', 'default', '')
      self._root_pipeline_key = _TEST_ROOT_PIPELINE_KEY
      self._pipeline_key = db.Key.from_path(
          _PipelineRecord.kind(), uuid.uuid1().hex)
      self.outputs = PipelineFuture(self.output_names)
      self._context.evaluate_test(self)

  @property
  def pipeline_id(self):
    """Returns the ID of this Pipeline as a string or None if unknown."""
    if self._pipeline_key is None:
      return None
    return self._pipeline_key.name()

  @property
  def root_pipeline_id(self):
    """Returns root pipeline ID as a websafe string or None if unknown."""
    if self._root_pipeline_key is None:
      return None
    return self._root_pipeline_key.name()

  @property
  def is_root(self):
    """Returns True if this pipeline is a root pipeline, False otherwise."""
    return self._root_pipeline_key == self._pipeline_key

  @property
  def queue_name(self):
    """Returns the queue name this Pipeline runs on or None if unknown."""
    if self._context:
      return self._context.queue_name
    return None

  @property
  def base_path(self):
    """Returns the base path for Pipeline URL handlers or None if unknown."""
    if self._context:
      return self._context.base_path
    return None

  @property
  def has_finalized(self):
    """Returns True if this pipeline has completed and finalized."""
    return self._result_status == _PipelineRecord.DONE

  @property
  def was_aborted(self):
    """Returns True if this pipeline was aborted."""
    return self._result_status == _PipelineRecord.ABORTED

  @property
  def current_attempt(self):
    """Returns the current attempt at running this pipeline, starting at 1."""
    return self._current_attempt + 1

  @property
  def test_mode(self):
    """Returns True if the pipeline is running in test mode."""
    return _TEST_MODE

  @classmethod
  def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None):
    """Returns an instance corresponding to an existing Pipeline.

    The returned object will have the same properties a Pipeline does while
    it's running synchronously (e.g., like what it's first allocated),
    allowing callers to inspect caller arguments, outputs, fill slots,
    complete the pipeline, abort, retry, etc.

    Args:
      pipeline_id: The ID of this pipeline (a string).
      resolve_outputs: When True, dereference the outputs of this Pipeline
        so their values can be accessed by the caller.
      _pipeline_record: Internal-only. The _PipelineRecord instance to use
        to instantiate this instance instead of fetching it from
        the datastore.

    Returns:
      The matching Pipeline sub-class instance, or None if the record could
      not be found in the Datastore.
    """
    pipeline_record = _pipeline_record
    pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)

    if pipeline_record is None:
      pipeline_record = db.get(pipeline_key)
    if pipeline_record is None:
      return None

    params = pipeline_record.params
    # Resolve any slot-typed arguments to their filled values so the
    # reconstituted instance sees real argument values.
    arg_list, kwarg_dict = _dereference_args(
        pipeline_record.class_path, params['args'], params['kwargs'])
    outputs = PipelineFuture(cls.output_names)
    outputs._inherit_outputs(
        pipeline_record.class_path,
        params['output_slots'],
        resolve_outputs=resolve_outputs)
    stage = cls(*arg_list, **kwarg_dict)
    stage.backoff_seconds = params['backoff_seconds']
    stage.backoff_factor = params['backoff_factor']
    stage.max_attempts = params['max_attempts']
    stage.task_retry = params['task_retry']
    stage.target = params.get('target')  # May not be defined for old Pipelines
    stage._current_attempt = pipeline_record.current_attempt
    stage._set_values_internal(
        _PipelineContext('', params['queue_name'], params['base_path']),
        pipeline_key,
        _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record),
        outputs,
        pipeline_record.status)
    return stage

  # Methods that can be invoked on a Pipeline instance by anyone with a
  # valid object (e.g., directly instantiated, retrieved via from_id).

  def start(self,
            idempotence_key='',
            queue_name='default',
            base_path='/_ah/pipeline',
            return_task=False):
    """Starts a new instance of this pipeline.

    Args:
      idempotence_key: The ID to use for this Pipeline and throughout its
        asynchronous workflow to ensure the operations are idempotent. If
        empty a starting key will be automatically assigned.
      queue_name: What queue this Pipeline's workflow should execute on.
      base_path: The relative URL path to where the Pipeline API is
        mounted for access by the taskqueue API or external requests.
      return_task: When True, a task to start this pipeline will be
        returned instead of submitted, allowing the caller to start off this
        pipeline as part of a separate transaction (potentially leaving this
        newly allocated pipeline's datastore entities in place if that
        separate transaction fails for any reason).

    Returns:
      A taskqueue.Task instance if return_task was True. This task will *not*
      have a name, thus to ensure reliable execution of your pipeline you
      should add() this task as part of a separate Datastore transaction.

    Raises:
      PipelineExistsError if the pipeline with the given idempotence key
      exists.
      PipelineSetupError if the pipeline could not start for any other
      reason.
    """
    if not idempotence_key:
      idempotence_key = uuid.uuid1().hex
    pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
    context = _PipelineContext('', queue_name, base_path)
    # force_strict: only declared output names may be filled for this root.
    future = PipelineFuture(self.output_names, force_strict=True)
    try:
      self._set_values_internal(
          context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
      return context.start(self, return_task=return_task)
    except Error:
      # Pass through exceptions that originate in this module.
      raise
    except Exception, e:
      # Re-type any exceptions that were raised in dependent methods.
      raise PipelineSetupError('Error starting %s#%s: %s' % (
          self, idempotence_key, str(e)))

  def start_test(self, idempotence_key=None, base_path='', **kwargs):
    """Starts this pipeline in test fashion.

    Args:
      idempotence_key: Dummy idempotence_key to use for this root pipeline.
      base_path: Dummy base URL path to use for this root pipeline.
      kwargs: Ignored keyword arguments usually passed to start().
    """
    if not idempotence_key:
      idempotence_key = uuid.uuid1().hex
    pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
    context = _PipelineContext('', 'default', base_path)
    future = PipelineFuture(self.output_names, force_strict=True)
    self._set_values_internal(
        context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
    context.start_test(self)

  # Pipeline control methods.

  def retry(self, retry_message=''):
    """Forces a currently running asynchronous pipeline to retry.

    Note this may not be called by synchronous or generator pipelines. Those
    must instead raise the 'Retry' exception during execution.

    Args:
      retry_message: Optional message explaining why the retry happened.

    Returns:
      True if the Pipeline should be retried, False if it cannot be cancelled
      mid-flight for some reason.
    """
    if not self.async:
      raise UnexpectedPipelineError(
          'May only call retry() method for asynchronous pipelines.')
    if self.try_cancel():
      self._context.transition_retry(self._pipeline_key, retry_message)
      return True
    else:
      return False

  def abort(self, abort_message=''):
    """Mark the entire pipeline up to the root as aborted.

    Note this should only be called from *outside* the context of a running
    pipeline. Synchronous and generator pipelines should raise the 'Abort'
    exception to cause this behavior during execution.

    Args:
      abort_message: Optional message explaining why the abort happened.

    Returns:
      True if the abort signal was sent successfully; False if the pipeline
      could not be aborted for any reason.
""" # TODO: Use thread-local variable to enforce that this is not called # while a pipeline is executing in the current thread. if (self.async and self._root_pipeline_key == self._pipeline_key and not self.try_cancel()): # Handle the special case where the root pipeline is async and thus # cannot be aborted outright. return False else: return self._context.begin_abort( self._root_pipeline_key, abort_message=abort_message) # Methods used by the Pipeline as it runs. def fill(self, name_or_slot, value): """Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time. """ if isinstance(name_or_slot, basestring): slot = getattr(self.outputs, name_or_slot) elif isinstance(name_or_slot, Slot): slot = name_or_slot else: raise UnexpectedPipelineError( 'Could not fill invalid output name: %r' % name_or_slot) if not slot._exists: raise SlotNotDeclaredError( 'Cannot fill output with name "%s" that was just ' 'declared within the Pipeline context.' % slot.name) self._context.fill_slot(self._pipeline_key, slot, value) def set_status(self, message=None, console_url=None, status_links=None): """Sets the current status of this pipeline. This method is purposefully non-transactional. Updates are written to the datastore immediately and overwrite all existing statuses. Args: message: (optional) Overall status message. console_url: (optional) Relative URL to use for the "console" of this pipeline that displays current progress. When None, no console will be displayed. status_links: (optional) Dictionary of readable link names to relative URLs that should be associated with this pipeline as it runs. These links provide convenient access to other dashboards, consoles, etc associated with the pipeline. 
Raises: PipelineRuntimeError if the status could not be set for any reason. """ if _TEST_MODE: logging.info( 'New status for %s#%s: message=%r, console_url=%r, status_links=%r', self, self.pipeline_id, message, console_url, status_links) return status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id) root_pipeline_key = db.Key.from_path( _PipelineRecord.kind(), self.root_pipeline_id) status_record = _StatusRecord( key=status_key, root_pipeline=root_pipeline_key) try: if message: status_record.message = message if console_url: status_record.console_url = console_url if status_links: # Alphabeticalize the list. status_record.link_names = sorted( db.Text(s) for s in status_links.iterkeys()) status_record.link_urls = [ db.Text(status_links[name]) for name in status_record.link_names] status_record.status_time = datetime.datetime.utcnow() status_record.put() except Exception, e: raise PipelineRuntimeError('Could not set status for %s#%s: %s' % (self, self.pipeline_id, str(e))) def complete(self, default_output=None): """Marks this asynchronous Pipeline as complete. Args: default_output: What value the 'default' output slot should be assigned. Raises: UnexpectedPipelineError if the slot no longer exists or this method was called for a pipeline that is not async. """ # TODO: Enforce that all outputs expected by this async pipeline were # filled before this complete() function was called. May required all # async functions to declare their outputs upfront. if not self.async: raise UnexpectedPipelineError( 'May only call complete() method for asynchronous pipelines.') self._context.fill_slot( self._pipeline_key, self.outputs.default, default_output) def get_callback_url(self, **kwargs): """Returns a relative URL for invoking this Pipeline's callback method. Args: kwargs: Dictionary mapping keyword argument names to single values that should be passed to the callback when it is invoked. 
Raises: UnexpectedPipelineError if this is invoked on pipeline that is not async. """ # TODO: Support positional parameters. if not self.async: raise UnexpectedPipelineError( 'May only call get_callback_url() method for asynchronous pipelines.') kwargs['pipeline_id'] = self._pipeline_key.name() params = urllib.urlencode(kwargs) return '%s/callback?%s' % (self.base_path, params) def get_callback_task(self, *args, **kwargs): """Returns a task for calling back this Pipeline. Args: params: Keyword argument containing a dictionary of key/value pairs that will be passed to the callback when it is executed. args, kwargs: Passed to the taskqueue.Task constructor. Use these arguments to set the task name (for idempotence), etc. Returns: A taskqueue.Task instance that must be enqueued by the caller. """ if not self.async: raise UnexpectedPipelineError( 'May only call get_callback_task() method for asynchronous pipelines.') params = kwargs.get('params', {}) kwargs['params'] = params params['pipeline_id'] = self._pipeline_key.name() kwargs['url'] = self.base_path + '/callback' kwargs['method'] = 'POST' return taskqueue.Task(*args, **kwargs) def send_result_email(self): """Sends an email to admins indicating this Pipeline has completed. For developer convenience. Automatically called from finalized for root Pipelines that do not override the default action. 
""" status = 'successful' if self.was_aborted: status = 'aborted' app_id = os.environ['APPLICATION_ID'] shard_index = app_id.find('~') if shard_index != -1: app_id = app_id[shard_index+1:] param_dict = { 'status': status, 'app_id': app_id, 'class_path': self._class_path, 'pipeline_id': self.root_pipeline_id, 'base_path': '%s.appspot.com%s' % (app_id, self.base_path), } subject = ( 'Pipeline %(status)s: App "%(app_id)s", %(class_path)s' '#%(pipeline_id)s' % param_dict) body = """View the pipeline results here: http://%(base_path)s/status?root=%(pipeline_id)s Thanks, The Pipeline API """ % param_dict html = """<html><body> <p>View the pipeline results here:</p> <p><a href="http://%(base_path)s/status?root=%(pipeline_id)s" >http://%(base_path)s/status?root=%(pipeline_id)s</a></p> <p> Thanks, <br> The Pipeline API </p> </body></html> """ % param_dict sender = '%s@%s.appspotmail.com' % (app_id, app_id) try: self._send_mail(sender, subject, body, html=html) except (mail.InvalidSenderError, mail.InvalidEmailError): logging.warning('Could not send result email for ' 'root pipeline ID "%s" from sender "%s"', self.root_pipeline_id, sender) def cleanup(self): """Clean up this Pipeline and all Datastore records used for coordination. Only works when called on a root pipeline. Child pipelines will ignore calls to this method. After this method is called, Pipeline.from_id() and related status methods will return inconsistent or missing results. This method is fire-and-forget and asynchronous. """ if self._root_pipeline_key is None: raise UnexpectedPipelineError( 'Could not cleanup Pipeline with unknown root pipeline ID.') if not self.is_root: return task = taskqueue.Task( params=dict(root_pipeline_key=self._root_pipeline_key), url=self.base_path + '/cleanup', headers={'X-Ae-Pipeline-Key': self._root_pipeline_key}) taskqueue.Queue(self.queue_name).add(task) def with_params(self, **kwargs): """Modify various execution parameters of a Pipeline before it runs. 
This method has no effect in test mode. Args: kwargs: Attributes to modify on this Pipeline instance before it has been executed. Returns: This Pipeline instance, for easy chaining. """ if _TEST_MODE: logging.info( 'Setting runtime parameters for %s#%s: %r', self, self.pipeline_id, kwargs) return self if self.pipeline_id is not None: raise UnexpectedPipelineError( 'May only call with_params() on a Pipeline that has not yet ' 'been scheduled for execution.') ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target') for name, value in kwargs.iteritems(): if name not in ALLOWED: raise TypeError('Unexpected keyword: %s=%r' % (name, value)) setattr(self, name, value) return self # Methods implemented by developers for lifecycle management. These # must be idempotent under all circumstances. def run(self, *args, **kwargs): """Runs this Pipeline.""" raise NotImplementedError('Must implement "run" in Pipeline sub-class.') def run_test(self, *args, **kwargs): """Runs this Pipeline in test mode.""" raise NotImplementedError( 'Must implement "run_test" in Pipeline sub-class.') def finalized(self): """Finalizes this Pipeline after execution if it's a generator. Default action as the root pipeline is to email the admins with the status. Implementors be sure to call 'was_aborted' to find out if the finalization that you're handling is for a success or error case. """ if self.pipeline_id == self.root_pipeline_id: self.send_result_email() def finalized_test(self, *args, **kwargs): """Finalized this Pipeline in test mode.""" raise NotImplementedError( 'Must implement "finalized_test" in Pipeline sub-class.') def callback(self, **kwargs): """This Pipeline received an asynchronous callback request.""" raise NotImplementedError( 'Must implement "callback" in Pipeline sub-class.') def try_cancel(self): """This pipeline has been cancelled. 
Called when a pipeline is interrupted part-way through due to some kind of failure (an abort of the whole pipeline to the root or a forced retry on this child pipeline). Returns: True to indicate that cancellation was successful and this pipeline may go in the retry or aborted state; False to indicate that this pipeline cannot be canceled right now and must remain as-is. """ return False # Internal methods. @classmethod def _set_class_path(cls, module_dict=sys.modules): """Sets the absolute path to this class as a string. Used by the Pipeline API to reconstruct the Pipeline sub-class object at execution time instead of passing around a serialized function. Args: module_dict: Used for testing. """ if cls._class_path is not None: return # Do not set the _class_path for the base-class, otherwise all children's # lookups for _class_path will fall through and return 'Pipeline' above. # This situation can happen if users call the generic Pipeline.from_id # to get the result of a Pipeline without knowing its specific class. if cls is Pipeline: return # This is a brute-force approach to solving the module reverse-lookup # problem, where we want to refer to a class by its stable module name # but have no built-in facility for doing so in Python. found = None for name, module in module_dict.items(): if name == '__main__': continue found = getattr(module, cls.__name__, None) if found is cls: break else: # If all else fails, try the main module. name = '__main__' module = module_dict.get(name) found = getattr(module, cls.__name__, None) if found is not cls: raise ImportError('Could not determine path for Pipeline ' 'function/class "%s"' % cls.__name__) cls._class_path = '%s.%s' % (name, cls.__name__) def _set_values_internal(self, context, pipeline_key, root_pipeline_key, outputs, result_status): """Sets the user-visible values provided as an API by this class. Args: context: The _PipelineContext used for this Pipeline. pipeline_key: The db.Key of this pipeline. 
root_pipeline_key: The db.Key of the root pipeline. outputs: The PipelineFuture for this pipeline. result_status: The result status of this pipeline. """ self._context = context self._pipeline_key = pipeline_key self._root_pipeline_key = root_pipeline_key self._result_status = result_status self.outputs = outputs def _callback_internal(self, kwargs): """Used to execute callbacks on asynchronous pipelines.""" logging.debug('Callback %s(*%s, **%s)#%s with params: %r', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name(), kwargs) return self.callback(**kwargs) def _run_internal(self, context, pipeline_key, root_pipeline_key, caller_output): """Used by the Pipeline evaluator to execute this Pipeline.""" self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, _PipelineRecord.RUN) logging.debug('Running %s(*%s, **%s)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) return self.run(*self.args, **self.kwargs) def _finalized_internal(self, context, pipeline_key, root_pipeline_key, caller_output, aborted): """Used by the Pipeline evaluator to finalize this Pipeline.""" result_status = _PipelineRecord.RUN if aborted: result_status = _PipelineRecord.ABORTED self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, result_status) logging.debug('Finalizing %s(*%r, **%r)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) try: self.finalized() except NotImplementedError: pass def __repr__(self): """Returns a string representation of this Pipeline.""" return '%s(*%s, **%s)' % ( self._class_path, _short_repr(self.args), _short_repr(self.kwargs)) # TODO: Change InOrder and After to use a common thread-local list of # execution modifications to apply to the current evaluating pipeline. class After(object): """Causes all contained Pipelines to run after the given ones complete. 
  Must be used in a 'with' block.
  """

  # Thread-local stack of futures that subsequent pipelines must follow.
  _local = threading.local()

  def __init__(self, *futures):
    """Initializer.

    Args:
      *futures: One or more PipelineFutures that all subsequent pipelines
        should follow.
    """
    if len(futures) == 0:
      raise TypeError(
          'Must pass one or more PipelineFuture instances to After()')
    self._futures = set(futures)

  def __enter__(self):
    """When entering a 'with' block."""
    After._thread_init()
    After._local._after_all_futures.extend(self._futures)

  def __exit__(self, type, value, trace):
    """When exiting a 'with' block."""
    for future in self._futures:
      After._local._after_all_futures.remove(future)
    # Returning False re-raises any exception from the 'with' body.
    return False

  @classmethod
  def _thread_init(cls):
    """Ensure thread local is initialized."""
    if not hasattr(cls._local, '_after_all_futures'):
      cls._local._after_all_futures = []


class InOrder(object):
  """Causes all contained Pipelines to run in order.

  Must be used in a 'with' block.
  """

  # Thread-local set of futures accumulated within the current block.
  _local = threading.local()

  @classmethod
  def _add_future(cls, future):
    """Adds a future to the list of in-order futures thus far.

    Args:
      future: The future to add to the list.
    """
    if cls._local._activated:
      cls._local._in_order_futures.add(future)

  def __init__(self):
    """Initializer."""

  def __enter__(self):
    """When entering a 'with' block."""
    InOrder._thread_init()
    # Nesting InOrder blocks is not supported.
    if InOrder._local._activated:
      raise UnexpectedPipelineError('Already in an InOrder "with" block.')
    InOrder._local._activated = True
    InOrder._local._in_order_futures.clear()

  def __exit__(self, type, value, trace):
    """When exiting a 'with' block."""
    InOrder._local._activated = False
    InOrder._local._in_order_futures.clear()
    # Returning False re-raises any exception from the 'with' body.
    return False

  @classmethod
  def _thread_init(cls):
    """Ensure thread local is initialized."""
    if not hasattr(cls._local, '_in_order_futures'):
      cls._local._in_order_futures = set()
      cls._local._activated = False


################################################################################

def _short_repr(obj):
  """Helper function returns a truncated repr() of an object."""
  stringified = repr(obj)
  if len(stringified) > 200:
    return '%s... (%d bytes)' % (stringified[:200], len(stringified))
  return stringified


def _write_json_blob(encoded_value):
  """Writes a JSON encoded value to a Blobstore File.

  Args:
    encoded_value: The encoded JSON string.

  Returns:
    The blobstore.BlobKey for the file that was created.
  """
  file_name = files.blobstore.create(mime_type='application/json')
  handle = files.open(file_name, 'a')
  try:
    # Chunk the file into individual writes of less than 1MB, since the files
    # API does not do buffered writes implicitly.
    for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):
      end_index = start_index + _MAX_JSON_SIZE
      handle.write(encoded_value[start_index:end_index])
  finally:
    handle.close()

  files.finalize(file_name)
  return files.blobstore.get_blob_key(file_name)


def _dereference_args(pipeline_name, args, kwargs):
  """Dereference a Pipeline's arguments that are slots, validating them.

  Each argument value passed in is assumed to be a dictionary with the format:
    {'type': 'value', 'value': 'serializable'}  # A resolved value.
    {'type': 'slot', 'slot_key': 'str() on a db.Key'}  # A pending Slot.

  Args:
    pipeline_name: The name of the pipeline class; used for debugging.
    args: Iterable of positional arguments.
    kwargs: Dictionary of keyword arguments.

  Returns:
    Tuple (args, kwargs) where:
      Args: A list of positional argument values that are all dereferenced.
      Kwargs: A dictionary of keyword argument values that are all
        dereferenced.

  Raises:
    SlotNotFilledError if any of the supplied 'slot_key' records are not
    present in the Datastore or have not yet been filled.
    UnexpectedPipelineError if an unknown parameter type was passed.
  """
  # Collect all referenced slot keys so they can be fetched in one batch get.
  lookup_slots = set()
  for arg in itertools.chain(args, kwargs.itervalues()):
    if arg['type'] == 'slot':
      lookup_slots.add(db.Key(arg['slot_key']))

  slot_dict = {}
  for key, slot_record in zip(lookup_slots, db.get(lookup_slots)):
    if slot_record is None or slot_record.status != _SlotRecord.FILLED:
      raise SlotNotFilledError(
          'Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' %
          (key, pipeline_name, _short_repr(args), _short_repr(kwargs)))
    slot_dict[key] = slot_record.value

  arg_list = []
  for current_arg in args:
    if current_arg['type'] == 'slot':
      arg_list.append(slot_dict[db.Key(current_arg['slot_key'])])
    elif current_arg['type'] == 'value':
      arg_list.append(current_arg['value'])
    else:
      raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)

  kwarg_dict = {}
  for key, current_arg in kwargs.iteritems():
    if current_arg['type'] == 'slot':
      kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])]
    elif current_arg['type'] == 'value':
      kwarg_dict[key] = current_arg['value']
    else:
      raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)

  return (arg_list, kwarg_dict)


def _generate_args(pipeline, future, queue_name, base_path):
  """Generate the params used to describe a Pipeline's dependencies.

  The arguments passed to this method may be normal values, Slot instances
  (for named outputs), or PipelineFuture instances (for referring to the
  default output slot).

  Args:
    pipeline: The Pipeline instance to generate args for.
    future: The PipelineFuture for the Pipeline these arguments correspond to.
    queue_name: The queue to run the pipeline on.
    base_path: Relative URL for pipeline URL handlers.

  Returns:
    Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where:
      dependent_slots: List of db.Key instances of _SlotRecords on which
        this pipeline will need to block before execution (passed to
        create a _BarrierRecord for running the pipeline).
      output_slot_keys: List of db.Key instances of _SlotRecords that will
        be filled by this pipeline during its execution (passed to create
        a _BarrierRecord for finalizing the pipeline).
      params_text: JSON dictionary of pipeline parameters to be serialized and
        saved in a corresponding _PipelineRecord. Will be None if the params
        are too big and must be saved in a blob instead.
      params_blob: JSON dictionary of pipeline parameters to be serialized and
        saved in a Blob file, and then attached to a _PipelineRecord. Will be
        None if the params data size was small enough to fit in the entity.
  """
  params = {
      'args': [],
      'kwargs': {},
      'after_all': [],
      'output_slots': {},
      'class_path': pipeline._class_path,
      'queue_name': queue_name,
      'base_path': base_path,
      'backoff_seconds': pipeline.backoff_seconds,
      'backoff_factor': pipeline.backoff_factor,
      'max_attempts': pipeline.max_attempts,
      'task_retry': pipeline.task_retry,
      'target': pipeline.target,
  }
  dependent_slots = set()

  # Positional arguments: unresolved Slots become 'slot' references the
  # pipeline must block on; everything else is serialized by value.
  arg_list = params['args']
  for current_arg in pipeline.args:
    if isinstance(current_arg, PipelineFuture):
      current_arg = current_arg.default
    if isinstance(current_arg, Slot):
      arg_list.append({'type': 'slot', 'slot_key': str(current_arg.key)})
      dependent_slots.add(current_arg.key)
    else:
      arg_list.append({'type': 'value', 'value': current_arg})

  # Keyword arguments: same treatment as positional arguments.
  kwarg_dict = params['kwargs']
  for name, current_arg in pipeline.kwargs.iteritems():
    if isinstance(current_arg, PipelineFuture):
      current_arg = current_arg.default
    if isinstance(current_arg, Slot):
      kwarg_dict[name] = {'type': 'slot', 'slot_key': str(current_arg.key)}
      dependent_slots.add(current_arg.key)
    else:
      kwarg_dict[name] = {'type': 'value', 'value': current_arg}

  # After() dependencies block on the 'default' slot of each listed future.
  after_all = params['after_all']
  for other_future in future._after_all_pipelines:
    slot_key = other_future._output_dict['default'].key
    after_all.append(str(slot_key))
    dependent_slots.add(slot_key)

  output_slots = params['output_slots']
  output_slot_keys = set()
  for name, slot in future._output_dict.iteritems():
    output_slot_keys.add(slot.key)
    output_slots[name] = str(slot.key)

  # Store params inline when small enough; otherwise spill to Blobstore.
  params_encoded = simplejson.dumps(params)
  params_text = None
  params_blob = None
  if len(params_encoded) > _MAX_JSON_SIZE:
    params_blob = _write_json_blob(params_encoded)
  else:
    params_text = params_encoded

  return dependent_slots, output_slot_keys, params_text, params_blob


class _PipelineContext(object):
  """Internal API for interacting with Pipeline state."""

  _gettime = datetime.datetime.utcnow

  def __init__(self, task_name, queue_name, base_path):
    """Initializer.
    Args:
      task_name: The name of the currently running task or empty if there
        is no task running.
      queue_name: The queue this pipeline should run on (may not be the
        current queue this request is on).
      base_path: Relative URL for the pipeline's handlers.
    """
    self.task_name = task_name
    self.queue_name = queue_name
    self.base_path = base_path
    # Handler URLs derived from base_path; each maps to a task type in the
    # pipeline workflow.
    self.barrier_handler_path = '%s/output' % base_path
    self.pipeline_handler_path = '%s/run' % base_path
    self.finalized_handler_path = '%s/finalized' % base_path
    self.fanout_handler_path = '%s/fanout' % base_path
    self.abort_handler_path = '%s/abort' % base_path
    self.fanout_abort_handler_path = '%s/fanout_abort' % base_path
    # Names of output slots filled during the current request.
    self.session_filled_output_names = set()

  @classmethod
  def from_environ(cls, environ=os.environ):
    """Constructs a _PipelineContext from the task queue environment."""
    base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2]
    return cls(
        environ['HTTP_X_APPENGINE_TASKNAME'],
        environ['HTTP_X_APPENGINE_QUEUENAME'],
        base_path)

  def fill_slot(self, filler_pipeline_key, slot, value):
    """Fills a slot, enqueueing a task to trigger pending barriers.

    Args:
      filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
        that filled this slot.
      slot: The Slot instance to fill.
      value: The serializable value to assign.

    Raises:
      UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
      be found in the Datastore.
    """
    if not isinstance(filler_pipeline_key, db.Key):
      filler_pipeline_key = db.Key(filler_pipeline_key)

    if _TEST_MODE:
      slot._set_value_test(filler_pipeline_key, value)
    else:
      encoded_value = simplejson.dumps(value, sort_keys=True)
      value_text = None
      value_blob = None
      if len(encoded_value) <= _MAX_JSON_SIZE:
        value_text = db.Text(encoded_value)
      else:
        # The encoded value is too big. Save it as a blob.
        value_blob = _write_json_blob(encoded_value)

      def txn():
        slot_record = db.get(slot.key)
        if slot_record is None:
          raise UnexpectedPipelineError(
              'Tried to fill missing slot "%s" '
              'by pipeline ID "%s" with value: %r'
              % (slot.key, filler_pipeline_key.name(), value))
        # NOTE: Always take the override value here. If down-stream pipelines
        # need a consistent view of all up-stream outputs (meaning, all of the
        # outputs came from the same retry attempt of the upstream pipeline),
        # the down-stream pipeline must also wait for the 'default' output
        # of these up-stream pipelines.
        slot_record.filler = filler_pipeline_key
        slot_record.value_text = value_text
        slot_record.value_blob = value_blob
        slot_record.status = _SlotRecord.FILLED
        slot_record.fill_time = self._gettime()
        slot_record.put()
        # Transactional task so barrier notification happens iff the slot
        # write commits.
        task = taskqueue.Task(
            url=self.barrier_handler_path,
            params=dict(slot_key=slot.key),
            headers={'X-Ae-Slot-Key': slot.key,
                     'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
        task.add(queue_name=self.queue_name, transactional=True)
      db.run_in_transaction(txn)

    self.session_filled_output_names.add(slot.name)

  def notify_barriers(self,
                      slot_key,
                      cursor,
                      max_to_notify=_MAX_BARRIERS_TO_NOTIFY):
    """Searches for barriers affected by a slot and triggers completed ones.

    Args:
      slot_key: db.Key or stringified key of the _SlotRecord that was filled.
      cursor: Stringified Datastore cursor where the notification query
        should pick up.
      max_to_notify: Used for testing.
    """
    if not isinstance(slot_key, db.Key):
      slot_key = db.Key(slot_key)
    # TODO: This query may suffer from lag in the high-replication Datastore.
    # Consider re-running notify_barriers a second time 10 seconds in the
    # future to pick up the stragglers, or add child entities to the
    # _SlotRecords that point back at dependent _BarrierRecord within a
    # single entity group.
    query = (
        _BarrierRecord.all(cursor=cursor)
        .filter('blocking_slots =', slot_key))
    results = query.fetch(max_to_notify)

    # Fetch all blocking _SlotRecords for any potentially triggered barriers.
    blocking_slot_keys = []
    for barrier in results:
      blocking_slot_keys.extend(barrier.blocking_slots)
    blocking_slot_dict = {}
    for slot_record in db.get(blocking_slot_keys):
      if slot_record is None:
        continue
      blocking_slot_dict[slot_record.key()] = slot_record

    task_list = []
    updated_barriers = []
    for barrier in results:
      all_ready = True
      for blocking_slot_key in barrier.blocking_slots:
        slot_record = blocking_slot_dict.get(blocking_slot_key)
        if slot_record is None:
          logging.error('Barrier "%s" relies on Slot "%s" which is missing.',
                        barrier.key(), blocking_slot_key)
          all_ready = False
          break
        if slot_record.status != _SlotRecord.FILLED:
          all_ready = False
          break

      # When all of the blocking_slots have been filled, consider the barrier
      # ready to trigger. We'll trigger it regardless of the current
      # _BarrierRecord status, since there could be task queue failures at any
      # point in this flow; this rolls forward the state and de-dupes using
      # the task name tombstones.
      if all_ready:
        if barrier.status != _BarrierRecord.FIRED:
          barrier.status = _BarrierRecord.FIRED
          barrier.trigger_time = self._gettime()
          updated_barriers.append(barrier)

        # The barrier's key name encodes its purpose (START vs. FINALIZE).
        purpose = barrier.key().name()
        if purpose == _BarrierRecord.START:
          path = self.pipeline_handler_path
          countdown = None
        else:
          path = self.finalized_handler_path
          # NOTE: Wait one second before finalization to prevent
          # contention on the _PipelineRecord entity.
          countdown = 1
        pipeline_key = _BarrierRecord.target.get_value_for_datastore(barrier)
        task_list.append(taskqueue.Task(
            url=path,
            countdown=countdown,
            name='ae-barrier-fire-%s-%s' % (pipeline_key.name(), purpose),
            params=dict(pipeline_key=pipeline_key, purpose=purpose),
            headers={'X-Ae-Pipeline-Key': pipeline_key}))

    # Blindly overwrite _BarrierRecords that have an updated status. This is
    # acceptable because by this point all finalization barriers for
    # generator children should have already had their final outputs assigned.
    if updated_barriers:
      db.put(updated_barriers)

    # Task continuation with sequence number to prevent fork-bombs.
    if len(results) == max_to_notify:
      the_match = re.match('(.*)-ae-barrier-notify-([0-9]+)', self.task_name)
      if the_match:
        prefix = the_match.group(1)
        end = int(the_match.group(2)) + 1
      else:
        prefix = self.task_name
        end = 0
      task_list.append(taskqueue.Task(
          name='%s-ae-barrier-notify-%d' % (prefix, end),
          url=self.barrier_handler_path,
          params=dict(slot_key=slot_key, cursor=query.cursor())))

    if task_list:
      try:
        taskqueue.Queue(self.queue_name).add(task_list)
      except (taskqueue.TombstonedTaskError,
              taskqueue.TaskAlreadyExistsError):
        # De-dupe: these named tasks were already enqueued by a prior attempt.
        pass

  def begin_abort(self, root_pipeline_key, abort_message):
    """Kicks off the abort process for a root pipeline and all its children.

    Args:
      root_pipeline_key: db.Key of the root pipeline to abort.
      abort_message: Message explaining why the abort happened, only saved
        into the root pipeline.

    Returns:
      True if the abort signal was sent successfully; False otherwise.
""" def txn(): pipeline_record = db.get(root_pipeline_key) if pipeline_record is None: logging.warning( 'Tried to abort root pipeline ID "%s" but it does not exist.', root_pipeline_key.name()) raise db.Rollback() if pipeline_record.status == _PipelineRecord.ABORTED: logging.warning( 'Tried to abort root pipeline ID "%s"; already in state: %s', root_pipeline_key.name(), pipeline_record.status) raise db.Rollback() if pipeline_record.abort_requested: logging.warning( 'Tried to abort root pipeline ID "%s"; abort signal already sent.', root_pipeline_key.name()) raise db.Rollback() pipeline_record.abort_requested = True pipeline_record.abort_message = abort_message pipeline_record.put() task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) return True return db.run_in_transaction(txn) def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): """Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing. """ if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) # NOTE: The results of this query may include _PipelineRecord instances # that are not actually "reachable", meaning you cannot get to them by # starting at the root pipeline and following "fanned_out" onward. This # is acceptable because even these defunct _PipelineRecords will properly # set their status to ABORTED when the signal comes, regardless of any # other status they may have had. # # The only gotcha here is if a Pipeline's finalize method somehow modifies # its inputs (like deleting an input file). 
In the case there are # unreachable child pipelines, it will appear as if two finalize methods # have been called instead of just one. The saving grace here is that # finalize must be idempotent, so this *should* be harmless. query = ( _PipelineRecord.all(cursor=cursor) .filter('root_pipeline =', root_pipeline_key)) results = query.fetch(max_to_notify) task_list = [] for pipeline_record in results: if pipeline_record.status not in ( _PipelineRecord.RUN, _PipelineRecord.WAITING): continue pipeline_key = pipeline_record.key() task_list.append(taskqueue.Task( name='%s-%s-abort' % (self.task_name, pipeline_key.name()), url=self.abort_handler_path, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT), headers={'X-Ae-Pipeline-Key': pipeline_key})) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-%d' % (prefix, end), url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass def start(self, pipeline, return_task=True): """Starts a pipeline. Args: pipeline: Pipeline instance to run. return_task: When True, do not submit the task to start the pipeline but instead return it for someone else to enqueue. Returns: The task to start this pipeline if return_task was True. Raises: PipelineExistsError if the pipeline with the given ID already exists. """ # Adjust all pipeline output keys for this Pipeline to be children of # the _PipelineRecord, that way we can write them all and submit in a # single transaction. 
entities_to_put = [] for name, slot in pipeline.outputs._output_dict.iteritems(): slot.key = db.Key.from_path( *slot.key.to_path(), **dict(parent=pipeline._pipeline_key)) _, output_slots, params_text, params_blob = _generate_args( pipeline, pipeline.outputs, self.queue_name, self.base_path) def txn(): pipeline_record = db.get(pipeline._pipeline_key) if pipeline_record is not None: raise PipelineExistsError( 'Pipeline with idempotence key "%s" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))) entities_to_put = [] for name, slot in pipeline.outputs._output_dict.iteritems(): entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=pipeline._pipeline_key)) entities_to_put.append(_PipelineRecord( key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts)) entities_to_put.append(_BarrierRecord( parent=pipeline._pipeline_key, key_name=_BarrierRecord.FINALIZE, target=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, blocking_slots=list(output_slots))) db.put(entities_to_put) task = taskqueue.Task( url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target) if return_task: return task task.add(queue_name=self.queue_name, transactional=True) task = db.run_in_transaction(txn) # Immediately mark the output slots as existing so they can be filled # by asynchronous pipelines or used in test mode. for output_slot in pipeline.outputs._output_dict.itervalues(): output_slot._exists = True return task def start_test(self, pipeline): """Starts a pipeline in the test mode. Args: pipeline: The Pipeline instance to test. 
""" global _TEST_MODE, _TEST_ROOT_PIPELINE_KEY self.start(pipeline, return_task=True) _TEST_MODE = True _TEST_ROOT_PIPELINE_KEY = pipeline._pipeline_key try: self.evaluate_test(pipeline, root=True) finally: _TEST_MODE = False def evaluate_test(self, stage, root=False): """Recursively evaluates the given pipeline in test mode. Args: stage: The Pipeline instance to run at this stage in the flow. root: True if the supplied stage is the root of the pipeline. """ args_adjusted = [] for arg in stage.args: if isinstance(arg, PipelineFuture): arg = arg.default if isinstance(arg, Slot): value = arg.value arg._touched = True else: value = arg args_adjusted.append(value) kwargs_adjusted = {} for name, arg in stage.kwargs.iteritems(): if isinstance(arg, PipelineFuture): arg = arg.default if isinstance(arg, Slot): value = arg.value arg._touched = True else: value = arg kwargs_adjusted[name] = value stage.args, stage.kwargs = args_adjusted, kwargs_adjusted pipeline_generator = mr_util.is_generator_function(stage.run) logging.debug('Running %s(*%s, **%s)', stage._class_path, _short_repr(stage.args), _short_repr(stage.kwargs)) if stage.async: stage.run_test(*stage.args, **stage.kwargs) elif pipeline_generator: all_output_slots = set() try: pipeline_iter = stage.run_test(*stage.args, **stage.kwargs) except NotImplementedError: pipeline_iter = stage.run(*stage.args, **stage.kwargs) all_substages = set() next_value = None last_sub_stage = None while True: try: yielded = pipeline_iter.send(next_value) except StopIteration: break if isinstance(yielded, Pipeline): if yielded in all_substages: raise UnexpectedPipelineError( 'Already yielded pipeline object %r' % yielded) else: all_substages.add(yielded) last_sub_stage = yielded next_value = yielded.outputs all_output_slots.update(next_value._output_dict.itervalues()) else: raise UnexpectedPipelineError( 'Yielded a disallowed value: %r' % yielded) if last_sub_stage: # Generator's outputs inherited from last running sub-stage. 
# If the generator changes its mind and doesn't yield anything, this # may not happen at all. Missing outputs will be caught when they # are passed to the stage as inputs, or verified from the outside by # the test runner. for slot_name, slot in last_sub_stage.outputs._output_dict.iteritems(): stage.outputs._output_dict[slot_name] = slot # Any inherited slots won't be checked for declaration. all_output_slots.remove(slot) else: # Generator yielded no children, so treat it as a sync function. stage.outputs.default._set_value_test(stage._pipeline_key, None) # Enforce the policy of requiring all undeclared output slots from # child pipelines to be consumed by their parent generator. for slot in all_output_slots: if slot.name == 'default': continue if slot.filled and not slot._strict and not slot._touched: raise SlotNotDeclaredError( 'Undeclared output "%s"; all dynamic outputs from child ' 'pipelines must be consumed.' % slot.name) else: try: result = stage.run_test(*stage.args, **stage.kwargs) except NotImplementedError: result = stage.run(*stage.args, **stage.kwargs) stage.outputs.default._set_value_test(stage._pipeline_key, result) # Enforce strict output usage at the top level. if root: found_outputs = set() for slot in stage.outputs._output_dict.itervalues(): if slot.filled: found_outputs.add(slot.name) if slot.name == 'default': continue if slot.name not in stage.output_names: raise SlotNotDeclaredError( 'Undeclared output from root pipeline "%s"' % slot.name) missing_outputs = set(stage.output_names) - found_outputs if missing_outputs: raise SlotNotFilledError( 'Outputs %r were never filled.' 
% missing_outputs) logging.debug('Finalizing %s(*%s, **%s)', stage._class_path, _short_repr(stage.args), _short_repr(stage.kwargs)) ran = False try: stage.finalized_test() ran = True except NotImplementedError: pass if not ran: try: stage.finalized() except NotImplementedError: pass def evaluate(self, pipeline_key, purpose=None, attempt=0): """Evaluates the given Pipeline and enqueues sub-stages for execution. Args: pipeline_key: The db.Key or stringified key of the _PipelineRecord to run. purpose: Why evaluate was called ('start', 'finalize', or 'abort'). attempt: The attempt number that should be tried. """ After._thread_init() InOrder._thread_init() InOrder._local._activated = False if not isinstance(pipeline_key, db.Key): pipeline_key = db.Key(pipeline_key) pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.error('Pipeline ID "%s" does not exist.', pipeline_key.name()) return if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.error('Pipeline ID "%s" in bad state for purpose "%s": "%s"', pipeline_key.name(), purpose or _BarrierRecord.START, pipeline_record.status) return params = pipeline_record.params root_pipeline_key = \ _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record) default_slot_key = db.Key(params['output_slots']['default']) default_slot_record, root_pipeline_record = db.get([ default_slot_key, root_pipeline_key]) if default_slot_record is None: logging.error('Pipeline ID "%s" default slot "%s" does not exist.', pipeline_key.name(), default_slot_key) return if root_pipeline_record is None: logging.error('Pipeline ID "%s" root pipeline ID "%s" is missing.', pipeline_key.name(), root_pipeline_key.name()) return # Always finalize if we're aborting so pipelines have a chance to cleanup # before they terminate. Pipelines must access 'was_aborted' to find # out how their finalization should work. 
abort_signal = ( purpose == _BarrierRecord.ABORT or root_pipeline_record.abort_requested == True) finalize_signal = ( (default_slot_record.status == _SlotRecord.FILLED and purpose == _BarrierRecord.FINALIZE) or abort_signal) try: pipeline_func_class = mr_util.for_name(pipeline_record.class_path) except ImportError, e: # This means something is wrong with the deployed code. Rely on the # taskqueue system to do retries. retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception( 'Could not locate %s#%s. %s', pipeline_record.class_path, pipeline_key.name(), retry_message) raise try: pipeline_func = pipeline_func_class.from_id( pipeline_key.name(), resolve_outputs=finalize_signal, _pipeline_record=pipeline_record) except SlotNotFilledError, e: logging.exception( 'Could not resolve arguments for %s#%s. Most likely this means there ' 'is a bug in the Pipeline runtime or some intermediate data has been ' 'deleted from the Datastore. Giving up.', pipeline_record.class_path, pipeline_key.name()) self.transition_aborted(pipeline_key) return except Exception, e: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception( 'Instantiating %s#%s raised exception. %s', pipeline_record.class_path, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_record.params['task_retry']: raise else: return else: pipeline_generator = mr_util.is_generator_function( pipeline_func_class.run) caller_output = pipeline_func.outputs if (abort_signal and pipeline_func.async and pipeline_record.status == _PipelineRecord.RUN and not pipeline_func.try_cancel()): logging.warning( 'Could not cancel and abort mid-flight async pipeline: %r#%s', pipeline_func, pipeline_key.name()) return if finalize_signal: try: pipeline_func._finalized_internal( self, pipeline_key, root_pipeline_key, caller_output, abort_signal) except Exception, e: # This means something is wrong with the deployed finalization code. 
# Rely on the taskqueue system to do retries. retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Finalizing %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) raise else: if not abort_signal: self.transition_complete(pipeline_key) return if abort_signal: logging.debug('Marking as aborted %s#%s', pipeline_func, pipeline_key.name()) self.transition_aborted(pipeline_key) return if pipeline_record.current_attempt != attempt: logging.error( 'Received evaluation task for pipeline ID "%s" attempt %d but ' 'current pending attempt is %d', pipeline_key.name(), attempt, pipeline_record.current_attempt) return if pipeline_record.current_attempt >= pipeline_record.max_attempts: logging.error( 'Received evaluation task for pipeline ID "%s" on attempt %d ' 'but that exceeds max attempts %d', pipeline_key.name(), attempt, pipeline_record.max_attempts) return if pipeline_record.next_retry_time is not None: retry_time = pipeline_record.next_retry_time - _RETRY_WIGGLE_TIMEDELTA if self._gettime() <= retry_time: detail_message = ( 'Received evaluation task for pipeline ID "%s" on attempt %d, ' 'which will not be ready until: %s' % (pipeline_key.name(), pipeline_record.current_attempt, pipeline_record.next_retry_time)) logging.warning(detail_message) raise UnexpectedPipelineError(detail_message) if pipeline_record.status == _PipelineRecord.RUN and pipeline_generator: if (default_slot_record.status == _SlotRecord.WAITING and not pipeline_record.fanned_out): # This properly handles the yield-less generator case when the # RUN state transition worked properly but outputting to the default # slot failed. 
self.fill_slot(pipeline_key, caller_output.default, None) return if (pipeline_record.status == _PipelineRecord.WAITING and pipeline_func.async): self.transition_run(pipeline_key) try: result = pipeline_func._run_internal( self, pipeline_key, root_pipeline_key, caller_output) except Exception, e: if self.handle_run_exception(pipeline_key, pipeline_func, e): raise else: return if pipeline_func.async: return if not pipeline_generator: self.fill_slot(pipeline_key, caller_output.default, result) expected_outputs = set(caller_output._output_dict.iterkeys()) found_outputs = self.session_filled_output_names if expected_outputs != found_outputs: exception = SlotNotFilledError( 'Outputs %r for pipeline ID "%s" were never filled by "%s".' % ( expected_outputs - found_outputs, pipeline_key.name(), pipeline_func._class_path)) if self.handle_run_exception(pipeline_key, pipeline_func, exception): raise exception return pipeline_iter = result next_value = None last_sub_stage = None sub_stage = None sub_stage_dict = {} sub_stage_ordering = [] while True: try: yielded = pipeline_iter.send(next_value) except StopIteration: break except Exception, e: if self.handle_run_exception(pipeline_key, pipeline_func, e): raise else: return if isinstance(yielded, Pipeline): if yielded in sub_stage_dict: raise UnexpectedPipelineError( 'Already yielded pipeline object %r with pipeline ID %s' % (yielded, yielded.pipeline_id)) last_sub_stage = yielded next_value = PipelineFuture(yielded.output_names) next_value._after_all_pipelines.update(After._local._after_all_futures) next_value._after_all_pipelines.update(InOrder._local._in_order_futures) sub_stage_dict[yielded] = next_value sub_stage_ordering.append(yielded) InOrder._add_future(next_value) # To aid local testing, the task_retry flag (which instructs the # evaluator to raise all exceptions back up to the task queue) is # inherited by all children from the root down. 
yielded.task_retry = pipeline_func.task_retry else: raise UnexpectedPipelineError( 'Yielded a disallowed value: %r' % yielded) if last_sub_stage: # Final yielded stage inherits outputs from calling pipeline that were not # already filled during the generator's execution. inherited_outputs = params['output_slots'] for slot_name in self.session_filled_output_names: del inherited_outputs[slot_name] sub_stage_dict[last_sub_stage]._inherit_outputs( pipeline_record.class_path, inherited_outputs) else: # Here the generator has yielded nothing, and thus acts as a synchronous # function. We can skip the rest of the generator steps completely and # fill the default output slot to cause finalizing. expected_outputs = set(caller_output._output_dict.iterkeys()) expected_outputs.remove('default') found_outputs = self.session_filled_output_names if expected_outputs != found_outputs: exception = SlotNotFilledError( 'Outputs %r for pipeline ID "%s" were never filled by "%s".' % ( expected_outputs - found_outputs, pipeline_key.name(), pipeline_func._class_path)) if self.handle_run_exception(pipeline_key, pipeline_func, exception): raise exception else: self.fill_slot(pipeline_key, caller_output.default, None) self.transition_run(pipeline_key) return # Allocate any SlotRecords that do not yet exist. entities_to_put = [] for future in sub_stage_dict.itervalues(): for slot in future._output_dict.itervalues(): if not slot._exists: entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=root_pipeline_key)) # Allocate PipelineRecords and BarrierRecords for generator-run Pipelines. 
pipelines_to_run = set() all_children_keys = [] all_output_slots = set() for sub_stage in sub_stage_ordering: future = sub_stage_dict[sub_stage] dependent_slots, output_slots, params_text, params_blob = _generate_args( sub_stage, future, self.queue_name, self.base_path) child_pipeline_key = db.Key.from_path( _PipelineRecord.kind(), uuid.uuid1().hex) all_output_slots.update(output_slots) all_children_keys.append(child_pipeline_key) child_pipeline = _PipelineRecord( key=child_pipeline_key, root_pipeline=root_pipeline_key, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, class_path=sub_stage._class_path, max_attempts=sub_stage.max_attempts) entities_to_put.append(child_pipeline) if not dependent_slots: # This child pipeline will run immediately. pipelines_to_run.add(child_pipeline_key) child_pipeline.start_time = self._gettime() else: entities_to_put.append(_BarrierRecord( parent=child_pipeline_key, key_name=_BarrierRecord.START, target=child_pipeline_key, root_pipeline=root_pipeline_key, blocking_slots=list(dependent_slots))) entities_to_put.append(_BarrierRecord( parent=child_pipeline_key, key_name=_BarrierRecord.FINALIZE, target=child_pipeline_key, root_pipeline=root_pipeline_key, blocking_slots=list(output_slots))) db.put(entities_to_put) self.transition_run(pipeline_key, blocking_slot_keys=all_output_slots, fanned_out_pipelines=all_children_keys, pipelines_to_run=pipelines_to_run) def handle_run_exception(self, pipeline_key, pipeline_func, e): """Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method. 
""" if isinstance(e, Retry): retry_message = str(e) logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message) self.transition_retry(pipeline_key, retry_message) elif isinstance(e, Abort): abort_message = str(e) logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message) pipeline_func.abort(abort_message) else: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Generator %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) return pipeline_func.task_retry def transition_run(self, pipeline_key, blocking_slot_keys=None, fanned_out_pipelines=None, pipelines_to_run=None): """Marks an asynchronous or generator pipeline as running. Does nothing if the pipeline is no longer in a runnable state. Args: pipeline_key: The db.Key of the _PipelineRecord to update. blocking_slot_keys: List of db.Key instances that this pipeline's finalization barrier should wait on in addition to the existing one. This is used to update the barrier to include all child outputs. When None, the barrier will not be updated. fanned_out_pipelines: List of db.Key instances of _PipelineRecords that were fanned out by this generator pipeline. This is distinct from the 'pipelines_to_run' list because not all of the pipelines listed here will be immediately ready to execute. When None, then this generator yielded no children. pipelines_to_run: List of db.Key instances of _PipelineRecords that should be kicked off (fan-out) transactionally as part of this transition. When None, no child pipelines will run. All db.Keys in this list must also be present in the fanned_out_pipelines list. Raises: UnexpectedPipelineError if blocking_slot_keys was not empty and the _BarrierRecord has gone missing. 
""" def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning('Pipeline ID "%s" cannot be marked as run. ' 'Does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status != _PipelineRecord.WAITING: logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.RUN if fanned_out_pipelines: # NOTE: We must model the pipeline relationship in a top-down manner, # meaning each pipeline must point forward to the pipelines that it # fanned out to. The reason is race conditions. If evaluate() # dies early, it may create many unused _PipelineRecord and _SlotRecord # instances that never progress. The only way we know which of these # are valid is by traversing the graph from the root, where the # fanned_out property refers to those pipelines that were run using a # transactional task. child_pipeline_list = list(fanned_out_pipelines) pipeline_record.fanned_out = child_pipeline_list if pipelines_to_run: child_indexes = [ child_pipeline_list.index(p) for p in pipelines_to_run] child_indexes.sort() task = taskqueue.Task( url=self.fanout_handler_path, params=dict(parent_key=str(pipeline_key), child_indexes=child_indexes)) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() if blocking_slot_keys: # NOTE: Always update a generator pipeline's finalization barrier to # include all of the outputs of any pipelines that it runs, to ensure # that finalized calls will not happen until all child pipelines have # completed. barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) finalize_barrier = db.get(barrier_key) if finalize_barrier is None: raise UnexpectedPipelineError( 'Pipeline ID "%s" cannot update finalize barrier. ' 'Does not exist.' 
% pipeline_key.name()) else: finalize_barrier.blocking_slots = list( blocking_slot_keys.union(set(finalize_barrier.blocking_slots))) finalize_barrier.put() db.run_in_transaction(txn) def transition_complete(self, pipeline_key): """Marks the given pipeline as complete. Does nothing if the pipeline is no longer in a state that can be completed. Args: pipeline_key: db.Key of the _PipelineRecord that has completed. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to mark pipeline ID "%s" as complete but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to mark pipeline ID "%s" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.DONE pipeline_record.finalized_time = self._gettime() pipeline_record.put() db.run_in_transaction(txn) def transition_retry(self, pipeline_key, retry_message): """Marks the given pipeline as requiring another retry. Does nothing if all attempts have been exceeded. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. retry_message: User-supplied message indicating the reason for the retry. 
""" def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() params = pipeline_record.params offset_seconds = (params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt)) pipeline_record.next_retry_time = ( self._gettime() + datetime.timedelta(seconds=offset_seconds)) pipeline_record.current_attempt += 1 pipeline_record.retry_message = retry_message pipeline_record.status = _PipelineRecord.WAITING if pipeline_record.current_attempt >= pipeline_record.max_attempts: root_pipeline_key = ( _PipelineRecord.root_pipeline.get_value_for_datastore( pipeline_record)) logging.warning( 'Giving up on pipeline ID "%s" after %d attempt(s); causing abort ' 'all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name()) # NOTE: We do *not* set the status to aborted here to ensure that # this pipeline will be finalized before it has been marked as aborted. 
pipeline_record.abort_message = ( 'Aborting after %d attempts' % pipeline_record.current_attempt) task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) else: task = taskqueue.Task( url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() db.run_in_transaction(txn) def transition_aborted(self, pipeline_key): """Makes the given pipeline as having aborted. Does nothing if the pipeline is in a bad state. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to abort pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to abort pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.ABORTED pipeline_record.finalized_time = self._gettime() pipeline_record.put() db.run_in_transaction(txn) ################################################################################ class _BarrierHandler(webapp.RequestHandler): """Request handler for triggering barriers.""" def post(self): if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: self.response.set_status(403) return context = _PipelineContext.from_environ(self.request.environ) context.notify_barriers( self.request.get('slot_key'), self.request.get('cursor')) class _PipelineHandler(webapp.RequestHandler): """Request handler for running pipelines.""" def post(self): if 'HTTP_X_APPENGINE_TASKNAME' not in 
self.request.environ: self.response.set_status(403) return context = _PipelineContext.from_environ(self.request.environ) context.evaluate(self.request.get('pipeline_key'), purpose=self.request.get('purpose'), attempt=int(self.request.get('attempt', '0'))) class _FanoutAbortHandler(webapp.RequestHandler): """Request handler for fanning out abort notifications.""" def post(self): if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: self.response.set_status(403) return context = _PipelineContext.from_environ(self.request.environ) context.continue_abort( self.request.get('root_pipeline_key'), self.request.get('cursor')) class _FanoutHandler(webapp.RequestHandler): """Request handler for fanning out pipeline children.""" def post(self): if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: self.response.set_status(403) return context = _PipelineContext.from_environ(self.request.environ) # Set of stringified db.Keys of children to run. all_pipeline_keys = set() # For backwards compatibility with the old style of fan-out requests. all_pipeline_keys.update(self.request.get_all('pipeline_key')) # Fetch the child pipelines from the parent. This works around the 10KB # task payload limit. This get() is consistent-on-read and the fan-out # task is enqueued in the transaction that updates the parent, so the # fanned_out property is consistent here. parent_key = self.request.get('parent_key') child_indexes = [int(x) for x in self.request.get_all('child_indexes')] if parent_key: parent_key = db.Key(parent_key) parent = db.get(parent_key) for index in child_indexes: all_pipeline_keys.add(str(parent.fanned_out[index])) all_tasks = [] for pipeline_key in all_pipeline_keys: all_tasks.append(taskqueue.Task( url=context.pipeline_handler_path, params=dict(pipeline_key=pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline_key}, name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name())) batch_size = 100 # Limit of taskqueue API bulk add. 
for i in xrange(0, len(all_tasks), batch_size): batch = all_tasks[i:i+batch_size] try: taskqueue.Queue(context.queue_name).add(batch) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass class _CleanupHandler(webapp.RequestHandler): """Request handler for cleaning up a Pipeline.""" def post(self): if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: self.response.set_status(403) return root_pipeline_key = db.Key(self.request.get('root_pipeline_key')) logging.debug('Cleaning up root_pipeline_key=%r', root_pipeline_key) # TODO(user): Accumulate all BlobKeys from _PipelineRecord and # _SlotRecord entities and delete them. pipeline_keys = ( _PipelineRecord.all(keys_only=True) .filter('root_pipeline =', root_pipeline_key)) db.delete(pipeline_keys) slot_keys = ( _SlotRecord.all(keys_only=True) .filter('root_pipeline =', root_pipeline_key)) db.delete(slot_keys) barrier_keys = ( _BarrierRecord.all(keys_only=True) .filter('root_pipeline =', root_pipeline_key)) db.delete(barrier_keys) status_keys = ( _StatusRecord.all(keys_only=True) .filter('root_pipeline =', root_pipeline_key)) db.delete(status_keys) class _CallbackHandler(webapp.RequestHandler): """Receives asynchronous callback requests from humans or tasks.""" def post(self): self.get() def get(self): # NOTE: The rest of these validations and the undescriptive error code 400 # are present to address security risks of giving external users access to # cause PipelineRecord lookups and execution. This approach is still # vulnerable to timing attacks, since db.get() will have different latency # depending on existence. Luckily, the key names are generally unguessable # UUIDs, so the risk here is low. 
pipeline_id = self.request.get('pipeline_id') if not pipeline_id: logging.error('"pipeline_id" parameter missing.') self.response.set_status(400) return pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.error('Pipeline ID "%s" for callback does not exist.', pipeline_id) self.response.set_status(400) return params = pipeline_record.params real_class_path = params['class_path'] try: pipeline_func_class = mr_util.for_name(real_class_path) except ImportError, e: logging.error('Cannot load class named "%s" for pipeline ID "%s".', real_class_path, pipeline_id) self.response.set_status(400) return if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: if pipeline_func_class.public_callbacks: pass elif pipeline_func_class.admin_callbacks: if not users.is_current_user_admin(): logging.error('Unauthorized callback for admin-only pipeline ID "%s"', pipeline_id) self.response.set_status(400) return else: logging.error('External callback for internal-only pipeline ID "%s"', pipeline_id) self.response.set_status(400) return stage = pipeline_func_class.from_id(pipeline_id) if stage is None: logging.error('Pipeline ID "%s" deleted during callback', pipeline_id) self.response.set_status(400) return kwargs = {} for key in self.request.arguments(): if key != 'pipeline_id': kwargs[str(key)] = self.request.get(key) callback_result = stage._callback_internal(kwargs) if callback_result is not None: status_code, content_type, content = callback_result self.response.set_status(status_code) self.response.headers['Content-Type'] = content_type self.response.out.write(content) ################################################################################ def _get_timestamp_ms(when): """Converts a datetime.datetime to integer milliseconds since the epoch. Requires special handling to preserve microseconds. Args: when: A datetime.datetime instance. 
Returns: Integer time since the epoch in milliseconds. """ ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0) ms_since_epoch += when.microsecond / 1000.0 return int(ms_since_epoch) def _get_internal_status(pipeline_key=None, pipeline_dict=None, slot_dict=None, barrier_dict=None, status_dict=None): """Gets the UI dictionary of a pipeline from a set of status dictionaries. Args: pipeline_key: The key of the pipeline to lookup. pipeline_dict: Dictionary mapping pipeline db.Key to _PipelineRecord. Default is an empty dictionary. slot_dict: Dictionary mapping slot db.Key to _SlotRecord. Default is an empty dictionary. barrier_dict: Dictionary mapping barrier db.Key to _BarrierRecord. Default is an empty dictionary. status_dict: Dictionary mapping status record db.Key to _StatusRecord. Default is an empty dictionary. Returns: Dictionary with the keys: classPath: The pipeline function being run. args: List of positional argument slot dictionaries. kwargs: Dictionary of keyword argument slot dictionaries. outputs: Dictionary of output slot dictionaries. children: List of child pipeline IDs. queueName: Queue on which this pipeline is running. afterSlotKeys: List of Slot Ids after which this pipeline runs. currentAttempt: Number of the current attempt, starting at 1. maxAttempts: Maximum number of attempts before aborting. backoffSeconds: Constant factor for backoff before retrying. backoffFactor: Exponential factor for backoff before retrying. status: Current status of the pipeline. startTimeMs: When this pipeline ran or will run due to retries, if present. endTimeMs: When this pipeline finalized, if present. lastRetryMessage: Why the pipeline failed during the last retry, if there was a failure; may be empty. abortMessage: For root pipelines, why the pipeline was aborted if it was aborted; may be empty. Dictionary will contain these keys if explicit status is set: statusTimeMs: When the status was set as milliseconds since the epoch. 
statusMessage: Status message, if present. statusConsoleUrl: The relative URL for the console of this pipeline. statusLinks: Dictionary mapping human-readable names to relative URLs for related URLs to this pipeline. Raises: PipelineStatusError if any input is bad. """ if pipeline_dict is None: pipeline_dict = {} if slot_dict is None: slot_dict = {} if barrier_dict is None: barrier_dict = {} if status_dict is None: status_dict = {} pipeline_record = pipeline_dict.get(pipeline_key) if pipeline_record is None: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % pipeline_key.name()) params = pipeline_record.params root_pipeline_key = \ _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record) default_slot_key = db.Key(params['output_slots']['default']) start_barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.START, parent=pipeline_key) finalize_barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) status_record_key = db.Key.from_path( _StatusRecord.kind(), pipeline_key.name()) start_barrier = barrier_dict.get(start_barrier_key) finalize_barrier = barrier_dict.get(finalize_barrier_key) default_slot = slot_dict.get(default_slot_key) status_record = status_dict.get(status_record_key) if finalize_barrier is None: raise PipelineStatusError( 'Finalization barrier missing for pipeline ID "%s"' % pipeline_key.name()) if default_slot is None: raise PipelineStatusError( 'Default output slot with key=%s missing for pipeline ID "%s"' % ( default_slot_key, pipeline_key.name())) output = { 'classPath': pipeline_record.class_path, 'args': list(params['args']), 'kwargs': params['kwargs'].copy(), 'outputs': params['output_slots'].copy(), 'children': [key.name() for key in pipeline_record.fanned_out], 'queueName': params['queue_name'], 'afterSlotKeys': [str(key) for key in params['after_all']], 'currentAttempt': pipeline_record.current_attempt + 1, 'maxAttempts': pipeline_record.max_attempts, 
'backoffSeconds': pipeline_record.params['backoff_seconds'], 'backoffFactor': pipeline_record.params['backoff_factor'], } # TODO(user): Truncate args, kwargs, and outputs to < 1MB each so we # can reasonably return the whole tree of pipelines and their outputs. # Coerce each value to a string to truncate if necessary. For now if the # params are too big it will just cause the whole status page to break. # Fix the key names in parameters to match JavaScript style. for value_dict in itertools.chain( output['args'], output['kwargs'].itervalues()): if 'slot_key' in value_dict: value_dict['slotKey'] = value_dict.pop('slot_key') # Figure out the pipeline's status. if pipeline_record.status in (_PipelineRecord.WAITING, _PipelineRecord.RUN): if default_slot.status == _SlotRecord.FILLED: status = 'finalizing' elif (pipeline_record.status == _PipelineRecord.WAITING and pipeline_record.next_retry_time is not None): status = 'retry' elif start_barrier and start_barrier.status == _BarrierRecord.WAITING: # start_barrier will be missing for root pipelines status = 'waiting' else: status = 'run' elif pipeline_record.status == _PipelineRecord.DONE: status = 'done' elif pipeline_record.status == _PipelineRecord.ABORTED: status = 'aborted' output['status'] = status if status_record: output['statusTimeMs'] = _get_timestamp_ms(status_record.status_time) if status_record.message: output['statusMessage'] = status_record.message if status_record.console_url: output['statusConsoleUrl'] = status_record.console_url if status_record.link_names: output['statusLinks'] = dict( zip(status_record.link_names, status_record.link_urls)) # Populate status-depenedent fields. 
if status in ('run', 'finalizing', 'done', 'retry'): if pipeline_record.next_retry_time is not None: output['startTimeMs'] = _get_timestamp_ms(pipeline_record.next_retry_time) elif start_barrier: # start_barrier will be missing for root pipelines output['startTimeMs'] = _get_timestamp_ms(start_barrier.trigger_time) elif pipeline_record.start_time: # Assume this pipeline ran immediately upon spawning with no # start barrier or it's the root pipeline. output['startTimeMs'] = _get_timestamp_ms(pipeline_record.start_time) if status in ('finalizing',): output['endTimeMs'] = _get_timestamp_ms(default_slot.fill_time) if status in ('done',): output['endTimeMs'] = _get_timestamp_ms(pipeline_record.finalized_time) if pipeline_record.next_retry_time is not None: output['lastRetryMessage'] = pipeline_record.retry_message if pipeline_record.abort_message: output['abortMessage'] = pipeline_record.abort_message return output def _get_internal_slot(slot_key=None, filler_pipeline_key=None, slot_dict=None): """Gets information about a _SlotRecord for display in UI. Args: slot_key: The db.Key of the slot to fetch. filler_pipeline_key: In the case the slot has not yet been filled, assume that the given db.Key (for a _PipelineRecord) will be the filler of the slot in the future. slot_dict: The slot JSON dictionary. Returns: Dictionary with the keys: status: Slot status: 'filled' or 'waiting' fillTimeMs: Time in milliseconds since the epoch of when it was filled. value: The current value of the slot, which is a slot's JSON dictionary. fillerPipelineId: The pipeline ID of what stage has or should fill this slot. Raises: PipelineStatusError if any input is bad. """ if slot_dict is None: slot_dict = {} slot_record = slot_dict.get(slot_key) if slot_record is None: raise PipelineStatusError( 'Could not find data for output slot key "%s".' 
% slot_key) output = {} if slot_record.status == _SlotRecord.FILLED: output['status'] = 'filled' output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time) output['value'] = slot_record.value filler_pipeline_key = \ _SlotRecord.filler.get_value_for_datastore(slot_record) else: output['status'] = 'waiting' if filler_pipeline_key: output['fillerPipelineId'] = filler_pipeline_key.name() return output def get_status_tree(root_pipeline_id): """Gets the full status tree of a pipeline. Args: root_pipeline_id: The root pipeline ID to get status for. Returns: Dictionary with the keys: rootPipelineId: The ID of the root pipeline. slots: Mapping of slot IDs to result of from _get_internal_slot. pipelines: Mapping of pipeline IDs to result of _get_internal_status. Raises: PipelineStatusError if any input is bad. """ root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id) root_pipeline_record = db.get(root_pipeline_key) if root_pipeline_record is None: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % root_pipeline_id) if (root_pipeline_key != _PipelineRecord.root_pipeline.get_value_for_datastore( root_pipeline_record)): raise PipelineStatusError( 'Pipeline ID "%s" is not a root pipeline!' % root_pipeline_id) found_pipeline_dict = dict((stage.key(), stage) for stage in _PipelineRecord.all().filter('root_pipeline =', root_pipeline_key)) found_slot_dict = dict((slot.key(), slot) for slot in _SlotRecord.all().filter('root_pipeline =', root_pipeline_key)) found_barrier_dict = dict((barrier.key(), barrier) for barrier in _BarrierRecord.all().filter('root_pipeline =', root_pipeline_key)) found_status_dict = dict((status.key(), status) for status in _StatusRecord.all().filter('root_pipeline =', root_pipeline_key)) # Breadth-first traversal of _PipelineRecord instances by following # _PipelineRecord.fanned_out property values. 
valid_pipeline_keys = set([root_pipeline_key]) slot_filler_dict = {} # slot_key to pipeline_key expand_stack = [root_pipeline_record] while expand_stack: old_stack = expand_stack expand_stack = [] for pipeline_record in old_stack: for child_pipeline_key in pipeline_record.fanned_out: # This will let us prune off those pipelines which were allocated in # the Datastore but were never run due to mid-flight task failures. child_pipeline_record = found_pipeline_dict.get(child_pipeline_key) if child_pipeline_record is None: raise PipelineStatusError( 'Pipeline ID "%s" points to child ID "%s" which does not exist.' % (pipeline_record.key().name(), child_pipeline_key.name())) expand_stack.append(child_pipeline_record) valid_pipeline_keys.add(child_pipeline_key) # Figure out the deepest pipeline that's responsible for outputting to # a particular _SlotRecord, so we can report which pipeline *should* # be the filler. child_outputs = child_pipeline_record.params['output_slots'] for output_slot_key in child_outputs.itervalues(): slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key output = { 'rootPipelineId': root_pipeline_id, 'slots': {}, 'pipelines': {}, } for pipeline_key in found_pipeline_dict.keys(): if pipeline_key not in valid_pipeline_keys: continue output['pipelines'][pipeline_key.name()] = _get_internal_status( pipeline_key=pipeline_key, pipeline_dict=found_pipeline_dict, slot_dict=found_slot_dict, barrier_dict=found_barrier_dict, status_dict=found_status_dict) for slot_key, filler_pipeline_key in slot_filler_dict.iteritems(): output['slots'][str(slot_key)] = _get_internal_slot( slot_key=slot_key, filler_pipeline_key=filler_pipeline_key, slot_dict=found_slot_dict) return output class _StatusUiHandler(webapp.RequestHandler): """Render the status UI.""" _RESOURCE_MAP = { '/status': ('ui/status.html', 'text/html'), '/status.css': ('ui/status.css', 'text/css'), '/status.js': ('ui/status.js', 'text/javascript'), '/common.js': ('ui/common.js', 'text/javascript'), 
'/common.css': ('ui/common.css', 'text/css'), '/jquery-1.4.2.min.js': ('ui/jquery-1.4.2.min.js', 'text/javascript'), '/jquery.treeview.min.js': ('ui/jquery.treeview.min.js', 'text/javascript'), '/jquery.cookie.js': ('ui/jquery.cookie.js', 'text/javascript'), '/jquery.timeago.js': ('ui/jquery.timeago.js', 'text/javascript'), '/jquery.ba-hashchange.min.js': ( 'ui/jquery.ba-hashchange.min.js', 'text/javascript'), '/jquery.json.min.js': ('ui/jquery.json.min.js', 'text/javascript'), '/jquery.treeview.css': ('ui/jquery.treeview.css', 'text/css'), '/treeview-default.gif': ('ui/images/treeview-default.gif', 'image/gif'), '/treeview-default-line.gif': ( 'ui/images/treeview-default-line.gif', 'image/gif'), '/treeview-black.gif': ('ui/images/treeview-black.gif', 'image/gif'), '/treeview-black-line.gif': ( 'ui/images/treeview-black-line.gif', 'image/gif'), '/images/treeview-default.gif': ( 'ui/images/treeview-default.gif', 'image/gif'), '/images/treeview-default-line.gif': ( 'ui/images/treeview-default-line.gif', 'image/gif'), '/images/treeview-black.gif': ( 'ui/images/treeview-black.gif', 'image/gif'), '/images/treeview-black-line.gif': ( 'ui/images/treeview-black-line.gif', 'image/gif'), } def get(self, resource=''): if _ENFORCE_AUTH: if users.get_current_user() is None: self.redirect(users.create_login_url(self.request.url)) return if not users.is_current_user_admin(): self.response.out.write('Forbidden') self.response.set_status(403) return if resource not in self._RESOURCE_MAP: logging.info('Could not find: %s', resource) self.response.set_status(404) self.response.out.write("Resource not found.") self.response.headers['Content-Type'] = 'text/plain' return relative_path, content_type = self._RESOURCE_MAP[resource] path = os.path.join(os.path.dirname(__file__), relative_path) if not _DEBUG: self.response.headers["Cache-Control"] = "public, max-age=300" self.response.headers["Content-Type"] = content_type self.response.out.write(open(path, 'rb').read()) class 
_BaseRpcHandler(webapp.RequestHandler): """Base handler for JSON-RPC responses. Sub-classes should fill in the 'json_response' property. All exceptions will be rturne """ def get(self): if _ENFORCE_AUTH: if not users.is_current_user_admin(): self.response.out.write('Forbidden') self.response.set_status(403) return # XSRF protection if (not _DEBUG and self.request.headers.get('X-Requested-With') != 'XMLHttpRequest'): self.response.out.write('Request missing X-Requested-With header') self.response.set_status(403) return self.json_response = {} try: self.handle() output = simplejson.dumps(self.json_response) except Exception, e: self.json_response.clear() self.json_response['error_class'] = e.__class__.__name__ self.json_response['error_message'] = str(e) self.json_response['error_traceback'] = traceback.format_exc() output = simplejson.dumps(self.json_response) self.response.set_status(200) self.response.headers['Content-Type'] = 'text/javascript' self.response.headers['Cache-Control'] = 'no-cache' self.response.out.write(output) def handle(self): raise NotImplementedError('To be implemented by sub-classes.') class _TreeStatusHandler(_BaseRpcHandler): """RPC handler for getting the status of all children of root pipeline.""" def handle(self): self.json_response.update( get_status_tree(self.request.get('root_pipeline_id'))) ################################################################################ def set_enforce_auth(new_status): """Sets whether Pipeline API handlers rely on app.yaml for access control. Args: new_status: If True, then the Pipeline API will enforce its own access control on status and static file handlers. If False, then it will assume app.yaml is doing the enforcement. """ global _ENFORCE_AUTH _ENFORCE_AUTH = new_status def create_handlers_map(prefix='.*'): """Create new handlers map. Args: prefix: url prefix to use. Returns: list of (regexp, handler) pairs for WSGIApplication constructor. 
""" return [ (prefix + '/output', _BarrierHandler), (prefix + '/run', _PipelineHandler), (prefix + '/finalized', _PipelineHandler), (prefix + '/cleanup', _CleanupHandler), (prefix + '/abort', _PipelineHandler), (prefix + '/fanout', _FanoutHandler), (prefix + '/fanout_abort', _FanoutAbortHandler), (prefix + '/callback', _CallbackHandler), (prefix + '/rpc/tree', _TreeStatusHandler), (prefix + '(/.+)', _StatusUiHandler), ]
akbertram/appengine-pipeline
src/pipeline/pipeline.py
Python
apache-2.0
112,693
from __future__ import division from __future__ import unicode_literals from builtins import range from past.utils import old_div import hashlib import os import random import string import tempfile import re import time import urllib from datetime import datetime from datetime import timedelta from elodie.compatability import _rename from elodie.external.pyexiftool import ExifTool from elodie.dependencies import get_exiftool from elodie import constants def checksum(file_path, blocksize=65536): hasher = hashlib.sha256() with open(file_path, 'rb') as f: buf = f.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = f.read(blocksize) return hasher.hexdigest() return None def create_working_folder(format=None): temporary_folder = tempfile.gettempdir() folder = os.path.join(temporary_folder, random_string(10, format), random_string(10, format)) os.makedirs(folder) return (temporary_folder, folder) def download_file(name, destination): try: url_to_file = 'https://s3.amazonaws.com/jmathai/github/elodie/{}'.format(name) # urlretrieve works differently for python 2 and 3 if constants.python_version < 3: final_name = '{}/{}{}'.format(destination, random_string(10), os.path.splitext(name)[1]) urllib.urlretrieve( url_to_file, final_name ) else: final_name, headers = urllib.request.urlretrieve(url_to_file) return final_name except Exception as e: return False def get_file(name): file_path = get_file_path(name) if not os.path.isfile(file_path): return False return file_path def get_file_path(name): current_folder = os.path.dirname(os.path.realpath(__file__)) return os.path.join(current_folder, 'files', name) def get_test_location(): return (61.013710, 99.196656, 'Siberia') def populate_folder(number_of_files, include_invalid=False): folder = '%s/%s' % (tempfile.gettempdir(), random_string(10)) os.makedirs(folder) for x in range(0, number_of_files): ext = 'jpg' if x % 2 == 0 else 'txt' fname = '%s/%s.%s' % (folder, x, ext) with open(fname, 'a'): os.utime(fname, None) if 
include_invalid: fname = '%s/%s' % (folder, 'invalid.invalid') with open(fname, 'a'): os.utime(fname, None) return folder def random_string(length, format=None): format_choice = string.ascii_uppercase + string.digits if format == 'int': format_choice = string.digits elif format == 'str': format_choice = string.asci_uppercase return ''.join(random.SystemRandom().choice(format_choice) for _ in range(length)) def random_decimal(): return random.random() def random_coordinate(coordinate, precision): # Here we add to the decimal section of the coordinate by a given precision return coordinate + ((old_div(10.0, (10.0**precision))) * random_decimal()) def temp_dir(): return tempfile.gettempdir() def is_windows(): return os.name == 'nt' # path_tz_fix(file_name) # Change timestamp in file_name by the offset # between UTC and local time, i.e. # 2015-12-05_00-59-26-with-title-some-title.jpg -> # 2015-12-04_20-59-26-with-title-some-title.jpg # (Windows only) def path_tz_fix(file_name): if is_windows(): # Calculate the offset between UTC and local time tz_shift = old_div((datetime.fromtimestamp(0) - datetime.utcfromtimestamp(0)).seconds,3600) # replace timestamp in file_name m = re.search('(\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2})',file_name) t_date = datetime.fromtimestamp(time.mktime(time.strptime(m.group(0), '%Y-%m-%d_%H-%M-%S'))) s_date_fix = (t_date-timedelta(hours=tz_shift)).strftime('%Y-%m-%d_%H-%M-%S') return re.sub('\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}',s_date_fix,file_name) else: return file_name # time_convert(s_time) # Change s_time (struct_time) by the offset # between UTC and local time # (Windows only) def time_convert(s_time): if is_windows(): return time.gmtime((time.mktime(s_time))) else: return s_time # isclose(a,b,rel_tol) # To compare float coordinates a and b # with relative tolerance c def isclose(a, b, rel_tol = 1e-8): if not isinstance(a, (int, float)) or not isinstance(b, (int, float)): return False diff = abs(a - b) return (diff <= abs(rel_tol * a) and 
diff <= abs(rel_tol * b)) def reset_dbs(): """ Back up hash_db and location_db """ # This is no longer needed. See gh-322 # https://github.com/jmathai/elodie/issues/322 pass def restore_dbs(): """ Restore back ups of hash_db and location_db """ # This is no longer needed. See gh-322 # https://github.com/jmathai/elodie/issues/322 pass def setup_module(): exiftool_addedargs = [ u'-config', u'"{}"'.format(constants.exiftool_config) ] ExifTool(executable_=get_exiftool(), addedargs=exiftool_addedargs).start() def teardown_module(): ExifTool().terminate
jmathai/elodie
elodie/tests/helper.py
Python
apache-2.0
5,170
# Copyright 2014 Open vStorage NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This package contains all views for the Internal API """
tcpcloud/openvstorage
webapps/api/backend/views/__init__.py
Python
apache-2.0
642
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Client side of the scheduler manager RPC API.
"""

import oslo_messaging as messaging

import nova.conf
from nova.objects import base as objects_base
from nova import rpc

CONF = nova.conf.CONF


class SchedulerAPI(object):
    '''Client side of the scheduler rpc API.

    API version history:

        * 1.0 - Initial version.
        * 1.1 - Changes to prep_resize():
            * remove instance_uuid, add instance
            * remove instance_type_id, add instance_type
            * remove topic, it was unused
        * 1.2 - Remove topic from run_instance, it was unused
        * 1.3 - Remove instance_id, add instance to live_migration
        * 1.4 - Remove update_db from prep_resize
        * 1.5 - Add reservations argument to prep_resize()
        * 1.6 - Remove reservations argument to run_instance()
        * 1.7 - Add create_volume() method, remove topic from live_migration()

        * 2.0 - Remove 1.x backwards compat
        * 2.1 - Add image_id to create_volume()
        * 2.2 - Remove reservations argument to create_volume()
        * 2.3 - Remove create_volume()
        * 2.4 - Change update_service_capabilities()
            * accepts a list of capabilities
        * 2.5 - Add get_backdoor_port()
        * 2.6 - Add select_hosts()

        ... Grizzly supports message version 2.6. So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 2.6.

        * 2.7 - Add select_destinations()
        * 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used
                by the compute manager for retries.
        * 2.9 - Added the legacy_bdm_in_spec parameter to run_instance()

        ... Havana supports message version 2.9. So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 2.9.

        * Deprecated live_migration() call, moved to conductor
        * Deprecated select_hosts()

        3.0 - Removed backwards compat

        ... Icehouse and Juno support message version 3.0. So, any changes to
        existing methods in 3.x after that point should be done such that they
        can handle the version_cap being set to 3.0.

        * 3.1 - Made select_destinations() send flavor object

        * 4.0 - Removed backwards compat for Icehouse
        * 4.1 - Add update_aggregates() and delete_aggregate()
        * 4.2 - Added update_instance_info(), delete_instance_info(), and
                sync_instance_info() methods

        ... Kilo and Liberty support message version 4.2. So, any changes to
        existing methods in 4.x after that point should be done such that
        they can handle the version_cap being set to 4.2.

        * 4.3 - Modify select_destinations() signature by providing a
                RequestSpec obj
    '''

    # Maps a release name (from CONF.upgrade_levels.scheduler) to the
    # highest RPC message version that release understands.
    VERSION_ALIASES = {
        'grizzly': '2.6',
        'havana': '2.9',
        'icehouse': '3.0',
        'juno': '3.0',
        'kilo': '4.2',
        'liberty': '4.2',
    }

    def __init__(self):
        """Build the versioned RPC client for the scheduler topic."""
        super(SchedulerAPI, self).__init__()
        target = messaging.Target(topic=CONF.scheduler_topic, version='4.0')
        # The configured upgrade level may be either a release alias
        # (e.g. 'kilo') or a literal version string; the .get() fallback
        # passes a literal value straight through as the cap.
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler,
                                               CONF.upgrade_levels.scheduler)
        serializer = objects_base.NovaObjectSerializer()
        self.client = rpc.get_client(target, version_cap=version_cap,
                                     serializer=serializer)

    def select_destinations(self, ctxt, spec_obj):
        """Ask the scheduler (blocking call) for destination hosts.

        Sends the RequestSpec object at 4.3; when the version cap forbids
        that, downgrades to the 4.0 legacy dict-based signature.
        """
        version = '4.3'
        msg_args = {'spec_obj': spec_obj}
        if not self.client.can_send_version(version):
            # Pre-4.3 schedulers expect legacy request_spec /
            # filter_properties dicts instead of a RequestSpec object.
            del msg_args['spec_obj']
            msg_args['request_spec'] = spec_obj.to_legacy_request_spec_dict()
            msg_args['filter_properties'
                     ] = spec_obj.to_legacy_filter_properties_dict()
            version = '4.0'
        cctxt = self.client.prepare(version=version)
        return cctxt.call(ctxt, 'select_destinations', **msg_args)

    def update_aggregates(self, ctxt, aggregates):
        """Broadcast (fire-and-forget) updated aggregates to all schedulers."""
        # NOTE(sbauza): Yes, it's a fanout, we need to update all schedulers
        cctxt = self.client.prepare(fanout=True, version='4.1')
        cctxt.cast(ctxt, 'update_aggregates', aggregates=aggregates)

    def delete_aggregate(self, ctxt, aggregate):
        """Broadcast (fire-and-forget) an aggregate deletion to all schedulers."""
        # NOTE(sbauza): Yes, it's a fanout, we need to update all schedulers
        cctxt = self.client.prepare(fanout=True, version='4.1')
        cctxt.cast(ctxt, 'delete_aggregate', aggregate=aggregate)

    def update_instance_info(self, ctxt, host_name, instance_info):
        """Fan out one host's updated instance info to all schedulers."""
        cctxt = self.client.prepare(version='4.2', fanout=True)
        return cctxt.cast(ctxt, 'update_instance_info', host_name=host_name,
                          instance_info=instance_info)

    def delete_instance_info(self, ctxt, host_name, instance_uuid):
        """Fan out removal of one instance from a host's info to schedulers."""
        cctxt = self.client.prepare(version='4.2', fanout=True)
        return cctxt.cast(ctxt, 'delete_instance_info', host_name=host_name,
                          instance_uuid=instance_uuid)

    def sync_instance_info(self, ctxt, host_name, instance_uuids):
        """Fan out a host's full instance-uuid list so schedulers can resync."""
        cctxt = self.client.prepare(version='4.2', fanout=True)
        return cctxt.cast(ctxt, 'sync_instance_info', host_name=host_name,
                          instance_uuids=instance_uuids)

    # NOTE(CHANGE)
    # TODO(Yingxin): Bump version
    def notify_schedulers(self, ctxt, host_name, scheduler=None):
        """Notify one scheduler (if named) or all schedulers about a host."""
        if scheduler is not None:
            cctxt = self.client.prepare(version='4.0', server=scheduler)
        else:
            cctxt = self.client.prepare(version='4.0', fanout=True)
        return cctxt.cast(ctxt, 'notified_by_remote', host_name=host_name)

    def send_commit(self, ctxt, commit, compute, scheduler, seed):
        """Send (fire-and-forget) a commit from a compute to one scheduler."""
        cctxt = self.client.prepare(version='4.0', server=scheduler)
        cctxt.cast(ctxt, 'receive_commit', commit=commit,
                   compute=compute, seed=seed)
cyx1231st/nova
nova/scheduler/rpcapi.py
Python
apache-2.0
6,665
@when(u'I get the text from the label')
def step_impl(context):
    # Capture the label's text via the id-based accessor on the page object
    # so a later "then" step can compare against it.
    context.expected_text = context.page.label_id()


@when(u'I search for the label by "{how}"')
def step_impl(context, how):
    # Resolve the page-object accessor named after the locator strategy
    # (e.g. how='id' -> page.label_id) and capture the text it returns.
    accessor = getattr(context.page, 'label_{0}'.format(how))
    context.expected_text = accessor()
OnShift/page_object
features/steps/label_steps.py
Python
apache-2.0
288
#!/usr/bin/env python3 # -*- coding: utf-8 -*- #------------------------------------------------------------------------------- # Name: # Purpose: This .py file is the main Framework file # It ranks images of a specific person of interest in a static manner # # Required libs: python-dateutil, numpy,matplotlib,pyparsing # Author: konkonst # # Created: 30/03/2014 # Copyright: (c) ITI (CERTH) 2014 # Licence: <apache licence 2.0> #------------------------------------------------------------------------------- import time,os,pickle,glob,shutil, personPopularity from staticCommPersonTask import communitystatic print('staticCommPersonCentered') print(time.asctime( time.localtime(time.time()) )) '''PARAMETERS''' #Construct the data class from scratch: 1-yes / 2- from the community detection/ else-perform only the ranking dataextract = 1 #Provide a time Limit (unix timestamp) about when the dataset begins in case you only want part of the dataset. If it is set to 0 the whole dataset is considered. timeLimit = 0 #1071561600 #Community detection method. 'Ahn','Demon' and 'Copra' for overlapping and 'Louvain' for non. Ahn carries a threshold. commDetectMethod = ['Demon', 0.66] #User sets desired number of displayed top images topImages = 8 #User sets desired number of most frequent people to retrieve topPeople = 200 #Provide people set or leave empty to retrieve images for the number of topPeople as set above peopleSet = ['justin_timberlake','oprah_winfrey','lady_gaga','justin_bieber','michael_schumacher','miley_cyrus','jk_rowling','zinedine_zidane','barack_obama','prince_william','brad_pitt_actor','leonardo_dicaprio','natalie_portman'] peopleSet.sort() ##peopleSet = [] #Uncomment this to activate the use of the rankedPeople.txt pool of users #Delete all previous folders containing results? 
(Does not apply to the html files) delFolders = 0 #If there are any nodes that should not be considered, please place them in './data/txt/stopNodes.txt' '''Functions''' t = time.time() filename = [f for f in os.listdir("./data/txt/")] for idx,files in enumerate(filename): print(str(idx+1) + '.' + files) selection = int(input('Select a dataset from the above: '))-1 dataset_path_results = "./data/"+filename[selection][:-4]+"/staticPersonCentered_"+commDetectMethod[0]+"/results/" dataset_path_tmp = "./data/"+filename[selection][:-4]+"/staticPersonCentered_"+commDetectMethod[0]+"/tmp/" datasetFilename = './data/txt/'+filename[selection] if not os.path.exists(dataset_path_results): os.makedirs(dataset_path_results) os.makedirs(dataset_path_tmp) if not os.path.exists(dataset_path_results+"rankedPeople.txt"): personPopularity.popPerson(datasetFilename, dataset_path_results, dataset_path_tmp, commDetectMethod,timeLimit=timeLimit) if dataextract==1:#Start from scratch data = communitystatic.from_txt(datasetFilename,dataset_path_results,dataset_path_tmp,timeLimit=timeLimit) dataPck = open(dataset_path_tmp + "allPersondata.pck", "wb") pickle.dump(data, dataPck , protocol = 2) dataPck.close() del(data) elapsed = time.time() - t print('Stage 1: %.2f seconds' % elapsed) if dataextract==1 or dataextract==2:#If the basic data (authors, mentions, time) has been created data = pickle.load(open(dataset_path_tmp + "allPersondata.pck", "rb")) captiondict = data.captiondict print('static Community detection method selected is :'+commDetectMethod[0]) dataStatic=data.extraction(commDetectMethod) del(data) elapsed = time.time() - t print('\nStage 2: %.2f seconds' % elapsed) decisionforAll = input('\nRetrieve the topImages by screening them one by one???(y or n) ') if dataextract ==1 or dataextract ==2 or dataextract ==3:#Only ranking beyond this point data = pickle.load(open(dataset_path_tmp + "allPersondata.pck", "rb")) captiondict = data.captiondict del(data) dataStatic = 
pickle.load(open(dataset_path_tmp + 'comm_'+commDetectMethod[0]+'.pck','rb')) #delete folders if you're starting from scratch if delFolders == 1: result_files = glob.glob(dataset_path_results+'/analysis/*.txt') if result_files: for file in result_files: os.remove(file) if not peopleSet: with open(dataset_path_results+'rankedPeople.txt','r') as f: for lineId,line in enumerate(f): if lineId>topPeople-1: break line = line.split('\t') peopleSet.append(line[0]) for person in peopleSet: if decisionforAll != str('n') and not os.path.exists(dataset_path_results+'html/'+person): os.makedirs(dataset_path_results+'html/'+person) if decisionforAll != str('n'): personDecision = input('\nRetrieve images for '+person+'???(y or n) ') if decisionforAll == str('n'): print("\nRetrieval Commences for "+person) if decisionforAll == str('n') or personDecision == str('y'): dataStatic.photoRetrieval(topImages, person, captiondict,decisionforAll) dataStatic.popularity_coappearence(topImages, person, captiondict) elapsed = time.time() - t print('\nStage 3: %.2f seconds' % elapsed)
socialsensor/public-figure-image-ranking
python/mainStaticPersonTask.py
Python
apache-2.0
5,159
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ NTTCIS Common Components """ import xml.etree.ElementTree as etree import re from functools import wraps from copy import deepcopy from base64 import b64encode from time import sleep from io import BytesIO try: from collections.abc import MutableSequence, Mapping except ImportError: from collections import MutableSequence, Mapping # TODO: use disutils.version when Travis CI fixed the pylint issue with version # from distutils.version import LooseVersion from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.base import ConnectionUserAndKey, XmlResponse, RawResponse from libcloud.compute.base import Node from libcloud.utils.py3 import basestring from libcloud.utils.xml import findtext from libcloud.compute.types import LibcloudError, InvalidCredsError # Roadmap / TODO: # # 1.0 - Copied from OpSource API, named provider details. 
# setup a few variables to represent all of the NTTC-CIS cloud namespaces NAMESPACE_BASE = "http://oec.api.opsource.net/schemas" ORGANIZATION_NS = NAMESPACE_BASE + "/organization" SERVER_NS = NAMESPACE_BASE + "/server" NETWORK_NS = NAMESPACE_BASE + "/network" DIRECTORY_NS = NAMESPACE_BASE + "/directory" GENERAL_NS = NAMESPACE_BASE + "/general" BACKUP_NS = NAMESPACE_BASE + "/backup" # API 2.0 Namespaces and URNs TYPES_URN = "urn:didata.com:api:cloud:types" # API end-points API_ENDPOINTS = { 'na': { 'name': 'North America (NA)', 'host': 'api-na.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'eu': { 'name': 'Europe (EU)', 'host': 'api-eu.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'au': { 'name': 'Australia (AU)', 'host': 'api-au.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'au-gov': { 'name': 'Australia Canberra ACT (AU)', 'host': 'api-canberra.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'af': { 'name': 'Africa (AF)', 'host': 'api-mea.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'ap': { 'name': 'Asia Pacific (AP)', 'host': 'api-ap.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'ca': { 'name': 'Canada (CA)', 'host': 'api-canada.dimensiondata.com', 'vendor': 'NTTC-CIS' }, 'is-na': { 'name': 'North America (NA)', 'host': 'usapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-eu': { 'name': 'Europe (EU)', 'host': 'euapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-au': { 'name': 'Australia (AU)', 'host': 'auapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-af': { 'name': 'Africa (AF)', 'host': 'meaapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-ap': { 'name': 'Asia Pacific (AP)', 'host': 'apapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-latam': { 'name': 'South America (LATAM)', 'host': 'latamapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'is-canada': { 'name': 'Canada (CA)', 'host': 'canadaapi.cloud.is.co.za', 'vendor': 'InternetSolutions' }, 'ntta-na': { 'name': 'North America (NA)', 'host': 'cloudapi.nttamerica.com', 
'vendor': 'NTTNorthAmerica' }, 'ntta-eu': { 'name': 'Europe (EU)', 'host': 'eucloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-au': { 'name': 'Australia (AU)', 'host': 'aucloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-af': { 'name': 'Africa (AF)', 'host': 'sacloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'ntta-ap': { 'name': 'Asia Pacific (AP)', 'host': 'hkcloudapi.nttamerica.com', 'vendor': 'NTTNorthAmerica' }, 'cisco-na': { 'name': 'North America (NA)', 'host': 'iaas-api-na.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-eu': { 'name': 'Europe (EU)', 'host': 'iaas-api-eu.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-au': { 'name': 'Australia (AU)', 'host': 'iaas-api-au.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-af': { 'name': 'Africa (AF)', 'host': 'iaas-api-mea.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-ap': { 'name': 'Asia Pacific (AP)', 'host': 'iaas-api-ap.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-latam': { 'name': 'South America (LATAM)', 'host': 'iaas-api-sa.cisco-ccs.com', 'vendor': 'Cisco' }, 'cisco-canada': { 'name': 'Canada (CA)', 'host': 'iaas-api-ca.cisco-ccs.com', 'vendor': 'Cisco' }, 'med1-il': { 'name': 'Israel (IL)', 'host': 'api.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-na': { 'name': 'North America (NA)', 'host': 'api-na.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-eu': { 'name': 'Europe (EU)', 'host': 'api-eu.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-au': { 'name': 'Australia (AU)', 'host': 'api-au.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-af': { 'name': 'Africa (AF)', 'host': 'api-af.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-ap': { 'name': 'Asia Pacific (AP)', 'host': 'api-ap.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-latam': { 'name': 'South America (LATAM)', 'host': 'api-sa.cloud.med-1.com', 'vendor': 'Med-1' }, 'med1-canada': { 'name': 'Canada (CA)', 'host': 'api-ca.cloud.med-1.com', 'vendor': 'Med-1' }, 'indosat-id': { 'name': 'Indonesia (ID)', 'host': 'iaas-api.indosat.com', 'vendor': 
'Indosat' }, 'indosat-na': { 'name': 'North America (NA)', 'host': 'iaas-usapi.indosat.com', 'vendor': 'Indosat' }, 'indosat-eu': { 'name': 'Europe (EU)', 'host': 'iaas-euapi.indosat.com', 'vendor': 'Indosat' }, 'indosat-au': { 'name': 'Australia (AU)', 'host': 'iaas-auapi.indosat.com', 'vendor': 'Indosat' }, 'indosat-af': { 'name': 'Africa (AF)', 'host': 'iaas-afapi.indosat.com', 'vendor': 'Indosat' }, 'bsnl-in': { 'name': 'India (IN)', 'host': 'api.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-na': { 'name': 'North America (NA)', 'host': 'usapi.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-eu': { 'name': 'Europe (EU)', 'host': 'euapi.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-au': { 'name': 'Australia (AU)', 'host': 'auapi.bsnlcloud.com', 'vendor': 'BSNL' }, 'bsnl-af': { 'name': 'Africa (AF)', 'host': 'afapi.bsnlcloud.com', 'vendor': 'BSNL' } } # Default API end-point for the base connection class. DEFAULT_REGION = 'na' BAD_CODE_XML_ELEMENTS = ( ('responseCode', SERVER_NS), ('responseCode', TYPES_URN), ('result', GENERAL_NS) ) BAD_MESSAGE_XML_ELEMENTS = ( ('message', SERVER_NS), ('message', TYPES_URN), ('resultDetail', GENERAL_NS) ) def get_params(func): @wraps(func) def paramed(*args, **kwargs): if kwargs: for k, v in kwargs.items(): old_key = k matches = re.findall(r'_(\w)', k) for match in matches: k = k.replace('_' + match, match.upper()) del kwargs[old_key] kwargs[k] = v params = kwargs result = func(args[0], params) else: result = func(args[0]) return result return paramed def dd_object_to_id(obj, obj_type, id_value='id'): """ Takes in a DD object or string and prints out it's id This is a helper method, as many of our functions can take either an object or a string, and we need an easy way of converting them :param obj: The object to get the id for :type obj: ``object`` :param func: The function to call, e.g. ex_get_vlan. Note: This function needs to return an object which has ``status`` attribute. 
:type func: ``function`` :rtype: ``str`` """ if isinstance(obj, obj_type): return getattr(obj, id_value) elif isinstance(obj, (basestring)): return obj else: raise TypeError( "Invalid type %s looking for basestring or %s" % (type(obj).__name__, obj_type.__name__) ) # TODO: use disutils.version when Travis CI fixed the pylint issue with version # This is a temporary workaround. def LooseVersion(version): return float(version) class NetworkDomainServicePlan(object): ESSENTIALS = "ESSENTIALS" ADVANCED = "ADVANCED" class NttCisRawResponse(RawResponse): pass class NttCisResponse(XmlResponse): def parse_error(self): if self.status == httplib.UNAUTHORIZED: raise InvalidCredsError(self.body) elif self.status == httplib.FORBIDDEN: raise InvalidCredsError(self.body) body = self.parse_body() if self.status == httplib.BAD_REQUEST: for response_code in BAD_CODE_XML_ELEMENTS: code = findtext(body, response_code[0], response_code[1]) if code is not None: break for message in BAD_MESSAGE_XML_ELEMENTS: message = findtext(body, message[0], message[1]) if message is not None: break raise NttCisAPIException(code=code, msg=message, driver=self.connection.driver) if self.status is not httplib.OK: raise NttCisAPIException(code=self.status, msg=body, driver=self.connection.driver) return self.body class NttCisAPIException(LibcloudError): def __init__(self, code, msg, driver): self.code = code self.msg = msg self.driver = driver def __str__(self): return "%s: %s" % (self.code, self.msg) def __repr__(self): return ("<NttCisAPIException: code='%s', msg='%s'>" % (self.code, self.msg)) class NttCisConnection(ConnectionUserAndKey): """ Connection class for the NttCis driver """ api_path_version_1 = '/oec' api_path_version_2 = '/caas' api_version_1 = 0.9 # Earliest version supported oldest_api_version = '2.2' # Latest version supported latest_api_version = '2.7' # Default api version active_api_version = '2.7' _orgId = None responseCls = NttCisResponse rawResponseCls = NttCisRawResponse 
allow_insecure = False def __init__(self, user_id, key, secure=True, host=None, port=None, url=None, timeout=None, proxy_url=None, api_version=None, **conn_kwargs): super(NttCisConnection, self).__init__( user_id=user_id, key=key, secure=secure, host=host, port=port, url=url, timeout=timeout, proxy_url=proxy_url) if conn_kwargs['region']: self.host = conn_kwargs['region']['host'] if api_version: if LooseVersion(api_version) < LooseVersion( self.oldest_api_version): msg = 'API Version specified is too old. No longer ' \ 'supported. Please upgrade to the latest version {}' \ .format(self.active_api_version) raise NttCisAPIException(code=None, msg=msg, driver=self.driver) elif LooseVersion(api_version) > LooseVersion( self.latest_api_version): msg = 'Unsupported API Version. The version specified is ' \ 'not release yet. Please use the latest supported ' \ 'version {}' \ .format(self.active_api_version) raise NttCisAPIException(code=None, msg=msg, driver=self.driver) else: # Overwrite default version using the version user specified self.active_api_version = api_version def add_default_headers(self, headers): headers['Authorization'] = \ ('Basic %s' % b64encode(b('%s:%s' % (self.user_id, self.key))).decode('utf-8')) headers['Content-Type'] = 'application/xml' return headers def request_api_1(self, action, params=None, data='', headers=None, method='GET'): action = "%s/%s/%s" % (self.api_path_version_1, self.api_version_1, action) return super(NttCisConnection, self).request( action=action, params=params, data=data, method=method, headers=headers) def request_api_2(self, path, action, params=None, data='', headers=None, method='GET'): action = "%s/%s/%s/%s" % (self.api_path_version_2, self.active_api_version, path, action) return super(NttCisConnection, self).request( action=action, params=params, data=data, method=method, headers=headers) def raw_request_with_orgId_api_1(self, action, params=None, data='', headers=None, method='GET'): action = "%s/%s" % 
(self.get_resource_path_api_1(), action) return super(NttCisConnection, self).request( action=action, params=params, data=data, method=method, headers=headers, raw=True) def request_with_orgId_api_1(self, action, params=None, data='', headers=None, method='GET'): action = "%s/%s" % (self.get_resource_path_api_1(), action) return super(NttCisConnection, self).request( action=action, params=params, data=data, method=method, headers=headers) def request_with_orgId_api_2(self, action, params=None, data='', headers=None, method='GET'): action = "%s/%s" % (self.get_resource_path_api_2(), action) return super(NttCisConnection, self).request( action=action, params=params, data=data, method=method, headers=headers) def paginated_request_with_orgId_api_2(self, action, params=None, data='', headers=None, method='GET', page_size=250): """ A paginated request to the MCP2.0 API This essentially calls out to request_with_orgId_api_2 for each page and yields the response to make a generator This generator can be looped through to grab all the pages. :param action: The resource to access (i.e. 'network/vlan') :type action: ``str`` :param params: Parameters to give to the action :type params: ``dict`` or ``None`` :param data: The data payload to be added to the request :type data: ``str`` :param headers: Additional header to be added to the request :type headers: ``str`` or ``dict`` or ``None`` :param method: HTTP Method for the request (i.e. 
'GET', 'POST') :type method: ``str`` :param page_size: The size of each page to be returned Note: Max page size in MCP2.0 is currently 250 :type page_size: ``int`` """ if params is None: params = {} params['pageSize'] = page_size resp = self.request_with_orgId_api_2(action, params, data, headers, method).object yield resp if len(resp) <= 0: return pcount = resp.get('pageCount') # pylint: disable=no-member psize = resp.get('pageSize') # pylint: disable=no-member pnumber = resp.get('pageNumber') # pylint: disable=no-member while int(pcount) >= int(psize): params['pageNumber'] = int(pnumber) + 1 resp = self.request_with_orgId_api_2(action, params, data, headers, method).object pcount = resp.get('pageCount') # pylint: disable=no-member psize = resp.get('pageSize') # pylint: disable=no-member pnumber = resp.get('pageNumber') # pylint: disable=no-member yield resp def get_resource_path_api_1(self): """ This method returns a resource path which is necessary for referencing resources that require a full path instead of just an ID, such as networks, and customer snapshots. """ return ("%s/%s/%s" % (self.api_path_version_1, self.api_version_1, self._get_orgId())) def get_resource_path_api_2(self): """ This method returns a resource path which is necessary for referencing resources that require a full path instead of just an ID, such as networks, and customer snapshots. """ return ("%s/%s/%s" % (self.api_path_version_2, self.active_api_version, self._get_orgId())) def wait_for_state(self, state, func, poll_interval=2, timeout=60, *args, **kwargs): """ Wait for the function which returns a instance with field status/state to match. Keep polling func until one of the desired states is matched :param state: Either the desired state (`str`) or a `list` of states :type state: ``str`` or ``list`` :param func: The function to call, e.g. ex_get_vlan. Note: This function needs to return an object which has ``status`` attribute. 
:type func: ``function`` :param poll_interval: The number of seconds to wait between checks :type poll_interval: `int` :param timeout: The total number of seconds to wait to reach a state :type timeout: `int` :param args: The arguments for func :type args: Positional arguments :param kwargs: The arguments for func :type kwargs: Keyword arguments :return: Result from the calling function. """ cnt = 0 result = None object_state = None state = state.lower() while cnt < timeout / poll_interval: result = func(*args, **kwargs) if isinstance(result, Node): object_state = result.state.lower() else: # BUG: need to use result.status.lower() or # will never match if client uses lower case object_state = result.status.lower() if object_state is state or object_state in state: return result sleep(poll_interval) cnt += 1 msg = 'Status check for object %s timed out' % (result) raise NttCisAPIException(code=object_state, msg=msg, driver=self.driver) def _get_orgId(self): """ Send the /myaccount API request to NTTC-CIS cloud and parse the 'orgId' from the XML response object. 
We need the orgId to use most of the other API functions """ if self._orgId is None: body = self.request_api_1('myaccount').object self._orgId = findtext(body, 'orgId', DIRECTORY_NS) return self._orgId def get_account_details(self): """ Get the details of this account :rtype: :class:`DimensionDataAccountDetails` """ body = self.request_api_1('myaccount').object return NttCisAccountDetails( user_name=findtext(body, 'userName', DIRECTORY_NS), full_name=findtext(body, 'fullName', DIRECTORY_NS), first_name=findtext(body, 'firstName', DIRECTORY_NS), last_name=findtext(body, 'lastName', DIRECTORY_NS), email=findtext(body, 'emailAddress', DIRECTORY_NS)) class NttCisAccountDetails(object): """ NTTCIS account class details """ def __init__(self, user_name, full_name, first_name, last_name, email): self.user_name = user_name self.full_name = full_name self.first_name = first_name self.last_name = last_name self.email = email class NttCisStatus(object): """ NTTCIS API pending operation status class action, request_time, user_name, number_of_steps, update_time, step.name, step.number, step.percent_complete, failure_reason, """ def __init__(self, action=None, request_time=None, user_name=None, number_of_steps=None, update_time=None, step_name=None, step_number=None, step_percent_complete=None, failure_reason=None): self.action = action self.request_time = request_time self.user_name = user_name self.number_of_steps = number_of_steps self.update_time = update_time self.step_name = step_name self.step_number = step_number self.step_percent_complete = step_percent_complete self.failure_reason = failure_reason def __repr__(self): return (('<NttCisStatus: action=%s, request_time=%s, ' 'user_name=%s, number_of_steps=%s, update_time=%s, ' 'step_name=%s, step_number=%s, ' 'step_percent_complete=%s, failure_reason=%s>') % (self.action, self.request_time, self.user_name, self.number_of_steps, self.update_time, self.step_name, self.step_number, self.step_percent_complete, 
self.failure_reason)) class NttCisNetwork(object): """ NTTCIS network with location. """ def __init__(self, id, name, description, location, private_net, multicast, status): self.id = str(id) self.name = name self.description = description self.location = location self.private_net = private_net self.multicast = multicast self.status = status def __repr__(self): return (('<NttCisNetwork: id=%s, name=%s, description=%s, ' 'location=%s, private_net=%s, multicast=%s>') % (self.id, self.name, self.description, self.location, self.private_net, self.multicast)) class NttCisNetworkDomain(object): """ NttCis network domain with location. """ def __init__(self, id, name, description, location, status, plan): self.id = str(id) self.name = name self.description = description self.location = location self.status = status self.plan = plan def __repr__(self): return (('<NttCisNetworkDomain: id=%s, name=%s, ' 'description=%s, location=%s, status=%s, plan=%s>') % (self.id, self.name, self.description, self.location, self.status, self.plan)) class NttCisPublicIpBlock(object): """ NTTCIS Public IP Block with location. """ def __init__(self, id, base_ip, size, location, network_domain, status): self.id = str(id) self.base_ip = base_ip self.size = size self.location = location self.network_domain = network_domain self.status = status def __repr__(self): return (('<NttCisNetworkDomain: id=%s, base_ip=%s, ' 'size=%s, location=%s, status=%s>') % (self.id, self.base_ip, self.size, self.location, self.status)) class NttCisServerCpuSpecification(object): """ A class that represents the specification of the CPU(s) for a node """ def __init__(self, cpu_count, cores_per_socket, performance): """ Instantiate a new :class:`NttCisServerCpuSpecification` :param cpu_count: The number of CPUs :type cpu_count: ``int`` :param cores_per_socket: The number of cores per socket, the recommendation is 1 :type cores_per_socket: ``int`` :param performance: The performance type, e.g. 
HIGHPERFORMANCE :type performance: ``str`` """ self.cpu_count = cpu_count self.cores_per_socket = cores_per_socket self.performance = performance def __repr__(self): return (('<NttCisServerCpuSpecification: ' 'cpu_count=%s, cores_per_socket=%s, ' 'performance=%s>') % (self.cpu_count, self.cores_per_socket, self.performance)) class NttCisServerDisk(object): """ A class that represents the disk on a server """ def __init__(self, id=None, scsi_id=None, size_gb=None, speed=None, state=None): """ Instantiate a new :class:`DimensionDataServerDisk` :param id: The id of the disk :type id: ``str`` :param scsi_id: Representation for scsi :type scsi_id: ``int`` :param size_gb: Size of the disk :type size_gb: ``int`` :param speed: Speed of the disk (i.e. STANDARD) :type speed: ``str`` :param state: State of the disk (i.e. PENDING) :type state: ``str`` """ self.id = id self.scsi_id = scsi_id self.size_gb = size_gb self.speed = speed self.state = state def __repr__(self): return (('<NttCisServerDisk: ' 'id=%s, size_gb=%s') % (self.id, self.size_gb)) class NttCisScsiController(object): """ A class that represents the disk on a server """ def __init__(self, id, adapter_type, bus_number, state): """ Instantiate a new :class:`DimensionDataServerDisk` :param id: The id of the controller :type id: ``str`` :param adapter_type: The 'brand' of adapter :type adapter_type: ``str`` :param bus_number: The bus number occupied on the virtual hardware :type bus_nubmer: ``str`` :param state: Curent state (i.e. NORMAL) :type speed: ``str`` :param state: State of the disk (i.e. 
PENDING) :type state: ``str`` """ self.id = id self.adapter_type = adapter_type self.bus_number = bus_number self.state = state def __repr__(self): return (('<NttCisScsiController: ' 'id=%s, adapter_type=%s, bus_number=%s, state=%s') % (self.id, self.adapter_type, self.bus_number, self.state)) class NttCisServerVMWareTools(object): """ A class that represents the VMWareTools for a node """ def __init__(self, status, version_status, api_version): """ Instantiate a new :class:`NttCisServerVMWareTools` object :param status: The status of VMWare Tools :type status: ``str`` :param version_status: The status for the version of VMWare Tools (i.e NEEDS_UPGRADE) :type version_status: ``str`` :param api_version: The API version of VMWare Tools :type api_version: ``str`` """ self.status = status self.version_status = version_status self.api_version = api_version def __repr__(self): return (('<NttCisServerVMWareTools ' 'status=%s, version_status=%s, ' 'api_version=%s>') % (self.status, self.version_status, self.api_version)) class NttCisSnapshot(object): """ NTTCIS Class representing server snapshots """ def __init__(self, server_id, service_plan, id=None, window_id=None, start_time=None, state=None, end_time=None, type=None, expiry_time=None, action=None): self.server_id = server_id self.service_plan = service_plan self.id = id self.window_id = window_id self.start_time = start_time self.end_time = end_time self.state = state self.end_time = end_time self.type = type self.expiry_time = expiry_time self.action = action def __repr__(self): return (('<NttCisSnapshots ' 'id=%s, start_time=%s, ' 'end_time=%s, self.type=%s, ' 'self.expiry_timne=%s, self.state=%s>') % (self.id, self.start_time, self.end_time, self.type, self.expiry_time, self.state)) class NttCisReservedIpAddress(object): """ NTTCIS Rerverse IPv4 address """ def __init__(self, datacenter_id, exclusive, vlan_id, ip, description=None): self.datacenter_id = datacenter_id self.exclusive = exclusive self.vlan_id = 
vlan_id self.ip = ip self.description = description def __repr__(self): return (('<NttCisReservedIpAddress ' 'datacenterId=%s, exclusiven=%s, vlanId=%s, ipAddress=%s,' ' description=-%s') % (self.datacenter_id, self.exclusive, self.vlan_id, self.ip, self.description)) class NttCisFirewallRule(object): """ NTTCIS Firewall Rule for a network domain """ def __init__(self, id, name, action, location, network_domain, status, ip_version, protocol, source, destination, enabled): self.id = str(id) self.name = name self.action = action self.location = location self.network_domain = network_domain self.status = status self.ip_version = ip_version self.protocol = protocol self.source = source self.destination = destination self.enabled = enabled def __repr__(self): return (('<NttCisFirewallRule: id=%s, name=%s, ' 'action=%s, location=%s, network_domain=%s, ' 'status=%s, ip_version=%s, protocol=%s, source=%s, ' 'destination=%s, enabled=%s>') % (self.id, self.name, self.action, self.location, self.network_domain, self.status, self.ip_version, self.protocol, self.source, self.destination, self.enabled)) """ class NttCisFirewallAddress(object): The source or destination model in a firewall rule def __init__(self, any_ip, ip_address, ip_prefix_size, port_begin, port_end, address_list_id, port_list_id): self.any_ip = any_ip self.ip_address = ip_address self.ip_prefix_size = ip_prefix_size self.port_list_id = port_list_id self.port_begin = port_begin self.port_end = port_end self.address_list_id = address_list_id self.port_list_id = port_list_id def __repr__(self): return ( '<NttCisFirewallAddress: any_ip=%s, ip_address=%s, ' 'ip_prefix_size=%s, port_begin=%s, port_end=%s, ' 'address_list_id=%s, port_list_id=%s>' % (self.any_ip, self.ip_address, self.ip_prefix_size, self.port_begin, self.port_end, self.address_list_id, self.port_list_id)) """ class NttCisFirewallAddress(object): """ The source or destination model in a firewall rule 9/4/18: Editing Class to use with 
ex_create_firewall_rtule method. Will haved to circle back and test for any other uses. """ def __init__(self, any_ip=None, ip_address=None, ip_prefix_size=None, port_begin=None, port_end=None, address_list_id=None, port_list_id=None): """ param any_ip: used to set ip address to "ANY" :param ip_address: Optional, an ip address of either IPv4 decimal notation or an IPv6 address :type ``str`` :param ip_prefix_size: An integer denoting prefix size. :type ``int`` :param port_begin: integer for an individual port or start of a list of ports if not using a port list :type ``int`` :param port_end: integer required if using a list of ports (NOT a port list but a list starting with port begin) :type ``int`` :param address_list_id: An id identifying an address list :type ``str`` :param port_list_id: An id identifying a port list :type ``str`` """ self.any_ip = any_ip self.ip_address = ip_address self.ip_prefix_size = ip_prefix_size self.port_list_id = port_list_id self.port_begin = port_begin self.port_end = port_end self.address_list_id = address_list_id self.port_list_id = port_list_id def __repr__(self): return ( '<NttCisFirewallAddress: any_ip=%s, ip_address=%s, ' 'ip_prefix_size=%s, port_begin=%s, port_end=%s, ' 'address_list_id=%s, port_list_id=%s>' % (self.any_ip, self.ip_address, self.ip_prefix_size, self.port_begin, self.port_end, self.address_list_id, self.port_list_id)) class NttCisNatRule(object): """ An IP NAT rule in a network domain """ def __init__(self, id, network_domain, internal_ip, external_ip, status): self.id = id self.network_domain = network_domain self.internal_ip = internal_ip self.external_ip = external_ip self.status = status def __repr__(self): return (('<NttCisNatRule: id=%s, status=%s>') % (self.id, self.status)) class NttCisAntiAffinityRule(object): """ Anti-Affinity rule for NTTCIS An Anti-Affinity rule ensures that servers in the rule will not reside on the same VMware ESX host. 
""" def __init__(self, id, node_list): """ Instantiate a new :class:`NttCisDataAntiAffinityRule` :param id: The ID of the Anti-Affinity rule :type id: ``str`` :param node_list: List of node ids that belong in this rule :type node_list: ``list`` of ``str`` """ self.id = id self.node_list = node_list def __repr__(self): return (('<NttCisAntiAffinityRule: id=%s>') % (self.id)) class NttCisVlan(object): """ NTTCIS VLAN. """ def __init__(self, id, name, description, location, network_domain, status, private_ipv4_range_address, private_ipv4_range_size, ipv6_range_address, ipv6_range_size, ipv4_gateway, ipv6_gateway): """ Initialize an instance of ``DimensionDataVlan`` :param id: The ID of the VLAN :type id: ``str`` :param name: The name of the VLAN :type name: ``str`` :param description: Plan text description of the VLAN :type description: ``str`` :param location: The location (data center) of the VLAN :type location: ``NodeLocation`` :param network_domain: The Network Domain that owns this VLAN :type network_domain: :class:`DimensionDataNetworkDomain` :param status: The status of the VLAN :type status: :class:`DimensionDataStatus` :param private_ipv4_range_address: The host address of the VLAN IP space :type private_ipv4_range_address: ``str`` :param private_ipv4_range_size: The size (e.g. '24') of the VLAN as a CIDR range size :type private_ipv4_range_size: ``int`` :param ipv6_range_address: The host address of the VLAN IP space :type ipv6_range_address: ``str`` :param ipv6_range_size: The size (e.g. 
'32') of the VLAN as a CIDR range size :type ipv6_range_size: ``int`` :param ipv4_gateway: The IPv4 default gateway address :type ipv4_gateway: ``str`` :param ipv6_gateway: The IPv6 default gateway address :type ipv6_gateway: ``str`` """ self.id = str(id) self.name = name self.location = location self.description = description self.network_domain = network_domain self.status = status self.private_ipv4_range_address = private_ipv4_range_address self.private_ipv4_range_size = private_ipv4_range_size self.ipv6_range_address = ipv6_range_address self.ipv6_range_size = ipv6_range_size self.ipv4_gateway = ipv4_gateway self.ipv6_gateway = ipv6_gateway def __repr__(self): return (('<NttCisVlan: id=%s, name=%s, ' 'description=%s, location=%s, status=%s>') % (self.id, self.name, self.description, self.location, self.status)) class NttCisPool(object): """ NttCis VIP Pool. """ def __init__(self, id, name, description, status, load_balance_method, health_monitor_id, service_down_action, slow_ramp_time): """ Initialize an instance of ``NttCisPool`` :param id: The ID of the pool :type id: ``str`` :param name: The name of the pool :type name: ``str`` :param description: Plan text description of the pool :type description: ``str`` :param status: The status of the pool :type status: :class:NttCisStatus` :param load_balance_method: The load balancer method :type load_balance_method: ``str`` :param health_monitor_id: The ID of the health monitor :type health_monitor_id: ``str`` :param service_down_action: Action to take when pool is down :type service_down_action: ``str`` :param slow_ramp_time: The ramp-up time for service recovery :type slow_ramp_time: ``int`` """ self.id = str(id) self.name = name self.description = description self.status = status self.load_balance_method = load_balance_method self.health_monitor_id = health_monitor_id self.service_down_action = service_down_action self.slow_ramp_time = slow_ramp_time def __repr__(self): return (('<NttCisPool: id=%s, name=%s, ' 
'description=%s, status=%s>') % (self.id, self.name, self.description, self.status)) class NttCisPoolMember(object): """ NTTCIS VIP Pool Member. """ def __init__(self, id, name, status, ip, port, node_id): """ Initialize an instance of ``NttCisPoolMember`` :param id: The ID of the pool member :type id: ``str`` :param name: The name of the pool member :type name: ``str`` :param status: The status of the pool :type status: :class:`NttCisStatus` :param ip: The IP of the pool member :type ip: ``str`` :param port: The port of the pool member :type port: ``int`` :param node_id: The ID of the associated node :type node_id: ``str`` """ self.id = str(id) self.name = name self.status = status self.ip = ip self.port = port self.node_id = node_id def __repr__(self): return (('NttCisPoolMember: id=%s, name=%s, ' 'ip=%s, status=%s, port=%s, node_id=%s>') % (self.id, self.name, self.ip, self.status, self.port, self.node_id)) class NttCisVIPNode(object): def __init__(self, id, name, status, ip, connection_limit='10000', connection_rate_limit='10000', health_monitor=None): """ Initialize an instance of :class:`NttCisVIPNode` :param id: The ID of the node :type id: ``str`` :param name: The name of the node :type name: ``str`` :param status: The status of the node :type status: :class:`NttCisStatus` :param ip: The IP of the node :type ip: ``str`` :param connection_limit: The total connection limit for the node :type connection_limit: ``int`` :param connection_rate_limit: The rate limit for the node :type connection_rate_limit: ``int`` """ self.id = str(id) self.name = name self.status = status self.ip = ip self.connection_limit = connection_limit self.connection_rate_limit = connection_rate_limit if health_monitor is not None: self.health_monitor_id = health_monitor def __repr__(self): return (('<NttCisVIPNode: id=%s, name=%s, ' 'status=%s, ip=%s>') % (self.id, self.name, self.status, self.ip)) class NttCisVirtualListener(object): """ NTTCIS Virtual Listener. 
""" def __init__(self, id, name, status, ip): """ Initialize an instance of :class:`NttCisVirtualListener` :param id: The ID of the listener :type id: ``str`` :param name: The name of the listener :type name: ``str`` :param status: The status of the listener :type status: :class:`NttCisStatus` :param ip: The IP of the listener :type ip: ``str`` """ self.id = str(id) self.name = name self.status = status self.ip = ip def __repr__(self): return (('<NttCisVirtualListener: id=%s, name=%s, ' 'status=%s, ip=%s>') % (self.id, self.name, self.status, self.ip)) class NttCisDefaultHealthMonitor(object): """ A default health monitor for a VIP (node, pool or listener) """ def __init__(self, id, name, node_compatible, pool_compatible): """ Initialize an instance of :class:`NttCisDefaultHealthMonitor` :param id: The ID of the monitor :type id: ``str`` :param name: The name of the monitor :type name: ``str`` :param node_compatible: Is a monitor capable of monitoring nodes :type node_compatible: ``bool`` :param pool_compatible: Is a monitor capable of monitoring pools :type pool_compatible: ``bool`` """ self.id = id self.name = name self.node_compatible = node_compatible self.pool_compatible = pool_compatible def __repr__(self): return (('<NttCisDefaultHealthMonitor: id=%s, name=%s>') % (self.id, self.name)) class NttCisPersistenceProfile(object): """ Each Persistence Profile declares the combination of Virtual Listener type and protocol with which it is compatible and whether or not it is compatible as a Fallback Persistence Profile. 
""" def __init__(self, id, name, compatible_listeners, fallback_compatible): """ Initialize an instance of :class:`NttCisPersistenceProfile` :param id: The ID of the profile :type id: ``str`` :param name: The name of the profile :type name: ``str`` :param compatible_listeners: List of compatible Virtual Listener types :type compatible_listeners: ``list`` of :class:`NttCisVirtualListenerCompatibility` :param fallback_compatible: Is capable as a fallback profile :type fallback_compatible: ``bool`` """ self.id = id self.name = name self.compatible_listeners = compatible_listeners self.fallback_compatible = fallback_compatible def __repr__(self): return (('NttCisPersistenceProfile: id=%s, name=%s>') % (self.id, self.name)) class NttCisDefaultiRule(object): """ A default iRule for a network domain, can be applied to a listener """ def __init__(self, id, name, compatible_listeners): """ Initialize an instance of :class:`NttCisefaultiRule` :param id: The ID of the iRule :type id: ``str`` :param name: The name of the iRule :type name: ``str`` :param compatible_listeners: List of compatible Virtual Listener types :type compatible_listeners: ``list`` of :class:`NttCisVirtualListenerCompatibility` """ self.id = id self.name = name self.compatible_listeners = compatible_listeners def __repr__(self): return (('<NttCisDefaultiRule: id=%s, name=%s>') % (self.id, self.name)) class NttCisVirtualListenerCompatibility(object): """ A compatibility preference for a persistence profile or iRule specifies which virtual listener types this profile or iRule can be applied to. 
""" def __init__(self, type, protocol): self.type = type self.protocol = protocol def __repr__(self): return (('<NttCisVirtualListenerCompatibility: ' 'type=%s, protocol=%s>') % (self.type, self.protocol)) class NttCisBackupDetails(object): """ NTTCIS Backup Details represents information about a targets backups configuration """ def __init__(self, asset_id, service_plan, status, clients=None): """ Initialize an instance of :class:`NttCisBackupDetails` :param asset_id: Asset identification for backups :type asset_id: ``str`` :param service_plan: The service plan for backups. i.e (Essentials) :type service_plan: ``str`` :param status: The overall status this backup target. i.e. (unregistered) :type status: ``str`` :param clients: Backup clients attached to this target :type clients: ``list`` of :class:`NttCisBackupClient` """ self.asset_id = asset_id self.service_plan = service_plan self.status = status self.clients = clients def __repr__(self): return (('<NttCisBackupDetails: id=%s>') % (self.asset_id)) class NttCisBackupClient(object): """ An object that represents a backup client """ def __init__(self, id, type, status, schedule_policy, storage_policy, download_url, alert=None, running_job=None): """ Initialize an instance of :class:`NttCisBackupClient` :param id: Unique ID for the client :type id: ``str`` :param type: The type of client that this client is :type type: :class:`NttCisBackupClientType` :param status: The states of this particular backup client. i.e. 
(Unregistered) :type status: ``str`` :param schedule_policy: The schedule policy for this client NOTE: NTTCIS only sends back the name of the schedule policy, no further details :type schedule_policy: ``str`` :param storage_policy: The storage policy for this client NOTE: NTTCIS only sends back the name of the storage policy, no further details :type storage_policy: ``str`` :param download_url: The download url for this client :type download_url: ``str`` :param alert: The alert configured for this backup client (optional) :type alert: :class:`NttCisBackupClientAlert` :param alert: The running job for the client (optional) :type alert: :class:`NttCisBackupClientRunningJob` """ self.id = id self.type = type self.status = status self.schedule_policy = schedule_policy self.storage_policy = storage_policy self.download_url = download_url self.alert = alert self.running_job = running_job def __repr__(self): return (('<NttCisBackupClient: id=%s>') % (self.id)) class NttCisBackupClientAlert(object): """ An alert for a backup client """ def __init__(self, trigger, notify_list=[]): """ Initialize an instance of :class:`NttCisBackupClientAlert` :param trigger: Trigger type for the client i.e. ON_FAILURE :type trigger: ``str`` :param notify_list: List of email addresses that are notified when the alert is fired :type notify_list: ``list`` of ``str`` """ self.trigger = trigger self.notify_list = notify_list def __repr__(self): return (('<NttCisBackupClientAlert: trigger=%s>') % (self.trigger)) class NttCisBackupClientRunningJob(object): """ A running job for a given backup client """ def __init__(self, id, status, percentage=0): """ Initialize an instance of :class:`NttCisBackupClientRunningJob` :param id: The unqiue ID of the job :type id: ``str`` :param status: The status of the job i.e. 
Waiting :type status: ``str`` :param percentage: The percentage completion of the job :type percentage: ``int`` """ self.id = id self.percentage = percentage self.status = status def __repr__(self): return (('<NttCisBackupClientRunningJob: id=%s>') % (self.id)) class NttCisBackupClientType(object): """ A client type object for backups """ def __init__(self, type, is_file_system, description): """ Initialize an instance of :class:`NttCisBackupClientType` :param type: The type of client i.e. (FA.Linux, MySQL, ect.) :type type: ``str`` :param is_file_system: The name of the iRule :type is_file_system: ``bool`` :param description: Description of the client :type description: ``str`` """ self.type = type self.is_file_system = is_file_system self.description = description def __repr__(self): return (('<NttCisBackupClientType: type=%s>') % (self.type)) class NttCisBackupStoragePolicy(object): """ A representation of a storage policy """ def __init__(self, name, retention_period, secondary_location): """ Initialize an instance of :class:`NttCisBackupStoragePolicy` :param name: The name of the storage policy i.e. 14 Day Storage Policy :type name: ``str`` :param retention_period: How long to keep the backup in days :type retention_period: ``int`` :param secondary_location: The secondary location i.e. 
Primary :type secondary_location: ``str`` """ self.name = name self.retention_period = retention_period self.secondary_location = secondary_location def __repr__(self): return (('<NttCisBackupStoragePolicy: name=%s>') % (self.name)) class NttCisBackupSchedulePolicy(object): """ A representation of a schedule policy """ def __init__(self, name, description): """ Initialize an instance of :class:`NttCisBackupSchedulePolicy` :param name: The name of the policy i.e 12AM - 6AM :type name: ``str`` :param description: Short summary of the details of the policy :type description: ``str`` """ self.name = name self.description = description def __repr__(self): return (('<NttCisBackupSchedulePolicy: name=%s>') % (self.name)) class NttCisTag(object): """ A representation of a Tag in NTTCIS A Tag first must have a Tag Key, then an asset is tag with a key and an option value. Tags can be queried later to filter assets and also show up on usage report if so desired. """ def __init__(self, asset_type, asset_id, asset_name, datacenter, key, value): """ Initialize an instance of :class:`NttCisTag` :param asset_type: The type of asset. 
Current asset types: SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE, PUBLIC_IP_BLOCK, ACCOUNT :type asset_type: ``str`` :param asset_id: The GUID of the asset that is tagged :type asset_id: ``str`` :param asset_name: The name of the asset that is tagged :type asset_name: ``str`` :param datacenter: The short datacenter name of the tagged asset :type datacenter: ``str`` :param key: The tagged key :type key: :class:`NttCisTagKey` :param value: The tagged value :type value: ``None`` or ``str`` """ self.asset_type = asset_type self.asset_id = asset_id self.asset_name = asset_name self.datacenter = datacenter self.key = key self.value = value def __repr__(self): return (('<NttCisTag: asset_name=%s, tag_name=%s, value=%s>') % (self.asset_name, self.key.name, self.value)) class NttCisTagKey(object): """ A representation of a Tag Key in NTTCIS A tag key is required to tag an asset """ def __init__(self, id, name, description, value_required, display_on_report): """ Initialize an instance of :class:`NttCisTagKey` :param id: GUID of the tag key :type id: ``str`` :param name: Name of the tag key :type name: ``str`` :param description: Description of the tag key :type description: ``str`` :param value_required: If a value is required for this tag key :type value_required: ``bool`` :param display_on_report: If this tag key should be displayed on usage reports :type display_on_report: ``bool`` """ self.id = id self.name = name self.description = description self.value_required = value_required self.display_on_report = display_on_report def __repr__(self): return (('NttCisTagKey: id=%s name=%s>') % (self.id, self.name)) class NttCisIpAddressList(object): """ NttCis IP Address list """ def __init__(self, id, name, description, ip_version, ip_address_collection, state, create_time, child_ip_address_lists=None): """" Initialize an instance of :class:`NttCisIpAddressList` :param id: GUID of the IP Address List key :type id: ``str`` :param name: Name of the IP Address List :type name: 
``str`` :param description: Description of the IP Address List :type description: ``str`` :param ip_version: IP version. E.g. IPV4, IPV6 :type ip_version: ``str`` :param ip_address_collection: Collection of NttCisIpAddress :type ip_address_collection: ``List`` :param state: IP Address list state :type state: ``str`` :param create_time: IP Address List created time :type create_time: ``date time`` :param child_ip_address_lists: List of IP address list to be included :type child_ip_address_lists: List of :class:'NttCisIpAddressList' """ self.id = id self.name = name self.description = description self.ip_version = ip_version self.ip_address_collection = ip_address_collection self.state = state self.create_time = create_time self.child_ip_address_lists = child_ip_address_lists def __repr__(self): return ('<NttCisIpAddressList: id=%s, name=%s, description=%s, ' 'ip_version=%s, ip_address_collection=%s, state=%s, ' 'create_time=%s, child_ip_address_lists=%s>' % (self.id, self.name, self.description, self.ip_version, self.ip_address_collection, self.state, self.create_time, self.child_ip_address_lists)) class NttCisChildIpAddressList(object): """ NttCis Child IP Address list """ def __init__(self, id, name): """" Initialize an instance of :class:`NttCisDataChildIpAddressList` :param id: GUID of the IP Address List key :type id: ``str`` :param name: Name of the IP Address List :type name: ``str`` """ self.id = id self.name = name def __repr__(self): return ('<NttCisChildIpAddressList: id=%s, name=%s>' % (self.id, self.name)) class NttCisIpAddress(object): """ A representation of IP Address in NttCis """ def __init__(self, begin, end=None, prefix_size=None): """ Initialize an instance of :class:`NttCisIpAddress` :param begin: IP Address Begin :type begin: ``str`` :param end: IP Address end :type end: ``str`` :param prefixSize: IP Address prefix size :type prefixSize: ``int`` """ self.begin = begin self.end = end self.prefix_size = prefix_size def __repr__(self): return 
('<NttCisIpAddress: begin=%s, end=%s, prefix_size=%s>' % (self.begin, self.end, self.prefix_size)) class NttCisPortList(object): """ NttCis Port list """ def __init__(self, id, name, description, port_collection, child_portlist_list, state, create_time): """" Initialize an instance of :class:`DNttCisPortList` :param id: GUID of the Port List key :type id: ``str`` :param name: Name of the Port List :type name: ``str`` :param description: Description of the Port List :type description: ``str`` :param port_collection: Collection of NttCisPort :type port_collection: ``List`` :param child_portlist_list: Collection of NttCisChildPort :type child_portlist_list: ``List`` :param state: Port list state :type state: ``str`` :param create_time: Port List created time :type create_time: ``date time`` """ self.id = id self.name = name self.description = description self.port_collection = port_collection self.child_portlist_list = child_portlist_list self.state = state self.create_time = create_time def __repr__(self): return ( "<NttCisPortList: id=%s, name=%s, description=%s, " "port_collection=%s, child_portlist_list=%s, state=%s, " "create_time=%s>" % (self.id, self.name, self.description, self.port_collection, self.child_portlist_list, self.state, self.create_time)) class NttCisChildPortList(object): """ NttCis Child Port list """ def __init__(self, id, name): """" Initialize an instance of :class:`NttCisChildIpAddressList` :param id: GUID of the child port list key :type id: ``str`` :param name: Name of the child port List :type name: ``str`` """ self.id = id self.name = name def __repr__(self): return ('<NttCisChildPortList: id=%s, name=%s>' % (self.id, self.name)) class NttCisPort(object): """ A representation of Port in NTTCIS """ def __init__(self, begin, end=None): """ Initialize an instance of :class:`NttCisPort` :param begin: Port Number Begin :type begin: ``str`` :param end: Port Number end :type end: ``str`` """ self.begin = begin self.end = end def __repr__(self): 
return ('<NttCisPort: begin=%s, end=%s>' % (self.begin, self.end)) class NttCisNic(object): """ A representation of Network Adapter in NTTCIS """ def __init__(self, private_ip_v4=None, vlan=None, network_adapter_name=None): """ Initialize an instance of :class:`NttCisNic` :param private_ip_v4: IPv4 :type private_ip_v4: ``str`` :param vlan: Network VLAN :type vlan: class: NttCisVlan or ``str`` :param network_adapter_name: Network Adapter Name :type network_adapter_name: ``str`` """ self.private_ip_v4 = private_ip_v4 self.vlan = vlan self.network_adapter_name = network_adapter_name def __repr__(self): return ('<NttCisNic: private_ip_v4=%s, vlan=%s,' 'network_adapter_name=%s>' % (self.private_ip_v4, self.vlan, self.network_adapter_name)) # Dynamically create classes from returned XML. Leaves the API as the # single authoritative source. class ClassFactory(object): pass attrs = {} def processor(mapping, name=None): """ Closure that keeps the deepcopy of the original dict converted to XML current. 
    :param mapping: The converted XML to dict/lists
    :type mapping: ``dict``
    :param name: (Optional) what becomes the class name if provided
    :type: ``str``
    :return: Nothing
    """
    mapping = mapping
    # the map_copy will have keys deleted after the key and value are processed
    map_copy = deepcopy(mapping)

    def add_items(key, value, name=None):
        """
        Add items to the global attr dict, then delete key, value from map copy

        :param key: from the process function becomes the attribute name
        :type key: ``str``
        :param value: The value of the property and may be a dict
        :type value: ``str``
        :param name: Name of class, often same as key
        :type name: ``str``
        """
        # Three cases: merge into an existing named bucket, start a new
        # named bucket, or add a plain top-level attribute.
        if name in attrs:
            attrs[name].update({key: value})
        elif name is not None:
            attrs[name] = value
        else:
            attrs.update({key: value})
        # trim the copy of the mapping
        if key in map_copy:
            del map_copy[key]
        elif key in map_copy[name]:
            del map_copy[name][key]
            # drop the named bucket once it has been emptied
            if len(map_copy[name]) == 0:
                del map_copy[name]

    def handle_map(map, name):
        # Turn a mapping of XML-derived values into either the mapping
        # itself (all scalars), a processed sequence, a (key, class)
        # pair, or a dict of generated classes, depending on value types.
        tmp = {}
        types = [type(x) for x in map.values()]
        if XmlListConfig not in types and \
                XmlDictConfig not in types and dict not in types:
            return map
        elif XmlListConfig in types:
            result = handle_seq(map, name)
            return result
        else:
            for k, v in map.items():
                if isinstance(v, str):
                    tmp.update({k: v})
                if isinstance(v, dict):
                    cls = build_class(k.capitalize(), v)
                    tmp.update({k: cls})
                elif isinstance(v, XmlDictConfig):
                    # NOTE(review): returns on the first XmlDictConfig
                    # value, discarding later keys — confirm intended.
                    cls = build_class(k.capitalize(), v)
                    return (k, cls)
            return tmp

    def handle_seq(seq, name):
        # Turn a sequence of XML-derived values into a list of generated
        # classes (list input) or, for a mapping whose values are
        # sequences, a dict keyed by tag name.
        tmp = {}
        if isinstance(seq, list):
            tmp = []
            for _ in seq:
                cls = build_class(name.capitalize(), _)
                tmp.append(cls)
            return tmp
        for k, v in seq.items():
            if isinstance(v, MutableSequence):
                for _ in v:
                    if isinstance(_, Mapping):
                        types = [type(x) for x in _.values()]
                        if XmlDictConfig in types:
                            result = handle_map(_, k)
                            if isinstance(result, tuple):
                                tmp.update({result[0]: result[1]})
                            else:
                                tmp.update({k: result})
                        else:
                            tmp_list = [build_class(k.capitalize(), i)
                                        for i in v]
                            tmp[k] = tmp_list
            elif isinstance(v, str):
                tmp.update({k: v})
        return tmp

    def build_class(key, value):
        # Generate a dynamic NttCis<Key> class and instantiate it with
        # the processed value.
        klass = class_factory(key.capitalize(), value)
        return klass(value)

    def process(mapping):
        """
        This function is recursive, creating attributes for the class
        factory by taking apart the elements in the dictionary.  Thus,
        the calls to handle_seq or handle_map

        :param mapping: the dictionary converted from XML
        :return: itself (recursive)
        """
        for k1, v1 in mapping.items():
            if isinstance(v1, Mapping):
                types = [type(v) for v in v1.values()]
                if MutableSequence not in types and dict not in types:
                    # all-scalar mapping: one class for the whole mapping
                    result = handle_map(v1, k1)
                    cls = build_class(k1.capitalize(), result)
                    add_items(k1, cls)
                elif XmlListConfig in types:
                    result = handle_seq(v1, k1)
                    cls = build_class(list(v1)[0], result)
                    add_items(k1, cls)
                elif dict in types:
                    result = handle_map(v1, k1)
                    cls = build_class(k1.capitalize(), result)
                    add_items(k1, cls, k1)
            elif isinstance(v1, list):
                # build one generated class per dict element, collected
                # under the shared key
                tmp1 = {}
                tmp2 = {}
                tmp2[k1] = []
                for i, j in enumerate(v1):
                    if isinstance(j, dict):
                        key = list(j)[0]
                        result = handle_map(j, key)
                        tmp1[k1 + str(i)] = build_class(k1, result)
                        tmp2[k1].append(tmp1[k1 + str(i)])
                if tmp2:
                    add_items(k1, tmp2[k1], k1)
            elif isinstance(v1, str):
                add_items(k1, v1)
        # finished once every key has been consumed from the copy
        if len(map_copy) == 0:
            return 1

    # NOTE(review): the flattened source shows a single
    # "return process(mapping)"; it is placed here as the initial call so
    # that processor() actually populates ``attrs`` — confirm against the
    # upstream file, which may also recurse inside process().
    return process(mapping)


def class_factory(cls_name, attrs):
    """
    This function takes a name and a dictionary to create a class.
    The class has an init method, an iter for retrieving properties,
    and, finally, a repr for returning the instance

    :param cls_name: The name to be tacked onto the suffix NttCis
    :type cls_name: ``str``
    :param attrs: The attributes and values for an instance
    :type attrs: ``dict``
    :return: a class that inherits from ClassFactory
    :rtype: ``ClassFactory``
    """

    def __init__(self, *args, **kwargs):
        # copy every attribute from the backing dict onto the instance
        for key in attrs:
            setattr(self, key, attrs[key])
        # NOTE(review): _get_state is not defined in this factory, so it
        # is presumably one of the generated attributes — confirm.
        if cls_name == "NttCisServer":
            self.state = self._get_state()

    def __iter__(self):
        # yield attribute values in insertion order
        for name in self.__dict__:
            yield getattr(self, name)

    def __repr__(self):
        values = ', '.join('{}={!r}'.format(*i)
                           for i in zip(self.__dict__, self))
        return '{}({})'.format(self.__class__.__name__, values)

    cls_attrs = dict(
        __init__=__init__,
        __iter__=__iter__,
        __repr__=__repr__)

    return type("NttCis{}".format(cls_name), (ClassFactory,), cls_attrs)


class XmlListConfig(list):
    """
    Creates a class from XML elements that make a list.
    If a list of XML elements with attributes, the attributes
    are passed to XmlDictConfig.
    """

    def __init__(self, elem_list):
        for element in elem_list:
            if element is not None:
                # treat like dict
                # NOTE(review): len(element) >= 0 is always true, which
                # makes the elif branches below unreachable — confirm
                # against upstream whether this should be "> 0".
                if len(element) >= 0 or element[0].tag != element[1].tag:
                    self.append(XmlDictConfig(element))
                # treat like list
                elif element[0].tag == element[1].tag:
                    # property refers to an element used repeatedly
                    # in the XML for data centers only
                    if 'property' in element.tag:
                        self.append({element.attrib.get('name'):
                                     element.attrib.get('value')})
                    else:
                        self.append(element.attrib)
            elif element.text:
                text = element.text.strip()
                if text:
                    self.append(text)


class XmlDictConfig(dict):
    """
    Inherits from dict.  Looks for XML elements, such as attrib, that
    can be converted to a dictionary.  Any XML element that contains
    other XML elements, will be passed to XmlListConfig
    """

    def __init__(self, parent_element):
        if parent_element.items():
            if 'property' in parent_element.tag:
                self.update({parent_element.attrib.get('name'):
                             parent_element.attrib.get('value')})
            else:
                self.update(dict(parent_element.items()))
        for element in parent_element:
            if len(element) > 0:
                # treat like dict - we assume that if the first two tags
                # in a series are different, then they are all different.
                if len(element) == 1 or element[0].tag != element[1].tag:
                    elem_dict = XmlDictConfig(element)
                # treat like list - we assume that if the first two tags
                # in a series are the same, then the rest are the same.
                else:
                    # here, we put the list in dictionary; the key is the
                    # tag name the list elements all share in common, and
                    # the value is the list itself
                    elem_dict = {element[0].tag.split('}')[1]:
                                 XmlListConfig(element)}
                # if the tag has attributes, add those to the dict
                if element.items():
                    elem_dict.update(dict(element.items()))
                self.update({element.tag.split('}')[1]: elem_dict})
            # this assumes that if you've got an attribute in a tag,
            # you won't be having any text. This may or may not be a
            # good idea -- time will tell. It works for the way we are
            # currently doing XML configuration files...
            elif element.items():
                # It is possible to have duplicate element tags.
                # If so, convert to a dict of lists
                if element.tag.split('}')[1] in self:
                    if isinstance(self[element.tag.split('}')[1]], list):
                        self[element.tag.split('}')[1]].\
                            append(dict(element.items()))
                    else:
                        # promote the existing single entry to a list
                        # before appending the duplicate
                        tmp_list = list()
                        tmp_dict = dict()
                        for k, v in self[element.tag.split('}')[1]].items():
                            if isinstance(k, XmlListConfig):
                                tmp_list.append(k)
                            else:
                                tmp_dict.update({k: v})
                        tmp_list.append(tmp_dict)
                        tmp_list.append(dict(element.items()))
                        self[element.tag.split('}')[1]] = tmp_list
                else:
                    self.update({element.tag.split('}')[1]:
                                 dict(element.items())})
            # finally, if there are no child tags and no attributes,
            # extract the text
            else:
                self.update({element.tag.split('}')[1]: element.text})


def process_xml(xml):
    """
    Take the xml and put it into a dictionary, then process the
    dictionary recursively.  This returns a class based on the XML API.
    Thus, properties will have the camel case found in the Java XML.

    This a trade-off to reduce the number of "static" classes
    that all have to be synchronized with any changes in the API.

    :param xml: The serialized version of the XML returned from
        Cloud Control
    :return: a dynamic class that inherits from ClassFactory
    :rtype: `ClassFactory`
    """
    global attrs
    tree = etree.parse(BytesIO(xml))
    root = tree.getroot()
    elem = root.tag.split('}')[1].capitalize()
    items = dict(root.items())
    # paged responses are lists of elements; others are a single dict
    if 'pageNumber' in items:
        converted_xml = XmlListConfig(root)
        processor(converted_xml[0])
    else:
        converted_xml = XmlDictConfig(root)
        processor(converted_xml)
    klass = class_factory(elem.capitalize(), attrs)
    cls = klass(attrs)
    # reset the module-level accumulator for the next call
    attrs = {}
    return cls
ByteInternet/libcloud
libcloud/common/nttcis.py
Python
apache-2.0
74,157
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. """ This module provides a client class for APP BLB. """ import copy import json import logging import uuid import sys from baidubce import bce_base_client from baidubce.auth import bce_v1_signer from baidubce.http import bce_http_client from baidubce.http import handler from baidubce.http import http_methods from baidubce import utils from baidubce.utils import required from baidubce import compat if sys.version < '3': sys.setdefaultencoding('utf-8') _logger = logging.getLogger(__name__) class AppBlbClient(bce_base_client.BceBaseClient): """ APP BLB base sdk client """ version = b'/v1' def __init__(self, config=None): bce_base_client.BceBaseClient.__init__(self, config) def _merge_config(self, config=None): """ :param config: :type config: baidubce.BceClientConfiguration :return: """ if config is None: return self.config else: new_config = copy.copy(self.config) new_config.merge_non_none_values(config) return new_config def _send_request(self, http_method, path, body=None, headers=None, params=None, config=None, body_parser=None): config = self._merge_config(config) if body_parser is None: body_parser = handler.parse_json if headers is None: headers = {b'Accept': b'*/*', b'Content-Type': b'application/json;charset=utf-8'} return bce_http_client.send_request( config, bce_v1_signer.sign, [handler.parse_error, body_parser], http_method, path, body, headers, params) 
@required(vpc_id=(bytes, str), subnet_id=(bytes, str)) def create_app_loadbalancer(self, vpc_id, subnet_id, name=None, desc=None, client_token=None, config=None): """ Create a app LoadBalancer with the specified options. :param name: the name of LoadBalancer to create :type name: string :param desc: The description of LoadBalancer :type desc: string :param vpc_id: id of vpc which the LoadBalancer belong to :type vpc_id: string :param subnet_id: id of subnet which the LoadBalancer belong to :type subnet_id: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) body['vpcId'] = compat.convert_to_string(vpc_id) body['subnetId'] = compat.convert_to_string(subnet_id) return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str)) def update_app_loadbalancer(self, blb_id, name=None, desc=None, client_token=None, config=None): """ Modify the special attribute to new value of the LoadBalancer owned by the user. :param name: name of LoadBalancer to describe :type name: string :param blb_id: id of LoadBalancer to describe :type blb_id: string :param desc: The description of LoadBalancer :type desc: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id) params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) return self._send_request(http_methods.PUT, path, json.dumps(body), params=params, config=config) def describe_app_loadbalancers(self, address=None, name=None, blb_id=None, bcc_id=None, marker=None, max_keys=None, config=None): """ Return a list of LoadBalancers :param address: Intranet service address in dotted decimal notation :type address: string :param name: name of LoadBalancer to describe :type name: string :param blb_id: id of LoadBalancer to describe :type blb_id: string :param bcc_id: bcc which bind the LoadBalancers :type bcc_id: string :param marker: The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb') params = {} if address is not None: params[b'address'] = address if name is not None: params[b'name'] = name if blb_id is not None: params[b'blbId'] = blb_id if bcc_id is not None: params[b'bccId'] = bcc_id if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_loadbalancer_detail(self, blb_id, config=None): """ Return detail imformation of specific LoadBalancer :param blb_id: id of LoadBalancer to describe :type blb_id: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id) return self._send_request(http_methods.GET, path, config=config) @required(blb_id=(bytes, str)) def delete_app_loadbalancer(self, blb_id, client_token=None, config=None): """ delete the LoadBalancer owned by the user. :param blb_id: id of LoadBalancer to describe :type blb_id: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id) params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token return self._send_request(http_methods.DELETE, path, params=params, config=config) """ Listener API """ @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str)) def create_app_tcp_listener(self, blb_id, listener_port, scheduler, client_token=None, config=None): """ Create a app tcp listener rule with the specified options. :param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'TCPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler) } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str)) def create_app_udp_listener(self, blb_id, listener_port, scheduler, client_token=None, config=None): """ Create a app udp listener rule with the specified options. 
:param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'UDPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler) } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str)) def create_app_http_listener(self, blb_id, listener_port, scheduler, keep_session=None, keep_session_type=None, keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, redirect_port=None, client_token=None, config=None): """ Create a app http listener rule with the specified options. 
:param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param keep_session: Whether to enable the session hold function, that is,the request sent by the same client will reach the same backend server :value true or false default:false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite' default:insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000 default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden if and only if session persistence is enabled and keep_session_type="rewrite" :type keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: False :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout:int :param redirect_port: Forward the request received by this listener to the HTTPS listener, which is specified by the HTTPS listener. :type redirect_port:int :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler)} if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if redirect_port is not None: body['redirectPort'] = redirect_port return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str), cert_ids=list) def create_app_https_listener(self, blb_id, listener_port, scheduler, cert_ids, keep_session=None, keep_session_type=None, keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, ie6_compatible=None, encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ Create a app https listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param cert_ids: The certificate to be loaded by the listener. 
:type cert_ids: List<String> :param keep_session: Whether to enable the session hold function, that is, the request sent by the same client will reach the same backend server :value true or false, default: false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite', default:insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000, default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden if and only if session persistence is enabled and keep_session_type="rewrite" :type keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: flase :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout: int :param ie6_compatible: compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". 
type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPSlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler), 'certIds': cert_ids} if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if ie6_compatible is not None: body['ie6Compatible'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, scheduler=(bytes, str), cert_ids=list) def create_app_ssl_listener(self, blb_id, listener_port, scheduler, cert_ids, ie6_compatible=None, 
encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ Create a app ssl listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param cert_ids: The SSL certificate to be loaded by the listener. Currently HTTPS listeners can only bind one SSL certificate. :type cert_ids: List<String> :param ie6_compatible: compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'SSLlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'scheduler': compat.convert_to_string(scheduler), 'certIds': cert_ids} if ie6_compatible is not None: body['ie6Compatible'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_tcp_listener(self, blb_id, listener_port, scheduler=None, client_token=None, config=None): """ update a app tcp listener rule with the specified options. :param blb_id: the id of blb which the listener work on :type blb_id:string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port:int :param scheduler balancing algorithm :value 'RoundRobin'or'LeastConnection'or'Hash' :type scheduler:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'TCPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_udp_listener(self, blb_id, listener_port, scheduler=None, client_token=None, config=None): """ update a app udp listener rule with the specified options. :param blb_id: the id of blb which the listener work on :type blb_id:string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port:int :param scheduler balancing algorithm :value 'RoundRobin'or'LeastConnection'or'Hash' :type scheduler:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'UDPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = { 'scheduler': compat.convert_to_string(scheduler) } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_http_listener(self, blb_id, listener_port, scheduler=None, keep_session=None, keep_session_type=None, keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, redirect_port=None, client_token=None, config=None): """ update a app http listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: Port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: Balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param keep_session: Whether to enable the session hold function, that is, the request sent by the same client will reach the same backend server :value true or false, default:false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite', default:insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000, default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden,if and only if session persistence is enabled and keep_session_type="rewrite" :type 
keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: flase :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout: int :param redirect_port: Forward the request received by this listener to the HTTPS listener, which is specified by the HTTPS listener. :type redirect_port: int :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if redirect_port is not None: body['redirectPort'] = redirect_port return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_https_listener(self, blb_id, listener_port, scheduler=None, keep_session=None, keep_session_type=None, 
keep_session_timeout=None, keep_session_cookie_name=None, x_forward_for=None, server_timeout=None, cert_ids=None, ie6_compatible=None, encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ update a app https listener rule with the specified options. :param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: Port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: Balancing algorithm :value 'RoundRobin' or 'LeastConnection' or 'Hash' :type scheduler: string :param keep_session: Whether to enable the session hold function, that is, the request sent by the same client will reach the same backend server :value true or false, default: false :type keep_session: bool :param keep_session_type: The cookie handling method maintained by the session, valid only if the session is held open :value 'insert' or 'rewrite', default: insert :type keep_session_type: string :param keep_session_timeout: The time the cookie is kept in session (in seconds), valid only if the session is held open :value 1-15552000, default:3600 :type keep_session_timeout: int :param keep_session_cookie_name: The session keeps the name of the cookie that needs to be overridden,if and only if session persistence is enabled and keep_session_type="rewrite" :type keep_session_cookie_name: int :param x_forward_for: Whether to enable the real IP address of the client, the backend server can obtain the real address of the client through the X-Forwarded-For HTTP header. :value true or false, default: False :type x_forward_for: bool :param server_timeout: Backend server maximum timeout (unit: second) :value 1-3600, default: 30 :type server_timeout: int :param cert_ids: The SSL certificate to be loaded by the listener. Currently HTTPS listeners can only bind one SSL certificate. 
:type cert_ids:List<String> :param ie6_compatible: Is it compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPSlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) if keep_session is not None: body['keepSession'] = keep_session if keep_session_type is not None: body['keepSessionType'] = \ compat.convert_to_string(keep_session_type) if keep_session_timeout is not None: body['keepSessionTimeout'] = keep_session_timeout if keep_session_cookie_name is not None: body['keepSessionCookieName'] = keep_session_cookie_name if x_forward_for is not None: body['xForwardFor'] = x_forward_for if server_timeout is not None: body['serverTimeout'] = server_timeout if cert_ids is not None: body['certIds'] = cert_ids if ie6_compatible is not None: body['compatibleIE'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def update_app_ssl_listener(self, blb_id, listener_port, scheduler=None, cert_ids=None, ie6_compatible=None, encryption_type=None, encryption_protocols=None, dual_auth=None, client_certIds=None, client_token=None, config=None): """ update a app ssl listener rule with the specified options. 
:param blb_id: The id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param scheduler: balancing algorithm :value 'RoundRobin' or 'LeastConnection' :type scheduler: string :param cert_ids: The SSL certificate to be loaded by the listener. Currently HTTPS listeners can only bind one SSL certificate. :type cert_ids: List<String> :param ie6_compatible: compatible with IE6 HTTPS request (the protocol format is earlier SSL3.0, the security is poor) :value true or false, default: true :type ie6_compatible: bool :param encryption_type: Encryption options, support three types: compatibleIE/incompatibleIE/userDefind, corresponding to: IE-compatible encryption/disabled unsecure encryption/custom encryption, when encryptionType is valid and legitimate, ie6Compatible field transfer value will not take effect type: encryption_type:string :param encryption_protocols: When the encryptionType value is userDefind, the list of protocol types is a string list composed of four protocols: "sslv3", "tlsv10", "tlsv11", "tlsv12". type: encryption_protocols:list :param dual_auth: Whether to Open Two-way Authentication, default:false :type dual_auth: boolean :param client_certIds: When dualAuth is true, the loaded client certificate chain :type client_certIds: list :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'SSLlistener') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token params[b'listenerPort'] = listener_port body = {} if scheduler is not None: body['scheduler'] = compat.convert_to_string(scheduler) if cert_ids is not None: body['certIds'] = cert_ids if ie6_compatible is not None: body['compatibleIE'] = ie6_compatible if encryption_type is not None: body['encryptionType'] = \ compat.convert_to_string(encryption_type) if encryption_protocols is not None: body['encryptionProtocols'] = encryption_protocols if dual_auth is not None: body['dualAuth'] = dual_auth if client_certIds is not None: body['clientCertIds'] = client_certIds return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_tcp_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app tcp listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'TCPlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_udp_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app udp listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'UDPlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_http_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app http listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_https_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app https listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'HTTPSlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_ssl_listener(self, blb_id, listener_port=None, marker=None, max_keys=None, config=None): """ get app ssl listeners identified by bibID :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port to query :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. :type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'SSLlistener') params = {} if listener_port is not None: params[b'listenerPort'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), portList=list) def delete_app_listeners(self, blb_id, portList, client_token=None, config=None): """ Release app listener under the specified LoadBalancer, the listener is specified by listening to the port. 
:param blb_id: id of LoadBalancer :type blb_id:string :param portList: The ports of listeners to be released :type portList:list<int> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'listener') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} body['portList'] = portList return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, app_policy_vos=list) def create_policys(self, blb_id, listener_port, app_policy_vos, client_token=None, config=None): """ Create policys. :param blb_id: the id of blb which the listener work on :type blb_id: string :param listener_port: port to be linstened owned by listener :value 1-65535 :type listener_port: int :param app_policy_vos policy list the listener binds. If the listener type is TCP, there is only one policy and only the full match is supported. https://cloud.baidu.com/doc/BLB/API.html#AppPolicy :type app_policy_vos: list<AppPolicy> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'policys') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'listenerPort': listener_port, 'appPolicyVos': app_policy_vos } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), listener_port=int) def describe_policys(self, blb_id, listener_port, marker=None, max_keys=None, config=None): """ get policys :param blb_id the id of blb which the listener work on :type blb_id:string :param listener_port The listener port used by listener :type listener_port:int :param marker The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys The optional parameter to specifies the max number of list result to return. The default value is 1000. :type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'policys') params = {} params[b'port'] = listener_port if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), listener_port=int, policys_list=list) def delete_policys(self, blb_id, listener_port, policys_list, client_token=None, config=None): """ Release the listener under the specified LoadBalancer, the listener is specified by listening to the port. 
:param blb_id: id of LoadBalancer :type blb_id:string :param listener_port The listener port used by listener :type listener_port:int :param policys_list All policy identifiers to be released :type policys_list:list<str> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'policys') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'port': listener_port, 'policyIdList': policys_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) """ ServerGroup API """ @required(blb_id=(bytes, str)) def create_app_server_group(self, blb_id, name=None, desc=None, backend_server_list=None, client_token=None, config=None): """ create server group for the specified LoadBalancer, support batch add :param blb_id: id of LoadBalancer :type blb_id:string :param name: name of server group :type name:string :param desc: description of server group :type desc:string :param backend_server_list List of backend servers to be added https://cloud.baidu.com/doc/BLB/API.html#AppBackendServer :type backend_server_list:List<AppBackendServer> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) if backend_server_list is not None: body['backendServerList'] = backend_server_list return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def update_app_server_group(self, blb_id, sg_id, name=None, desc=None, client_token=None, config=None): """ update the information of the app server group of the specified LoadBalancer :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group to be updated :type sg_id:string :param name: name of server group :type name:string :param desc: description of server group :type desc:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} body['sgId'] = compat.convert_to_string(sg_id) if name is not None: body['name'] = compat.convert_to_string(name) if desc is not None: body['desc'] = compat.convert_to_string(desc) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str)) def describe_app_server_group(self, blb_id, name=None, exactly_match=None, marker=None, max_keys=None, config=None): """ Query the imformation of app server group of the specified LoadBalancer :param blb_id: Id of LoadBalancer :type blb_id:string :param name: name of server group :type name:string :param exactly_match: Set whether the name matches globally :type exactly_match:boolean :param marker: The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys: The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} if name is not None: params[b'name'] = name if exactly_match is not None: params[b'exactlyMatch'] = exactly_match if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def delete_app_server_group(self, blb_id, sg_id, client_token=None, config=None): """ delete the app server group of the specified LoadBalancer, :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group to be updated :type sg_id:string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. :type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroup') params = {} params[b'delete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = {} body['sgId'] = compat.convert_to_string(sg_id) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), port=int, protocol_type=(bytes, str)) def create_app_server_group_port(self, blb_id, sg_id, port, protocol_type, health_check=None, health_check_port=None, health_check_urlpath=None, health_check_timeout_insecond=None, health_check_interval_insecond=None, health_check_down_retry=None, health_check_up_retry=None, health_check_normal_status=None, client_token=None, config=None): """ create server group for the specified LoadBalancer, support 
batch add :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param port: Port number, integer between 1 and 65535 :type port:string :param protocol_type: Protocol type of listening port, "TCP"/"UDP"/"HTTP" :type protocol_type:string :param health_check: Health check protocol :value 'HTTP' or 'TCP',default:'HTTP' :type health_check: string :param health_check_port: Health check port, the default is the same as port :type health_check_port: int :param health_check_urlpath: Health check URI, default '/'. Effective when the health check protocol is "HTTP" :type health_check_urlpath: string :param health_check_timeout_insecond: Health check timeout (unit: second) :value 1-60, default: 3 :type health_check_timeout_insecond: int :param health_check_interval_insecond: Health check interval (unit: second) :value 1-10, default: 3 :type health_check_interval_insecond: int :param health_check_down_retry: The unhealthy down retry, that is, how many consecutive health check failures, shields the backend server. :value 2-5, default: 3 :type health_check_down_retry: int :param health_check_up_retry: Health up retry, that is, how many consecutive health checks are successful, then re-use the back-end server :value:2-5, default: 3 :type health_check_up_retry: int :param health_check_normal_status: The HTTP status code when the health check is normal supports a combination of five types of status codes, such as "http_1xx|http_2xx", Effective when the health check protocol is "HTTP" :value default: http_2xx|http_3xx :type health_check_normal_status: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroupport') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'port': port, 'type': compat.convert_to_string(protocol_type) } if health_check is not None: body['healthCheck'] = compat.convert_to_string(health_check) if health_check_port is not None: body['healthCheckPort'] = health_check_port if health_check_urlpath is not None: body['healthCheckUrlPath'] = \ compat.convert_to_string(health_check_urlpath) if health_check_timeout_insecond is not None: body['healthCheckTimeoutInSecond'] = health_check_timeout_insecond if health_check_interval_insecond is not None: body['healthCheckIntervalInSecond'] = health_check_interval_insecond if health_check_down_retry is not None: body['healthCheckDownRetry'] = health_check_down_retry if health_check_up_retry is not None: body['healthCheckUpRetry'] = health_check_up_retry if health_check_normal_status is not None: body['healthCheckNormalStatus'] = \ compat.convert_to_string(health_check_normal_status) return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), port_id=(bytes, str)) def update_app_server_group_port(self, blb_id, sg_id, port_id, health_check=None, health_check_port=None, health_check_urlpath=None, health_check_timeout_insecond=None, health_check_interval_insecond=None, health_check_down_retry=None, health_check_up_retry=None, health_check_normal_status=None, client_token=None, config=None): """ update server group for the specified LoadBalancer, support batch add :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param port_id: 
The id of the server group port to be updated :type port_id:string :param health_check: Health check protocol :value 'HTTP' or 'TCP',default:'HTTP' :type health_check: string :param health_check_port: Health check port, the default is the same as port :type health_check_port: int :param health_check_urlpath: Health check URI, default '/'. Effective when the health check protocol is "HTTP" :type health_check_urlpath: string :param health_check_timeout_insecond: Health check timeout (unit: second) :value 1-60, default: 3 :type health_check_timeout_insecond: int :param health_check_interval_insecond: Health check interval (unit: second) :value 1-10, default: 3 :type health_check_interval_insecond: int :param health_check_down_retry: The unhealthy down retry, that is, how many consecutive health check failures, shields the backend server. :value 2-5, default: 3 :type health_check_down_retry: int :param health_check_up_retry: Health up retry, that is, how many consecutive health checks are successful, then re-use the back-end server :value:2-5, default: 3 :type health_check_up_retry: int :param health_check_normal_status: The HTTP status code when the health check is normal supports a combination of five types of status codes, such as "http_1xx|http_2xx", Effective when the health check protocol is "HTTP" :value default: http_2xx|http_3xx :type health_check_normal_status: string :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroupport') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'portId': compat.convert_to_string(port_id) } if health_check is not None: body['healthCheck'] = compat.convert_to_string(health_check) if health_check_port is not None: body['healthCheckPort'] = health_check_port if health_check_urlpath is not None: body['healthCheckUrlPath'] = \ compat.convert_to_string(health_check_urlpath) if health_check_timeout_insecond is not None: body['healthCheckTimeoutInSecond'] = health_check_timeout_insecond if health_check_interval_insecond is not None: body['healthCheckIntervalInSecond'] = health_check_interval_insecond if health_check_down_retry is not None: body['healthCheckDownRetry'] = health_check_down_retry if health_check_up_retry is not None: body['healthCheckUpRetry'] = health_check_up_retry if health_check_normal_status is not None: body['healthCheckNormalStatus'] = \ compat.convert_to_string(health_check_normal_status) return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), port_list=list) def delete_app_server_group_port(self, blb_id, sg_id, port_list, client_token=None, config=None): """ delete server group of the specified LoadBalancer, :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param port_list: The ports of listeners to be released :type port_list:list<string> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'appservergroupport') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'portIdList': port_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), backend_server_list=list) def create_app_blb_rs(self, blb_id, sg_id, backend_server_list, client_token=None, config=None): """ Add backend server for the specified LoadBalancer and server group, support batch add :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param backend_server_list List of backend servers to be added https://cloud.baidu.com/doc/BLB/API.html#AppBackendServer :type backend_server_list:List<AppBackendServer> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'backendServerList': backend_server_list } return self._send_request(http_methods.POST, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), backend_server_list=list) def update_app_blb_rs(self, blb_id, sg_id, backend_server_list, client_token=None, config=None): """ update backend server for the specified LoadBalancer and server group, support batch update :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param backend_server_list List of backend servers to be added https://cloud.baidu.com/doc/BLB/API.html#AppBackendServer :type backend_server_list:List<AppBackendServer> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'backendServerList': backend_server_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def describe_app_blb_rs(self, blb_id, sg_id, marker=None, max_keys=None, config=None): """ Query the list of backend servers under the specified LoadBalancer and server group :param blb_id: Id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param marker: The optional parameter marker specified in the original request to specify where in the results to begin listing. Together with the marker, specifies the list result which listing should begin. If the marker is not specified, the list result will listing from the first one. :type marker: string :param max_keys: The optional parameter to specifies the max number of list result to return. The default value is 1000. 
:type max_keys: int :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} params[b'sgId'] = compat.convert_to_string(sg_id) if marker is not None: params[b'marker'] = marker if max_keys is not None: params[b'maxKeys'] = max_keys return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str), backend_server_list=list) def delete_app_blb_rs(self, blb_id, sg_id, backend_server_list, client_token=None, config=None): """ delete backend server for the specified LoadBalancer and server group, support batch delete :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param backend_server_list List of backend servers to be deleted :type backend_server_list:List<string> :param client_token: If the clientToken is not specified by the user, a random String generated by default algorithm will be used. 
:type client_token: string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrs') params = {} params[b'batchdelete'] = None if client_token is None: params[b'clientToken'] = generate_client_token() else: params[b'clientToken'] = client_token body = { 'sgId': compat.convert_to_string(sg_id), 'backendServerIdList': backend_server_list } return self._send_request(http_methods.PUT, path, body=json.dumps(body), params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def describe_rs_mount(self, blb_id, sg_id, config=None): """ describe servers of specific server group :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrsmount') params = { 'sgId': compat.convert_to_string(sg_id) } return self._send_request(http_methods.GET, path, params=params, config=config) @required(blb_id=(bytes, str), sg_id=(bytes, str)) def describe_rs_unmount(self, blb_id, sg_id, config=None): """ describe servers of specific server group :param blb_id: id of LoadBalancer :type blb_id:string :param sg_id: id of the server group :type sg_id:string :param config: :type config: baidubce.BceClientConfiguration :return: :rtype baidubce.bce_response.BceResponse """ path = utils.append_uri(self.version, 'appblb', blb_id, 'blbrsunmount') params = { 'sgId': compat.convert_to_string(sg_id) } return self._send_request(http_methods.GET, path, params=params, config=config) def generate_client_token_by_uuid(): """ The default method to generate the random string for client_token if the optional parameter client_token is not specified by the user. 
:return: :rtype string """ return str(uuid.uuid4()) generate_client_token = generate_client_token_by_uuid
baidubce/bce-sdk-python
baidubce/services/blb/app_blb_client.py
Python
apache-2.0
87,381
# Copyright 2016 Mirantis, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_serialization import jsonutils
from oslo_serialization.serializer.base_serializer import BaseSerializer


class JSONSerializer(BaseSerializer):
    """JSON serializer based on the jsonutils module."""

    def __init__(self, default=jsonutils.to_primitive, encoding='utf-8'):
        # ``default`` is the fallback converter used when serialising
        # objects that are not JSON-native; ``encoding`` is forwarded to
        # the jsonutils calls that accept one.
        self._default = default
        self._encoding = encoding

    def dump(self, obj, fp):
        """Serialise ``obj`` as a JSON stream to the file-like ``fp``."""
        return jsonutils.dump(obj, fp)

    def dump_as_bytes(self, obj):
        """Serialise ``obj`` and return the result as a bytes object."""
        options = {'default': self._default, 'encoding': self._encoding}
        return jsonutils.dump_as_bytes(obj, **options)

    def load(self, fp):
        """Deserialise a JSON document read from the file-like ``fp``."""
        return jsonutils.load(fp, encoding=self._encoding)

    def load_from_bytes(self, s):
        """Deserialise a JSON document held in ``s``."""
        return jsonutils.loads(s, encoding=self._encoding)
openstack/oslo.serialization
oslo_serialization/serializer/json_serializer.py
Python
apache-2.0
1,376
""" Functions that aid testing in various ways. A typical use would be:: lowcore = create_named_configuration('LOWBD2-CORE') times = numpy.linspace(-3, +3, 13) * (numpy.pi / 12.0) frequency = numpy.array([1e8]) channel_bandwidth = numpy.array([1e7]) # Define the component and give it some polarisation and spectral behaviour f = numpy.array([100.0]) flux = numpy.array([f]) phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000') compabsdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000') comp = create_skycomponent(flux=flux, frequency=frequency, direction=compabsdirection, polarisation_frame=PolarisationFrame('stokesI')) image = create_test_image(frequency=frequency, phasecentre=phasecentre, cellsize=0.001, polarisation_frame=PolarisationFrame('stokesI') vis = create_visibility(lowcore, times=times, frequency=frequency, channel_bandwidth=channel_bandwidth, phasecentre=phasecentre, weight=1, polarisation_frame=PolarisationFrame('stokesI'), integration_time=1.0) """ import csv import logging from typing import List import astropy.units as u import numpy from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.wcs import WCS from astropy.wcs.utils import pixel_to_skycoord from scipy import interpolate from data_models.memory_data_models import Configuration, Image, GainTable, Skycomponent, SkyModel, PointingTable from data_models.parameters import arl_path from data_models.polarisation import PolarisationFrame from processing_components.calibration.calibration_control import create_calibration_controls from processing_components.calibration.operations import create_gaintable_from_blockvisibility, apply_gaintable from processing_components.image.operations import import_image_from_fits from processing_components.imaging.base import predict_2d, predict_skycomponent_visibility, \ create_image_from_visibility, advise_wide_field from processing_components.imaging.primary_beams 
import create_pb from processing_components.skycomponent.operations import create_skycomponent, insert_skycomponent, \ apply_beam_to_skycomponent, filter_skycomponents_by_flux from processing_components.visibility.base import create_blockvisibility, create_visibility from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility, \ convert_visibility_to_blockvisibility from processing_library.image.operations import create_image_from_array log = logging.getLogger(__name__) def create_test_image(canonical=True, cellsize=None, frequency=None, channel_bandwidth=None, phasecentre=None, polarisation_frame=PolarisationFrame("stokesI")) -> Image: """Create a useful test image This is the test image M31 widely used in ALMA and other simulations. It is actually part of an Halpha region in M31. :param canonical: Make the image into a 4 dimensional image :param cellsize: :param frequency: Frequency (array) in Hz :param channel_bandwidth: Channel bandwidth (array) in Hz :param phasecentre: Phase centre of image (SkyCoord) :param polarisation_frame: Polarisation frame :return: Image """ if frequency is None: frequency = [1e8] im = import_image_from_fits(arl_path("data/models/M31.MOD")) if canonical: if polarisation_frame is None: im.polarisation_frame = PolarisationFrame("stokesI") elif isinstance(polarisation_frame, PolarisationFrame): im.polarisation_frame = polarisation_frame else: raise ValueError("polarisation_frame is not valid") im = replicate_image(im, frequency=frequency, polarisation_frame=im.polarisation_frame) if cellsize is not None: im.wcs.wcs.cdelt[0] = -180.0 * cellsize / numpy.pi im.wcs.wcs.cdelt[1] = +180.0 * cellsize / numpy.pi if frequency is not None: im.wcs.wcs.crval[3] = frequency[0] if channel_bandwidth is not None: im.wcs.wcs.cdelt[3] = channel_bandwidth[0] else: if len(frequency) > 1: im.wcs.wcs.cdelt[3] = frequency[1] - frequency[0] else: im.wcs.wcs.cdelt[3] = 0.001 * frequency[0] im.wcs.wcs.radesys = 'ICRS' 
im.wcs.wcs.equinox = 2000.00 if phasecentre is not None: im.wcs.wcs.crval[0] = phasecentre.ra.deg im.wcs.wcs.crval[1] = phasecentre.dec.deg # WCS is 1 relative im.wcs.wcs.crpix[0] = im.data.shape[3] // 2 + 1 im.wcs.wcs.crpix[1] = im.data.shape[2] // 2 + 1 return im def create_test_image_from_s3(npixel=16384, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015, frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]), phasecentre=None, fov=20, flux_limit=1e-3) -> Image: """Create MID test image from S3 The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query:: Database: s3_sex SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);; Number of rows returned: 29966 For frequencies < 610MHz, there are three tables to use:: data/models/S3_151MHz_10deg.csv, use fov=10 data/models/S3_151MHz_20deg.csv, use fov=20 data/models/S3_151MHz_40deg.csv, use fov=40 For frequencies > 610MHz, there are three tables: data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3 data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3 data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3 data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3 The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz, and then calculated for the specified frequencies. If polarisation_frame is not stokesI then the image will a polarised axis but the values will be zero. 
:param npixel: Number of pixels :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI")) :param cellsize: cellsize in radians :param frequency: :param channel_bandwidth: Channel width (Hz) :param phasecentre: phasecentre (SkyCoord) :param fov: fov 10 | 20 | 40 :param flux_limit: Minimum flux (Jy) :return: Image """ ras = [] decs = [] fluxes = [] if phasecentre is None: phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000') if polarisation_frame is None: polarisation_frame = PolarisationFrame("stokesI") npol = polarisation_frame.npol nchan = len(frequency) shape = [nchan, npol, npixel, npixel] w = WCS(naxis=4) # The negation in the longitude is needed by definition of RA, DEC w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]] w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0] w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ'] w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]] w.naxis = 4 w.wcs.radesys = 'ICRS' w.wcs.equinox = 2000.0 model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame) if numpy.max(frequency) > 6.1E8: if fov > 10: fovstr = '18' else: fovstr = '10' if flux_limit >= 1e-3: csvfilename = arl_path('data/models/S3_1400MHz_1mJy_%sdeg.csv' % fovstr) else: csvfilename = arl_path('data/models/S3_1400MHz_100uJy_%sdeg.csv' % fovstr) log.info('create_test_image_from_s3: Reading S3 sources from %s ' % csvfilename) else: assert fov in [10, 20, 40], "Field of view invalid: use one of %s" % ([10, 20, 40]) csvfilename = arl_path('data/models/S3_151MHz_%ddeg.csv' % (fov)) log.info('create_test_image_from_s3: Reading S3 sources from %s ' % csvfilename) with open(csvfilename) as csvfile: readCSV = csv.reader(csvfile, delimiter=',') r = 0 for row in readCSV: # Skip first row if r > 0: ra = float(row[4]) + phasecentre.ra.deg dec = float(row[5]) + phasecentre.dec.deg if 
numpy.max(frequency) > 6.1E8: alpha = (float(row[11]) - float(row[10])) / numpy.log10(1400.0 / 610.0) flux = numpy.power(10, float(row[10])) * numpy.power(frequency / 1.4e9, alpha) else: alpha = (float(row[10]) - float(row[9])) / numpy.log10(610.0 / 151.0) flux = numpy.power(10, float(row[9])) * numpy.power(frequency / 1.51e8, alpha) if numpy.max(flux) > flux_limit: ras.append(ra) decs.append(dec) fluxes.append(flux) r += 1 csvfile.close() assert len(fluxes) > 0, "No sources found above flux limit %s" % flux_limit log.info('create_test_image_from_s3: %d sources read' % (len(fluxes))) p = w.sub(2).wcs_world2pix(numpy.array(ras), numpy.array(decs), 1) total_flux = numpy.sum(fluxes) fluxes = numpy.array(fluxes) ip = numpy.round(p).astype('int') ok = numpy.where((0 <= ip[0, :]) & (npixel > ip[0, :]) & (0 <= ip[1, :]) & (npixel > ip[1, :]))[0] ps = ip[:, ok] fluxes = fluxes[ok] actual_flux = numpy.sum(fluxes) log.info('create_test_image_from_s3: %d sources inside the image' % (ps.shape[1])) log.info('create_test_image_from_s3: average channel flux in S3 model = %.3f, actual average channel flux in ' 'image = %.3f' % (total_flux / float(nchan), actual_flux / float(nchan))) for chan in range(nchan): for iflux, flux in enumerate(fluxes): model.data[chan, 0, ps[1, iflux], ps[0, iflux]] = flux[chan] return model def create_test_skycomponents_from_s3(polarisation_frame=PolarisationFrame("stokesI"), frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]), phasecentre=None, fov=20, flux_limit=1e-3, radius=None): """Create test image from S3 The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query:: Database: s3_sex SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);; Number of rows returned: 29966 For frequencies < 610MHz, there are three tables to use:: data/models/S3_151MHz_10deg.csv, use fov=10 data/models/S3_151MHz_20deg.csv, use 
fov=20 data/models/S3_151MHz_40deg.csv, use fov=40 For frequencies > 610MHz, there are three tables: data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3 data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3 data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3 data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3 The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz, and then calculated for the specified frequencies. If polarisation_frame is not stokesI then the image will a polarised axis but the values will be zero. :param npixel: Number of pixels :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI")) :param cellsize: cellsize in radians :param frequency: :param channel_bandwidth: Channel width (Hz) :param phasecentre: phasecentre (SkyCoord) :param fov: fov 10 | 20 | 40 :param flux_limit: Minimum flux (Jy) :return: Image """ ras = [] decs = [] fluxes = [] names = [] if phasecentre is None: phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000') if polarisation_frame is None: polarisation_frame = PolarisationFrame("stokesI") if numpy.max(frequency) > 6.1E8: if fov > 10: fovstr = '18' else: fovstr = '10' if flux_limit >= 1e-3: csvfilename = arl_path('data/models/S3_1400MHz_1mJy_%sdeg.csv' % fovstr) else: csvfilename = arl_path('data/models/S3_1400MHz_100uJy_%sdeg.csv' % fovstr) log.info('create_test_skycomponents_from_s3: Reading S3-SEX sources from %s ' % csvfilename) else: assert fov in [10, 20, 40], "Field of view invalid: use one of %s" % ([10, 20, 40]) csvfilename = arl_path('data/models/S3_151MHz_%ddeg.csv' % (fov)) log.info('create_test_skycomponents_from_s3: Reading S3-SEX sources from %s ' % csvfilename) skycomps = list() with open(csvfilename) as csvfile: readCSV = csv.reader(csvfile, delimiter=',') r = 0 for row in readCSV: # Skip first row if r > 0: ra = float(row[4])/numpy.cos(phasecentre.dec.rad) + 
phasecentre.ra.deg dec = float(row[5]) + phasecentre.dec.deg if numpy.max(frequency) > 6.1E8: alpha = (float(row[11]) - float(row[10])) / numpy.log10(1400.0 / 610.0) flux = numpy.power(10, float(row[10])) * numpy.power(frequency / 1.4e9, alpha) else: alpha = (float(row[10]) - float(row[9])) / numpy.log10(610.0 / 151.0) flux = numpy.power(10, float(row[9])) * numpy.power(frequency / 1.51e8, alpha) if numpy.max(flux) > flux_limit: ras.append(ra) decs.append(dec) fluxes.append([[f] for f in flux]) names.append("S3_%s" % row[0]) r += 1 csvfile.close() assert len(fluxes) > 0, "No sources found above flux limit %s" % flux_limit directions = SkyCoord(ra=ras * u.deg, dec=decs * u.deg) if phasecentre is not None: separations = directions.separation(phasecentre).to('rad').value else: separations = numpy.zeros(len(names)) for isource, name in enumerate(names): direction = directions[isource] if separations[isource] < radius: if not numpy.isnan(flux).any(): skycomps.append(Skycomponent(direction=direction, flux=fluxes[isource], frequency=frequency, name=names[isource], shape='Point', polarisation_frame=polarisation_frame)) log.info('create_test_skycomponents_from_s3: %d sources found above fluxlimit inside search radius' % len(skycomps)) return skycomps def create_low_test_image_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015, frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]), phasecentre=None, kind='cubic', applybeam=False, flux_limit=0.1, flux_max=numpy.inf, flux_min=-numpy.inf, radius=None, insert_method='Nearest') -> Image: """Create LOW test image from the GLEAM survey Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero. See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier. VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016) GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. 
I: A low-frequency extragalactic catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H :param npixel: Number of pixels :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI")) :param cellsize: cellsize in radians :param frequency: :param channel_bandwidth: Channel width (Hz) :param phasecentre: phasecentre (SkyCoord) :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: linear :return: Image """ if phasecentre is None: phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000') if radius is None: radius = npixel * cellsize / numpy.sqrt(2.0) sc = create_low_test_skycomponents_from_gleam(flux_limit=flux_limit, polarisation_frame=polarisation_frame, frequency=frequency, phasecentre=phasecentre, kind=kind, radius=radius) sc = filter_skycomponents_by_flux(sc, flux_min=flux_min, flux_max=flux_max) if polarisation_frame is None: polarisation_frame = PolarisationFrame("stokesI") npol = polarisation_frame.npol nchan = len(frequency) shape = [nchan, npol, npixel, npixel] w = WCS(naxis=4) # The negation in the longitude is needed by definition of RA, DEC w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]] w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0] w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ'] w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]] w.naxis = 4 w.wcs.radesys = 'ICRS' w.wcs.equinox = 2000.0 model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame) model = insert_skycomponent(model, sc, insert_method=insert_method) if applybeam: beam = create_pb(model, telescope='LOW', use_local=False) model.data[...] *= beam.data[...] 
return model def create_low_test_skymodel_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015, frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]), phasecentre=None, kind='cubic', applybeam=True, flux_limit=0.1, flux_max=numpy.inf, flux_threshold=1.0, insert_method='Nearest', telescope='LOW') -> SkyModel: """Create LOW test skymodel from the GLEAM survey Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero. See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier. VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016) GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H :param telescope: :param npixel: Number of pixels :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI")) :param cellsize: cellsize in radians :param frequency: :param channel_bandwidth: Channel width (Hz) :param phasecentre: phasecentre (SkyCoord) :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic :param applybeam: Apply the primary beam? 
:param flux_limit: Weakest component :param flux_max: Maximum strength component to be included in components :param flux_threshold: Split between components (brighter) and image (weaker) :param insert_method: Nearest | PSWF | Lanczos :return: :return: SkyModel """ if phasecentre is None: phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000') radius = npixel * cellsize sc = create_low_test_skycomponents_from_gleam(flux_limit=flux_limit, polarisation_frame=polarisation_frame, frequency=frequency, phasecentre=phasecentre, kind=kind, radius=radius) sc = filter_skycomponents_by_flux(sc, flux_max=flux_max) if polarisation_frame is None: polarisation_frame = PolarisationFrame("stokesI") npol = polarisation_frame.npol nchan = len(frequency) shape = [nchan, npol, npixel, npixel] w = WCS(naxis=4) # The negation in the longitude is needed by definition of RA, DEC w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]] w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0] w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ'] w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]] w.naxis = 4 w.wcs.radesys = 'ICRS' w.wcs.equinox = 2000.0 model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame) if applybeam: beam = create_pb(model, telescope=telescope, use_local=False) sc = apply_beam_to_skycomponent(sc, beam) weaksc = filter_skycomponents_by_flux(sc, flux_max=flux_threshold) brightsc = filter_skycomponents_by_flux(sc, flux_min=flux_threshold) model = insert_skycomponent(model, weaksc, insert_method=insert_method) log.info( 'create_low_test_skymodel_from_gleam: %d bright sources above flux threshold %.3f, %d weak sources below ' % (len(brightsc), flux_threshold, len(weaksc))) return SkyModel(components=brightsc, image=model, mask=None, gaintable=None) def create_low_test_skycomponents_from_gleam(flux_limit=0.1, 
polarisation_frame=PolarisationFrame("stokesI"), frequency=numpy.array([1e8]), kind='cubic', phasecentre=None, radius=1.0) \ -> List[Skycomponent]: """Create sky components from the GLEAM survey Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero. See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier. VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016) GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H :param flux_limit: Only write components brighter than this (Jy) :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI")) :param frequency: Frequencies at which the flux will be estimated :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: linear :param phasecentre: Desired phase centre (SkyCoord) default None implies all sources :param radius: Radius of sources selected around phasecentre (default 1.0 rad) :return: List of Skycomponents """ fitsfile = arl_path("data/models/GLEAM_EGC.fits") rad2deg = 180.0 / numpy.pi decmin = phasecentre.dec.to('deg').value - rad2deg * radius / 2.0 decmax = phasecentre.dec.to('deg').value + rad2deg * radius / 2.0 hdulist = fits.open(fitsfile, lazy_load_hdus=False) recs = hdulist[1].data[0].array fluxes = recs['peak_flux_wide'] mask = fluxes > flux_limit filtered_recs = recs[mask] decs = filtered_recs['DEJ2000'] mask = decs > decmin filtered_recs = filtered_recs[mask] decs = filtered_recs['DEJ2000'] mask = decs < decmax filtered_recs = filtered_recs[mask] ras = filtered_recs['RAJ2000'] decs = filtered_recs['DEJ2000'] names = filtered_recs['Name'] if polarisation_frame is None: polarisation_frame = PolarisationFrame("stokesI") npol = polarisation_frame.npol nchan = len(frequency) # For every source, we read 
all measured fluxes and interpolate to the # required frequencies gleam_freqs = numpy.array([76, 84, 92, 99, 107, 115, 122, 130, 143, 151, 158, 166, 174, 181, 189, 197, 204, 212, 220, 227]) gleam_flux_freq = numpy.zeros([len(names), len(gleam_freqs)]) for i, f in enumerate(gleam_freqs): gleam_flux_freq[:, i] = filtered_recs['int_flux_%03d' % (f)][:] skycomps = [] directions = SkyCoord(ra=ras * u.deg, dec=decs * u.deg) if phasecentre is not None: separations = directions.separation(phasecentre).to('rad').value else: separations = numpy.zeros(len(names)) for isource, name in enumerate(names): direction = directions[isource] if separations[isource] < radius: fint = interpolate.interp1d(gleam_freqs * 1.0e6, gleam_flux_freq[isource, :], kind=kind) flux = numpy.zeros([nchan, npol]) flux[:, 0] = fint(frequency) if not numpy.isnan(flux).any(): skycomps.append(Skycomponent(direction=direction, flux=flux, frequency=frequency, name=name, shape='Point', polarisation_frame=polarisation_frame)) log.info('create_low_test_skycomponents_from_gleam: %d sources above flux limit %.3f' % (len(skycomps), flux_limit)) hdulist.close() return skycomps def replicate_image(im: Image, polarisation_frame=PolarisationFrame('stokesI'), frequency=numpy.array([1e8])) \ -> Image: """ Make a new canonical shape Image, extended along third and fourth axes by replication. 
The order of the data is [chan, pol, dec, ra] :param frequency: :param im: :param polarisation_frame: Polarisation_frame :return: Image """ if len(im.data.shape) == 2: fim = Image() newwcs = WCS(naxis=4) newwcs.wcs.crpix = [im.wcs.wcs.crpix[0] + 1.0, im.wcs.wcs.crpix[1] + 1.0, 1.0, 1.0] newwcs.wcs.cdelt = [im.wcs.wcs.cdelt[0], im.wcs.wcs.cdelt[1], 1.0, 1.0] newwcs.wcs.crval = [im.wcs.wcs.crval[0], im.wcs.wcs.crval[1], 1.0, frequency[0]] newwcs.wcs.ctype = [im.wcs.wcs.ctype[0], im.wcs.wcs.ctype[1], 'STOKES', 'FREQ'] nchan = len(frequency) npol = polarisation_frame.npol fim.polarisation_frame = polarisation_frame fim.wcs = newwcs fshape = [nchan, npol, im.data.shape[1], im.data.shape[0]] fim.data = numpy.zeros(fshape) log.info("replicate_image: replicating shape %s to %s" % (im.data.shape, fim.data.shape)) for i3 in range(nchan): fim.data[i3, 0, :, :] = im.data[:, :] return fim else: return im def create_blockvisibility_iterator(config: Configuration, times: numpy.array, frequency: numpy.array, channel_bandwidth, phasecentre: SkyCoord, weight: float = 1, polarisation_frame=PolarisationFrame('stokesI'), integration_time=1.0, number_integrations=1, predict=predict_2d, model=None, components=None, phase_error=0.0, amplitude_error=0.0, sleep=0.0, **kwargs): """ Create a sequence of Visibilities and optionally predicting and coalescing This is useful mainly for performing large simulations. 
Do something like:: vis_iter = create_blockvisibility_iterator(config, times, frequency, channel_bandwidth, phasecentre=phasecentre, weight=1.0, integration_time=30.0, number_integrations=3) for i, vis in enumerate(vis_iter): if i == 0: fullvis = vis else: fullvis = append_visibility(fullvis, vis) :param config: Configuration of antennas :param times: hour angles in radians :param frequency: frequencies (Hz] Shape [nchan] :param weight: weight of a single sample :param phasecentre: phasecentre of observation :param npol: Number of polarizations :param integration_time: Integration time ('auto' or value in s) :param number_integrations: Number of integrations to be created at each time. :param model: Model image to be inserted :param components: Components to be inserted :param sleep_time: Time to sleep between yields :return: Visibility """ for time in times: actualtimes = time + numpy.arange(0, number_integrations) * integration_time * numpy.pi / 43200.0 bvis = create_blockvisibility(config, actualtimes, frequency=frequency, phasecentre=phasecentre, weight=weight, polarisation_frame=polarisation_frame, integration_time=integration_time, channel_bandwidth=channel_bandwidth) if model is not None: vis = convert_blockvisibility_to_visibility(bvis) vis = predict(vis, model, **kwargs) bvis = convert_visibility_to_blockvisibility(vis) if components is not None: vis = predict_skycomponent_visibility(bvis, components) # Add phase errors if phase_error > 0.0 or amplitude_error > 0.0: gt = create_gaintable_from_blockvisibility(bvis) gt = simulate_gaintable(gt=gt, phase_error=phase_error, amplitude_error=amplitude_error) bvis = apply_gaintable(bvis, gt) import time time.sleep(sleep) yield bvis def simulate_gaintable(gt: GainTable, phase_error=0.1, amplitude_error=0.0, smooth_channels=1, leakage=0.0, **kwargs) -> GainTable: """ Simulate a gain table :type gt: GainTable :param phase_error: std of normal distribution, zero mean :param amplitude_error: std of log normal 
distribution :param leakage: std of cross hand leakage :param smooth_channels: Use bspline over smooth_channels :param kwargs: :return: Gaintable """ def moving_average(a, n=3): return numpy.convolve(a, numpy.ones((n,)) / n, mode='valid') log.debug("simulate_gaintable: Simulating amplitude error = %.4f, phase error = %.4f" % (amplitude_error, phase_error)) amps = 1.0 phases = 1.0 ntimes, nant, nchan, nrec, _ = gt.data['gain'].shape if phase_error > 0.0: phases = numpy.zeros(gt.data['gain'].shape) for time in range(ntimes): for ant in range(nant): phase = numpy.random.normal(0, phase_error, nchan + int(smooth_channels) - 1) if smooth_channels > 1: phase = moving_average(phase, smooth_channels) phases[time, ant, ...] = phase[..., numpy.newaxis, numpy.newaxis] if amplitude_error > 0.0: amps = numpy.ones(gt.data['gain'].shape, dtype='complex') for time in range(ntimes): for ant in range(nant): amp = numpy.random.lognormal(mean=0.0, sigma=amplitude_error, size=nchan + int(smooth_channels) - 1) if smooth_channels > 1: amp = moving_average(amp, smooth_channels) amp = amp / numpy.average(amp) amps[time, ant, ...] 
= amp[..., numpy.newaxis, numpy.newaxis] gt.data['gain'] = amps * numpy.exp(0 + 1j * phases) nrec = gt.data['gain'].shape[-1] if nrec > 1: if leakage > 0.0: leak = numpy.random.normal(0, leakage, gt.data['gain'][..., 0, 0].shape) + 1j * \ numpy.random.normal(0, leakage, gt.data['gain'][..., 0, 0].shape) gt.data['gain'][..., 0, 1] = gt.data['gain'][..., 0, 0] * leak leak = numpy.random.normal(0, leakage, gt.data['gain'][..., 1, 1].shape) + 1j * \ numpy.random.normal(0, leakage, gt.data['gain'][..., 1, 1].shape) gt.data['gain'][..., 1, 0] = gt.data['gain'][..., 1, 1] * leak else: gt.data['gain'][..., 0, 1] = 0.0 gt.data['gain'][..., 1, 0] = 0.0 return gt def simulate_pointingtable(pt: PointingTable, pointing_error, static_pointing_error=None, global_pointing_error=None, seed=None, **kwargs) -> PointingTable: """ Simulate a gain table :type pt: PointingTable :param pointing_error: std of normal distribution (radians) :param static_pointing_error: std of normal distribution (radians) :param global_pointing_error: 2-vector of global pointing error (rad) :param kwargs: :return: PointingTable """ if seed is not None: numpy.random.seed(seed) if static_pointing_error is None: static_pointing_error = [0.0, 0.0] r2s = 180.0 * 3600.0 / numpy.pi pt.data['pointing'] = numpy.zeros(pt.data['pointing'].shape) ntimes, nant, nchan, nrec, _ = pt.data['pointing'].shape if pointing_error > 0.0: log.debug("simulate_pointingtable: Simulating dynamic pointing error = %g (rad) %g (arcsec)" % (pointing_error, r2s * pointing_error)) pt.data['pointing'] += numpy.random.normal(0.0, pointing_error, pt.data['pointing'].shape) if (abs(static_pointing_error[0]) > 0.0) or (abs(static_pointing_error[1]) > 0.0): numpy.random.seed(18051955) log.debug("simulate_pointingtable: Simulating static pointing error = (%g, %g) (rad) (%g, %g)(arcsec)" % (static_pointing_error[0], static_pointing_error[1], r2s * static_pointing_error[0], r2s * static_pointing_error[1])) static_pe = 
numpy.zeros(pt.data['pointing'].shape[1:]) static_pe[...,0] = numpy.random.normal(0.0, static_pointing_error[0], static_pe[...,0].shape)[numpy.newaxis, ...] static_pe[...,1] = numpy.random.normal(0.0, static_pointing_error[1], static_pe[...,1].shape)[numpy.newaxis, ...] pt.data['pointing'] += static_pe if global_pointing_error is not None: if seed is not None: numpy.random.seed(seed) log.debug("simulate_pointingtable: Simulating global pointing error = [%g, %g] (rad) [%g,s %g] (arcsec)" % (global_pointing_error[0], global_pointing_error[1], r2s * global_pointing_error[0], r2s * global_pointing_error[1])) pt.data['pointing'][..., :] += global_pointing_error return pt def simulate_pointingtable_from_timeseries(pt, type='wind', time_series_type='precision', pointing_directory=None, reference_pointing=False, seed=None): """Create a pointing table with time series created from PSD. :param pt: Pointing table to be filled :param type: Type of pointing: 'tracking' or 'wind' :param pointing_file: Name of pointing file :param reference_pointing: Use reference pointing? 
:return: """ if seed is not None: numpy.random.seed(seed) if pointing_directory is None: pointing_directory = arl_path("data/models/%s" % time_series_type) pt.data['pointing'] = numpy.zeros(pt.data['pointing'].shape) ntimes, nant, nchan, nrec, _ = pt.data['pointing'].shape # Use az and el at the beginning of this pointingtable axis_values = pt.nominal[0,0,0,0,0] el = pt.nominal[0,0,0,0,1] el_deg = el * 180.0 / numpy.pi az_deg = axis_values * 180.0 / numpy.pi if el_deg < 30.0: el_deg = 15.0 elif el_deg < (90.0+45.0)/2.0: el_deg = 45.0 else: el_deg = 90.0 if abs(az_deg) < 45.0 / 2.0: az_deg = 0.0 elif abs(az_deg) < (45.0 + 90.0)/2.0: az_deg = 45.0 elif abs(az_deg) < (90.0 + 135.0)/2.0: az_deg = 90.0 elif abs(az_deg) < (135.0 + 180.0)/2.0: az_deg = 135.0 else: az_deg = 180.0 pointing_file = '%s/El%dAz%d.dat' % (pointing_directory, int(el_deg), int(az_deg)) log.debug("simulate_pointingtable_from_timeseries: Reading wind PSD from %s" % pointing_file) psd = numpy.loadtxt(pointing_file) # define some arrays freq = psd[:, 0] axesdict = { "az": psd[:, 1], "el": psd[:, 2], "pxel": psd[:, 3], "pel": psd[:, 4] } if type == 'tracking': axes = ["az", "el"] elif type == 'wind': axes = ["pxel", "pel"] else: raise ValueError("Pointing type %s not known" % type) freq_interval = 0.0001 for axis in axes: axis_values = axesdict[axis] if (axis == "az") or (axis == "el"): # determine index of maximum PSD value; add 50 for better fit axis_values_max_index = numpy.argwhere(axis_values == numpy.max(axis_values))[0][0] + 50 axis_values_max_index = min(axis_values_max_index, len(axis_values)) # max_freq = 2.0 / pt.interval[0] max_freq = 0.4 freq_max_index = numpy.argwhere(freq > max_freq)[0][0] else: break_freq = 0.01 # not max; just a break axis_values_max_index = numpy.argwhere(freq>break_freq)[0][0] # max_freq = 2.0 / pt.interval[0] max_freq = 0.1 freq_max_index = numpy.argwhere(freq > max_freq)[0][0] # construct regularly-spaced frequencies regular_freq = numpy.arange(freq[0], 
freq[freq_max_index], freq_interval) regular_axis_values_max_index = numpy.argwhere(numpy.abs(regular_freq-freq[axis_values_max_index])==numpy.min(numpy.abs(regular_freq-freq[axis_values_max_index])))[0][0] # print ('Frequency break: ', freq[az_max_index]) # print ('Max frequency: ', max_freq) # # print ('New frequency break: ', regular_freq[regular_az_max_index]) # print ('New max frequency: ', regular_freq[-1]) if axis_values_max_index>=freq_max_index: raise ValueError('Frequency break is higher than highest frequency; select a lower break') # use original frequency break and max frequency to fit function # fit polynomial to psd up to max value import warnings from numpy import RankWarning warnings.simplefilter('ignore', RankWarning) p_axis_values1 = numpy.polyfit(freq[:axis_values_max_index], numpy.log(axis_values[:axis_values_max_index]), 5) f_axis_values1 = numpy.poly1d(p_axis_values1) # fit polynomial to psd beyond max value p_axis_values2 = numpy.polyfit(freq[axis_values_max_index:freq_max_index], numpy.log(axis_values[axis_values_max_index:freq_max_index]), 5) f_axis_values2 = numpy.poly1d(p_axis_values2) # use new frequency break and max frequency to apply function (ensures equal spacing of frequency intervals) # resampled to construct regularly-spaced frequencies regular_axis_values1 = numpy.exp(f_axis_values1(regular_freq[:regular_axis_values_max_index])) regular_axis_values2 = numpy.exp(f_axis_values2(regular_freq[regular_axis_values_max_index:])) # join regular_axis_values = numpy.append(regular_axis_values1, regular_axis_values2) M0 = len(regular_axis_values) # check rms of resampled PSD # df = regular_freq[1:]-regular_freq[:-1] # psd2rms_pxel = numpy.sqrt(numpy.sum(regular_az[:-1]*df)) # print ('Calculate rms of resampled PSD: ', psd2rms_pxel) original_regular_freq = regular_freq original_regular_axis_values = regular_axis_values # get amplitudes from psd values if (regular_axis_values<0).any(): raise ValueError('Resampling returns negative power 
values; change fit range') amp_axis_values = numpy.sqrt(regular_axis_values*2*freq_interval) # need to scale PSD by 2* frequency interval before square rooting, then by number of modes in resampled PSD # Now we generate some random phases for ant in range(nant): regular_freq = original_regular_freq regular_axis_values = original_regular_axis_values phi_axis_values = numpy.random.rand(len(regular_axis_values)) * 2 * numpy.pi # create complex array z_axis_values = amp_axis_values * numpy.exp(1j * phi_axis_values) # polar # make symmetrical frequencies mirror_z_axis_values = numpy.copy(z_axis_values) # make complex conjugates mirror_z_axis_values.imag -= 2 * z_axis_values.imag # make negative frequencies mirror_regular_freq = -regular_freq # join z_axis_values = numpy.append(z_axis_values, mirror_z_axis_values[::-1]) regular_freq = numpy.append(regular_freq, mirror_regular_freq[::-1]) # add a 0 Fourier term z_axis_values = numpy.append(0 + 0 * 1j, z_axis_values) regular_freq = numpy.append(0, regular_freq) # perform inverse fft ts = numpy.fft.ifft(z_axis_values) # set up and check scalings N = len(ts) Dt = pt.interval[0] ts = numpy.real(ts) ts *= M0 # the result is scaled by number of points in the signal, so multiply - real part - by this # The output of the iFFT will be a random time series on the finite # (bounded, limited) time interval t = 0 to tmax = (N-1) X Dt, # # where Dt = 1 / (2 X Fmax) # scale to time interval times = numpy.arange(ntimes) * Dt # Convert from arcsec to radians ts *= numpy.pi / (180.0 * 3600.0) # We take reference pointing to mean that the pointing errors are zero at the beginning # of the set of integrations if reference_pointing: ts[:] -= ts[0] # pt.data['time'] = times[:ntimes] if axis == 'az': pt.data['pointing'][:, ant, :, :, 0] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...] elif axis == 'el': pt.data['pointing'][:, ant, :, :, 1] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...] 
elif axis == 'pxel': pt.data['pointing'][:, ant, :, :, 0] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...] elif axis == 'pel': pt.data['pointing'][:, ant, :, :, 1] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...] else: raise ValueError("Unknown axis %s" % axis) return pt def ingest_unittest_visibility(config, frequency, channel_bandwidth, times, vis_pol, phasecentre, block=False, zerow=False): if block: vt = create_blockvisibility(config, times, frequency, channel_bandwidth=channel_bandwidth, phasecentre=phasecentre, weight=1.0, polarisation_frame=vis_pol, zerow=zerow) else: vt = create_visibility(config, times, frequency, channel_bandwidth=channel_bandwidth, phasecentre=phasecentre, weight=1.0, polarisation_frame=vis_pol, zerow=zerow) vt.data['vis'][...] = 0.0 return vt def create_unittest_components(model, flux, applypb=False, telescope='LOW', npixel=None, scale=1.0, single=False, symmetric=False, angular_scale=1.0): # Fill the visibility with exactly computed point sources. if npixel == None: _, _, _, npixel = model.data.shape spacing_pixels = int(scale * npixel) // 4 log.info('Spacing in pixels = %s' % spacing_pixels) if not symmetric: centers = [(0.2*angular_scale, 1.1*angular_scale)] else: centers = list() if not single: centers.append([0.0, 0.0]) for x in numpy.linspace(-1.2*angular_scale, 1.2*angular_scale, 7): if abs(x) > 1e-15: centers.append([x, x]) centers.append([x, -x]) model_pol = model.polarisation_frame # Make the list of components rpix = model.wcs.wcs.crpix components = [] for center in centers: ix, iy = center # The phase center in 0-relative coordinates is n // 2 so we centre the grid of # components on ny // 2, nx // 2. The wcs must be defined consistently. 
p = int(round(rpix[0] + ix * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[0]))), \ int(round(rpix[1] + iy * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[1]))) sc = pixel_to_skycoord(p[0], p[1], model.wcs, origin=1) log.info("Component at (%f, %f) [0-rel] %s" % (p[0], p[1], str(sc))) # Channel images comp = create_skycomponent(direction=sc, flux=flux, frequency=model.frequency, polarisation_frame=model_pol) components.append(comp) if applypb: beam = create_pb(model, telescope=telescope, use_local=False) components = apply_beam_to_skycomponent(components, beam) return components def create_unittest_model(vis, model_pol, npixel=None, cellsize=None, nchan=1): advice = advise_wide_field(vis, guard_band_image=2.0, delA=0.02, facets=1, wprojection_planes=1, oversampling_synthesised_beam=4.0) if cellsize is None: cellsize = advice['cellsize'] if npixel is None: npixel = advice['npixels2'] model = create_image_from_visibility(vis, npixel=npixel, cellsize=cellsize, nchan=nchan, polarisation_frame=model_pol) return model def insert_unittest_errors(vt, seed=180555, calibration_context="TG", amp_errors=None, phase_errors=None): """Simulate gain errors and apply :param vt: :param seed: Random number seed, set to big integer repeat values from run to run :param phase_errors: e.g. {'T': 1.0, 'G': 0.1, 'B': 0.01} :param amp_errors: e.g. 
{'T': 0.0, 'G': 0.01, 'B': 0.01} :return: """ controls = create_calibration_controls() if amp_errors is None: amp_errors = {'T': 0.0, 'G': 0.01, 'B': 0.01} if phase_errors is None: phase_errors = {'T': 1.0, 'G': 0.1, 'B': 0.01} for c in calibration_context: gaintable = create_gaintable_from_blockvisibility(vt, timeslice=controls[c]['timeslice']) gaintable = simulate_gaintable(gaintable, phase_error=phase_errors[c], amplitude_error=amp_errors[c], timeslice=controls[c]['timeslice'], phase_only=controls[c]['phase_only'], crosspol=controls[c]['shape'] == 'matrix') vt = apply_gaintable(vt, gaintable, timeslice=controls[c]['timeslice'], inverse=True) return vt
SKA-ScienceDataProcessor/algorithm-reference-library
processing_components/simulation/testing_support.py
Python
apache-2.0
47,842
import json
from typing import NamedTuple
from collections import namedtuple

import kfp
import kfp.dsl as dsl
from kfp import components
from kfp.dsl.types import Integer

# Path of the namespace file Kubernetes mounts into every pod.
_NAMESPACE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"


def get_current_namespace():
    """Return the namespace this code runs in, or "kubeflow" as a fallback.

    Reads the serviceaccount namespace file mounted by Kubernetes; outside
    a cluster (or without the mount) the read fails and the conventional
    "kubeflow" namespace is returned instead.
    """
    try:
        # Use a context manager so the handle is always closed (the
        # original `open(...).read()` leaked it), and catch only OSError:
        # a missing/unreadable mount raises that, and a bare `except:`
        # would also swallow KeyboardInterrupt/SystemExit.
        with open(_NAMESPACE_PATH) as namespace_file:
            current_namespace = namespace_file.read()
    except OSError:
        current_namespace = "kubeflow"
    return current_namespace


def create_worker_spec(
    worker_num: int = 0
) -> NamedTuple(
    "CreatWorkerSpec", [("worker_spec", dict)]
):
    """Build the PyTorchJob worker spec for ``worker_num`` replicas.

    Returns a one-field namedtuple (``worker_spec``) holding the worker pod
    template, or an empty dict when no workers are requested.

    NOTE(review): "CreatWorkerSpec" looks like a typo for
    "CreateWorkerSpec", but the annotation name may surface in generated
    component metadata, so it is deliberately left unchanged.
    """
    worker = {}
    if worker_num > 0:
        worker = {
            "replicas": worker_num,
            "restartPolicy": "OnFailure",
            "template": {
                "metadata": {
                    "annotations": {
                        "sidecar.istio.io/inject": "false"
                    }
                },
                "spec": {
                    "containers": [
                        {
                            "args": [
                                "--backend",
                                "gloo",
                            ],
                            "image": "public.ecr.aws/pytorch-samples/pytorch_dist_mnist:latest",
                            "name": "pytorch",
                            "resources": {
                                "requests": {
                                    "memory": "4Gi",
                                    "cpu": "2000m",
                                    # Uncomment for GPU
                                    # "nvidia.com/gpu": 1,
                                },
                                "limits": {
                                    "memory": "4Gi",
                                    "cpu": "2000m",
                                    # Uncomment for GPU
                                    # "nvidia.com/gpu": 1,
                                },
                            },
                        }
                    ]
                },
            },
        }

    worker_spec_output = namedtuple(
        "MyWorkerOutput", ["worker_spec"]
    )
    return worker_spec_output(worker)


# Containerized version of create_worker_spec, usable as a pipeline step.
worker_spec_op = components.func_to_container_op(
    create_worker_spec,
    base_image="python:slim",
)


@dsl.pipeline(
    name="launch-kubeflow-pytorchjob",
    description="An example to launch pytorch.",
)
def mnist_train(
    # NOTE: this default is evaluated once, at pipeline-compile time.
    namespace: str = get_current_namespace(),
    worker_replicas: int = 1,
    ttl_seconds_after_finished: int = -1,
    job_timeout_minutes: int = 600,
    delete_after_done: bool = False,
):
    """Pipeline that launches a distributed MNIST PyTorchJob.

    :param namespace: namespace to create the PyTorchJob in.
    :param worker_replicas: number of worker pods (0 = master only).
    :param ttl_seconds_after_finished: job TTL after completion (-1 = keep).
    :param job_timeout_minutes: launcher-side timeout for the job.
    :param delete_after_done: whether the launcher deletes the job when done.
    """
    pytorchjob_launcher_op = components.load_component_from_file(
        "./component.yaml"
    )

    master = {
        "replicas": 1,
        "restartPolicy": "OnFailure",
        "template": {
            "metadata": {
                "annotations": {
                    # See https://github.com/kubeflow/website/issues/2011
                    "sidecar.istio.io/inject": "false"
                }
            },
            "spec": {
                "containers": [
                    {
                        # To override default command
                        # "command": [
                        #   "python",
                        #   "/opt/mnist/src/mnist.py"
                        # ],
                        "args": [
                            "--backend",
                            "gloo",
                        ],
                        # Or, create your own image from
                        # https://github.com/kubeflow/pytorch-operator/tree/master/examples/mnist
                        "image": "public.ecr.aws/pytorch-samples/pytorch_dist_mnist:latest",
                        "name": "pytorch",
                        "resources": {
                            "requests": {
                                "memory": "4Gi",
                                "cpu": "2000m",
                                # Uncomment for GPU
                                # "nvidia.com/gpu": 1,
                            },
                            "limits": {
                                "memory": "4Gi",
                                "cpu": "2000m",
                                # Uncomment for GPU
                                # "nvidia.com/gpu": 1,
                            },
                        },
                    }
                ],
                # If imagePullSecrets required
                # "imagePullSecrets": [
                #     {"name": "image-pull-secret"},
                # ],
            },
        },
    }

    worker_spec_create = worker_spec_op(
        worker_replicas
    )

    # Launch and monitor the job with the launcher
    pytorchjob_launcher_op(
        # Note: name needs to be a unique pytorchjob name in the namespace.
        # Using RUN_ID_PLACEHOLDER is one way of getting something unique.
        name=f"name-{kfp.dsl.RUN_ID_PLACEHOLDER}",
        namespace=namespace,
        master_spec=master,
        # pass worker_spec as a string because the JSON serializer will convert
        # the placeholder for worker_replicas (which it sees as a string) into
        # a quoted variable (eg a string) instead of an unquoted variable
        # (number). If worker_replicas is quoted in the spec, it will break in
        # k8s. See https://github.com/kubeflow/pipelines/issues/4776
        worker_spec=worker_spec_create.outputs[
            "worker_spec"
        ],
        ttl_seconds_after_finished=ttl_seconds_after_finished,
        job_timeout_minutes=job_timeout_minutes,
        delete_after_done=delete_after_done,
    )


if __name__ == "__main__":
    import kfp.compiler as compiler

    pipeline_file = "test.tar.gz"
    print(
        f"Compiling pipeline as {pipeline_file}"
    )
    compiler.Compiler().compile(
        mnist_train, pipeline_file
    )

    # # To run:
    # client = kfp.Client()
    # run = client.create_run_from_pipeline_package(
    #     pipeline_file,
    #     arguments={},
    #     run_name="test pytorchjob run"
    # )
    # print(f"Created run {run}")
kubeflow/pipelines
components/kubeflow/pytorch-launcher/sample.py
Python
apache-2.0
6,178
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from . import settings
from django.contrib.auth import get_user_model


def get_website_user():
    """Return the shared 'website' user, creating it on first use.

    The returned account stands in for a real user whenever there is no
    direct link to an actual person. A freshly created account is made
    non-interactive: it receives an unusable password and is deactivated.
    """
    user_model = get_user_model()
    website_user, was_created = user_model.objects.get_or_create(
        **settings.WEBSITE_USER)
    if not was_created:
        return website_user
    # First time through: lock the account down so nobody can log in as it.
    website_user.set_unusable_password()
    website_user.is_active = False
    website_user.save()
    return website_user
thecut/thecut-authorship
thecut/authorship/utils.py
Python
apache-2.0
551
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html from scrapy.item import Item, Field class DmmJokeItem(Item): # define the fields for your item here like: # name = scrapy.Field() pass class DVDDetailItem(Item): m_fields = ['title','rental_date','production_year','production_country', 'recorded_time','script','original','details','subtitles','sound','series', 'movie_id'] m_type = Field() title = Field() link = Field() img_url = Field() rental_date = Field() production_year = Field() production_country = Field() recorded_time = Field() performers = Field() supervision = Field() production = Field() script = Field() original = Field() details = Field() subtitles = Field() sound = Field() series = Field() studios = Field() genre = Field() movie_id = Field() average_rating = Field() brief = Field() slogan = Field() create_time = Field() update_time = Field() class DVDDetailListItem(Item): m_type = Field() link = Field() img_url = Field() img_desc = Field() price = Field() create_time = Field() update_time = Field() class ADVDDetailItem(Item): m_type = Field() title = Field() link = Field() img_url = Field() platform = Field() delivery_start_date = Field() release_date = Field() recorded_time = Field() performers = Field() supervision = Field() series = Field() studios = Field() lables = Field() genre = Field() movie_id = Field() average_rating = Field() brief = Field() sample_images = Field() total_comment_num = Field() create_time = Field() update_time = Field() class ADVDDetailListItem(Item): m_type = Field() link = Field() img_url = Field() img_desc = Field() price = Field() create_time = Field() update_time = Field()
counsellors/scrapy_dmm_dvd
dmm_joke/dmm_joke/items.py
Python
apache-2.0
2,040
import contextlib
import gc
import multiprocessing
import os

from memsql_loader.util.apsw_storage import APSWStorage
from memsql_loader.util import paths

MEMSQL_LOADER_DB = 'memsql_loader.db'


def get_loader_db_path():
    """Return the absolute path of the loader's SQLite database file."""
    return os.path.join(paths.get_data_dir(), MEMSQL_LOADER_DB)


# IMPORTANT NOTE: This class cannot be shared across forked processes unless
# you use fork_wrapper.
class LoaderStorage(APSWStorage):
    """Process-wide singleton wrapper around the loader's APSW database."""

    _instance = None
    _initialized = False
    _instance_lock = multiprocessing.RLock()

    # We use LoaderStorage as a singleton.
    def __new__(cls, *args, **kwargs):
        with cls._instance_lock:
            if cls._instance is None:
                cls._instance = super(LoaderStorage, cls).__new__(
                    cls, *args, **kwargs)
                cls._initialized = False
            return cls._instance

    @classmethod
    def drop_database(cls):
        """Delete the database file (plus WAL/SHM sidecars) and reset the
        singleton so the next instantiation starts from scratch."""
        with cls._instance_lock:
            # Compute the path once instead of once per file check.
            db_path = get_loader_db_path()
            # SQLite in WAL mode keeps '-shm' and '-wal' sidecar files.
            for suffix in ('', '-shm', '-wal'):
                candidate = db_path + suffix
                if os.path.isfile(candidate):
                    os.remove(candidate)
            cls._instance = None

    @classmethod
    @contextlib.contextmanager
    def fork_wrapper(cls):
        # This context manager should be used around any code that forks new
        # processes that will use a LoaderStorage object (e.g. Worker objects).
        # This ensures that we don't share SQLite connections across forked
        # processes.
        with cls._instance_lock:
            if cls._instance is not None:
                cls._instance.close_connections()
                # We garbage collect here to clean up any SQLite objects we
                # may have missed; this is important because any surviving
                # objects post-fork will mess up SQLite connections in the
                # child process. We use generation=2 to collect as many
                # objects as possible.
                gc.collect(2)
        try:
            yield
        finally:
            # Re-open connections even if the forked work raised; otherwise
            # the singleton would be left with closed connections for every
            # subsequent user in this process.
            with cls._instance_lock:
                if cls._instance is not None:
                    cls._instance.setup_connections()

    def __init__(self):
        with LoaderStorage._instance_lock:
            # Since this is a singleton object, we don't want to call the
            # parent object's __init__ if we've already instantiated this
            # object in __new__. However, we may have closed this object's
            # connections in fork_wrapper above; in that case, we want to set
            # up new database connections.
            if not LoaderStorage._initialized:
                super(LoaderStorage, self).__init__(get_loader_db_path())
                LoaderStorage._initialized = True
                return
            elif not self._db or not self._db_t:
                self.setup_connections()
memsql/memsql-loader
memsql_loader/loader_db/storage.py
Python
apache-2.0
2,959
import zstackwoodpecker.test_state as ts_header
import os

TestAction = ts_header.TestAction


def path():
    """Describe one snapshot test path: the initial formation, the checking
    point, and the ordered list of actions the test driver replays."""
    scsi_flag = 'flag=scsi'
    actions = [
        [TestAction.create_vm, 'vm1', ],
        [TestAction.create_volume, 'volume1', scsi_flag],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', scsi_flag],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', scsi_flag],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
        [TestAction.resize_volume, 'vm1', 5 * 1024 * 1024],
        [TestAction.detach_volume, 'volume1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot8'],
        [TestAction.clone_vm, 'vm1', 'vm2'],
        [TestAction.create_volume_backup, 'volume2', 'volume2-backup1'],
        [TestAction.migrate_vm, 'vm1'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot5'],
    ]
    return {
        'initial_formation': "template5",
        'checking_point': 8,
        'path_list': actions,
    }


'''
The final status:
Running:['vm1', 'vm2']
Stopped:[]
Enadbled:['vm1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume2-backup1']
attached:['volume2', 'volume3']
Detached:['volume1']
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot8', 'volume2-snapshot8', 'volume3-snapshot8']
Expunged:[]
Ha:[]
Group:
'''
zstackio/zstack-woodpecker
integrationtest/vm/multihosts/vm_snapshots/paths/xsky_path23.py
Python
apache-2.0
1,415
""" Type variables for Parametric polymorphism. Cretonne instructions and instruction transformations can be specified to be polymorphic by using type variables. """ from __future__ import absolute_import import math from . import types, is_power_of_two from copy import copy try: from typing import Tuple, Union, Iterable, Any, Set, TYPE_CHECKING # noqa if TYPE_CHECKING: from srcgen import Formatter # noqa Interval = Tuple[int, int] # An Interval where `True` means 'everything' BoolInterval = Union[bool, Interval] # Set of special types: None, False, True, or iterable. SpecialSpec = Union[bool, Iterable[types.SpecialType]] except ImportError: pass MAX_LANES = 256 MAX_BITS = 64 MAX_BITVEC = MAX_BITS * MAX_LANES def int_log2(x): # type: (int) -> int return int(math.log(x, 2)) def intersect(a, b): # type: (Interval, Interval) -> Interval """ Given two `(min, max)` inclusive intervals, compute their intersection. Use `(None, None)` to represent the empty interval on input and output. """ if a[0] is None or b[0] is None: return (None, None) lo = max(a[0], b[0]) assert lo is not None hi = min(a[1], b[1]) assert hi is not None if lo <= hi: return (lo, hi) else: return (None, None) def is_empty(intv): # type: (Interval) -> bool return intv is None or intv is False or intv == (None, None) def encode_bitset(vals, size): # type: (Iterable[int], int) -> int """ Encode a set of values (each between 0 and size) as a bitset of width size. """ res = 0 assert is_power_of_two(size) and size <= 64 for v in vals: assert 0 <= v and v < size res |= 1 << v return res def pp_set(s): # type: (Iterable[Any]) -> str """ Return a consistent string representation of a set (ordering is fixed) """ return '{' + ', '.join([repr(x) for x in sorted(s)]) + '}' def decode_interval(intv, full_range, default=None): # type: (BoolInterval, Interval, int) -> Interval """ Decode an interval specification which can take the following values: True Use the `full_range`. 
`False` or `None` An empty interval (lo, hi) An explicit interval """ if isinstance(intv, tuple): # mypy bug here: 'builtins.None' object is not iterable lo, hi = intv assert is_power_of_two(lo) assert is_power_of_two(hi) assert lo <= hi assert lo >= full_range[0] assert hi <= full_range[1] return intv if intv: return full_range else: return (default, default) def interval_to_set(intv): # type: (Interval) -> Set if is_empty(intv): return set() (lo, hi) = intv assert is_power_of_two(lo) assert is_power_of_two(hi) assert lo <= hi return set([2**i for i in range(int_log2(lo), int_log2(hi)+1)]) def legal_bool(bits): # type: (int) -> bool """ True iff bits is a legal bit width for a bool type. bits == 1 || bits \in { 8, 16, .. MAX_BITS } """ return bits == 1 or \ (bits >= 8 and bits <= MAX_BITS and is_power_of_two(bits)) class TypeSet(object): """ A set of types. We don't allow arbitrary subsets of types, but use a parametrized approach instead. Objects of this class can be used as dictionary keys. Parametrized type sets are specified in terms of ranges: - The permitted range of vector lanes, where 1 indicates a scalar type. - The permitted range of integer types. - The permitted range of floating point types, and - The permitted range of boolean types. The ranges are inclusive from smallest bit-width to largest bit-width. 
A typeset representing scalar integer types `i8` through `i32`: >>> TypeSet(ints=(8, 32)) TypeSet(lanes={1}, ints={8, 16, 32}) Passing `True` instead of a range selects all available scalar types: >>> TypeSet(ints=True) TypeSet(lanes={1}, ints={8, 16, 32, 64}) >>> TypeSet(floats=True) TypeSet(lanes={1}, floats={32, 64}) >>> TypeSet(bools=True) TypeSet(lanes={1}, bools={1, 8, 16, 32, 64}) Similarly, passing `True` for the lanes selects all possible scalar and vector types: >>> TypeSet(lanes=True, ints=True) TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={8, 16, 32, 64}) Finally, a type set can contain special types (derived from `SpecialType`) which can't appear as lane types. :param lanes: `(min, max)` inclusive range of permitted vector lane counts. :param ints: `(min, max)` inclusive range of permitted scalar integer widths. :param floats: `(min, max)` inclusive range of permitted scalar floating point widths. :param bools: `(min, max)` inclusive range of permitted scalar boolean widths. :param bitvecs : `(min, max)` inclusive range of permitted bitvector widths. :param specials: Sequence of special types to appear in the set. """ def __init__( self, lanes=None, # type: BoolInterval ints=None, # type: BoolInterval floats=None, # type: BoolInterval bools=None, # type: BoolInterval bitvecs=None, # type: BoolInterval specials=None # type: SpecialSpec ): # type: (...) -> None self.lanes = interval_to_set(decode_interval(lanes, (1, MAX_LANES), 1)) self.ints = interval_to_set(decode_interval(ints, (8, MAX_BITS))) self.floats = interval_to_set(decode_interval(floats, (32, 64))) self.bools = interval_to_set(decode_interval(bools, (1, MAX_BITS))) self.bools = set(filter(legal_bool, self.bools)) self.bitvecs = interval_to_set(decode_interval(bitvecs, (1, MAX_BITVEC))) # Allow specials=None, specials=True, specials=(...) 
self.specials = set() # type: Set[types.SpecialType] if isinstance(specials, bool): if specials: self.specials = set(types.ValueType.all_special_types) elif specials: self.specials = set(specials) def copy(self): # type: (TypeSet) -> TypeSet """ Return a copy of our self. """ n = TypeSet() n.lanes = copy(self.lanes) n.ints = copy(self.ints) n.floats = copy(self.floats) n.bools = copy(self.bools) n.bitvecs = copy(self.bitvecs) n.specials = copy(self.specials) return n def typeset_key(self): # type: () -> Tuple[Tuple, Tuple, Tuple, Tuple, Tuple, Tuple] """Key tuple used for hashing and equality.""" return (tuple(sorted(list(self.lanes))), tuple(sorted(list(self.ints))), tuple(sorted(list(self.floats))), tuple(sorted(list(self.bools))), tuple(sorted(list(self.bitvecs))), tuple(sorted(s.name for s in self.specials))) def __hash__(self): # type: () -> int h = hash(self.typeset_key()) assert h == getattr(self, 'prev_hash', h), "TypeSet changed!" self.prev_hash = h return h def __eq__(self, other): # type: (object) -> bool if isinstance(other, TypeSet): return self.typeset_key() == other.typeset_key() else: return False def __ne__(self, other): # type: (object) -> bool return not self.__eq__(other) def __repr__(self): # type: () -> str s = 'TypeSet(lanes={}'.format(pp_set(self.lanes)) if len(self.ints) > 0: s += ', ints={}'.format(pp_set(self.ints)) if len(self.floats) > 0: s += ', floats={}'.format(pp_set(self.floats)) if len(self.bools) > 0: s += ', bools={}'.format(pp_set(self.bools)) if len(self.bitvecs) > 0: s += ', bitvecs={}'.format(pp_set(self.bitvecs)) if len(self.specials) > 0: s += ', specials=[{}]'.format(pp_set(self.specials)) return s + ')' def emit_fields(self, fmt): # type: (Formatter) -> None """Emit field initializers for this typeset.""" assert len(self.bitvecs) == 0, "Bitvector types are not emitable." 
fmt.comment(repr(self)) fields = (('lanes', 16), ('ints', 8), ('floats', 8), ('bools', 8)) for (field, bits) in fields: vals = [int_log2(x) for x in getattr(self, field)] fmt.line('{}: BitSet::<u{}>({}),' .format(field, bits, encode_bitset(vals, bits))) def __iand__(self, other): # type: (TypeSet) -> TypeSet """ Intersect self with other type set. >>> a = TypeSet(lanes=True, ints=(16, 32)) >>> a TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={16, 32}) >>> b = TypeSet(lanes=(4, 16), ints=True) >>> a &= b >>> a TypeSet(lanes={4, 8, 16}, ints={16, 32}) >>> a = TypeSet(lanes=True, bools=(1, 8)) >>> b = TypeSet(lanes=True, bools=(16, 32)) >>> a &= b >>> a TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}) """ self.lanes.intersection_update(other.lanes) self.ints.intersection_update(other.ints) self.floats.intersection_update(other.floats) self.bools.intersection_update(other.bools) self.bitvecs.intersection_update(other.bitvecs) self.specials.intersection_update(other.specials) return self def issubset(self, other): # type: (TypeSet) -> bool """ Return true iff self is a subset of other """ return self.lanes.issubset(other.lanes) and \ self.ints.issubset(other.ints) and \ self.floats.issubset(other.floats) and \ self.bools.issubset(other.bools) and \ self.bitvecs.issubset(other.bitvecs) and \ self.specials.issubset(other.specials) def lane_of(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across lane_of """ new = self.copy() new.lanes = set([1]) new.bitvecs = set() return new def as_bool(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across as_bool """ new = self.copy() new.ints = set() new.floats = set() new.bitvecs = set() if len(self.lanes.difference(set([1]))) > 0: new.bools = self.ints.union(self.floats).union(self.bools) if 1 in self.lanes: new.bools.add(1) return new def half_width(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across halfwidth """ new = 
self.copy() new.ints = set([x//2 for x in self.ints if x > 8]) new.floats = set([x//2 for x in self.floats if x > 32]) new.bools = set([x//2 for x in self.bools if x > 8]) new.bitvecs = set([x//2 for x in self.bitvecs if x > 1]) new.specials = set() return new def double_width(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across doublewidth """ new = self.copy() new.ints = set([x*2 for x in self.ints if x < MAX_BITS]) new.floats = set([x*2 for x in self.floats if x < MAX_BITS]) new.bools = set(filter(legal_bool, set([x*2 for x in self.bools if x < MAX_BITS]))) new.bitvecs = set([x*2 for x in self.bitvecs if x < MAX_BITVEC]) new.specials = set() return new def half_vector(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across halfvector """ new = self.copy() new.bitvecs = set() new.lanes = set([x//2 for x in self.lanes if x > 1]) new.specials = set() return new def double_vector(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across doublevector """ new = self.copy() new.bitvecs = set() new.lanes = set([x*2 for x in self.lanes if x < MAX_LANES]) new.specials = set() return new def to_bitvec(self): # type: () -> TypeSet """ Return a TypeSet describing the image of self across to_bitvec """ assert len(self.bitvecs) == 0 all_scalars = self.ints.union(self.floats.union(self.bools)) new = self.copy() new.lanes = set([1]) new.ints = set() new.bools = set() new.floats = set() new.bitvecs = set([lane_w * nlanes for lane_w in all_scalars for nlanes in self.lanes]) new.specials = set() return new def image(self, func): # type: (str) -> TypeSet """ Return the image of self across the derived function func """ if (func == TypeVar.LANEOF): return self.lane_of() elif (func == TypeVar.ASBOOL): return self.as_bool() elif (func == TypeVar.HALFWIDTH): return self.half_width() elif (func == TypeVar.DOUBLEWIDTH): return self.double_width() elif (func == TypeVar.HALFVECTOR): return 
self.half_vector() elif (func == TypeVar.DOUBLEVECTOR): return self.double_vector() elif (func == TypeVar.TOBITVEC): return self.to_bitvec() else: assert False, "Unknown derived function: " + func def preimage(self, func): # type: (str) -> TypeSet """ Return the inverse image of self across the derived function func """ # The inverse of the empty set is always empty if (self.size() == 0): return self if (func == TypeVar.LANEOF): new = self.copy() new.bitvecs = set() new.lanes = set([2**i for i in range(0, int_log2(MAX_LANES)+1)]) return new elif (func == TypeVar.ASBOOL): new = self.copy() new.bitvecs = set() if 1 not in self.bools: new.ints = self.bools.difference(set([1])) new.floats = self.bools.intersection(set([32, 64])) # If b1 is not in our typeset, than lanes=1 cannot be in the # pre-image, as as_bool() of scalars is always b1. new.lanes = self.lanes.difference(set([1])) else: new.ints = set([2**x for x in range(3, 7)]) new.floats = set([32, 64]) return new elif (func == TypeVar.HALFWIDTH): return self.double_width() elif (func == TypeVar.DOUBLEWIDTH): return self.half_width() elif (func == TypeVar.HALFVECTOR): return self.double_vector() elif (func == TypeVar.DOUBLEVECTOR): return self.half_vector() elif (func == TypeVar.TOBITVEC): new = TypeSet() # Start with all possible lanes/ints/floats/bools lanes = interval_to_set(decode_interval(True, (1, MAX_LANES), 1)) ints = interval_to_set(decode_interval(True, (8, MAX_BITS))) floats = interval_to_set(decode_interval(True, (32, 64))) bools = interval_to_set(decode_interval(True, (1, MAX_BITS))) # See which combinations have a size that appears in self.bitvecs has_t = set() # type: Set[Tuple[str, int, int]] for l in lanes: for i in ints: if i * l in self.bitvecs: has_t.add(('i', i, l)) for i in bools: if i * l in self.bitvecs: has_t.add(('b', i, l)) for i in floats: if i * l in self.bitvecs: has_t.add(('f', i, l)) for (t, width, lane) in has_t: new.lanes.add(lane) if (t == 'i'): new.ints.add(width) elif (t == 
'b'): new.bools.add(width) else: assert t == 'f' new.floats.add(width) return new else: assert False, "Unknown derived function: " + func def size(self): # type: () -> int """ Return the number of concrete types represented by this typeset """ return (len(self.lanes) * (len(self.ints) + len(self.floats) + len(self.bools) + len(self.bitvecs)) + len(self.specials)) def concrete_types(self): # type: () -> Iterable[types.ValueType] def by(scalar, lanes): # type: (types.LaneType, int) -> types.ValueType if (lanes == 1): return scalar else: return scalar.by(lanes) for nlanes in self.lanes: for bits in self.ints: yield by(types.IntType.with_bits(bits), nlanes) for bits in self.floats: yield by(types.FloatType.with_bits(bits), nlanes) for bits in self.bools: yield by(types.BoolType.with_bits(bits), nlanes) for bits in self.bitvecs: assert nlanes == 1 yield types.BVType.with_bits(bits) for spec in self.specials: yield spec def get_singleton(self): # type: () -> types.ValueType """ Return the singleton type represented by self. Can only call on typesets containing 1 type. """ types = list(self.concrete_types()) assert len(types) == 1 return types[0] def widths(self): # type: () -> Set[int] """ Return a set of the widths of all possible types in self""" scalar_w = self.ints.union(self.floats.union(self.bools)) scalar_w = scalar_w.union(self.bitvecs) return set(w * l for l in self.lanes for w in scalar_w) class TypeVar(object): """ Type variables can be used in place of concrete types when defining instructions. This makes the instructions *polymorphic*. A type variable is restricted to vary over a subset of the value types. This subset is specified by a set of flags that control the permitted base types and whether the type variable can assume scalar or vector types, or both. :param name: Short name of type variable used in instruction descriptions. :param doc: Documentation string. :param ints: Allow all integer base types, or `(min, max)` bit-range. 
    :param floats: Allow all floating point base types, or `(min, max)`
                   bit-range.
    :param bools: Allow all boolean base types, or `(min, max)` bit-range.
    :param scalars: Allow type variable to assume scalar types.
    :param simd: Allow type variable to assume vector types, or `(min, max)`
                 lane count range.
    :param bitvecs: Allow all BitVec base types, or `(min, max)` bit-range.
    """

    def __init__(
            self,
            name,                 # type: str
            doc,                  # type: str
            ints=False,           # type: BoolInterval
            floats=False,         # type: BoolInterval
            bools=False,          # type: BoolInterval
            scalars=True,         # type: bool
            simd=False,           # type: BoolInterval
            bitvecs=False,        # type: BoolInterval
            base=None,            # type: TypeVar
            derived_func=None,    # type: str
            specials=None         # type: SpecialSpec
            ):
        # type: (...) -> None
        self.name = name
        self.__doc__ = doc
        # A TypeVar is either "free" (carries its own TypeSet) or "derived"
        # (a function `derived_func` applied to another TypeVar `base`).
        self.is_derived = isinstance(base, TypeVar)
        if base:
            assert self.is_derived
            assert derived_func
            self.base = base
            self.derived_func = derived_func
            # Derived vars get a synthetic name like `half_width(x)`.
            self.name = '{}({})'.format(derived_func, base.name)
        else:
            # Free var: build the concrete TypeSet this var ranges over.
            # Disallowing scalars forces at least 2 lanes.
            min_lanes = 1 if scalars else 2
            lanes = decode_interval(simd, (min_lanes, MAX_LANES), 1)
            self.type_set = TypeSet(
                    lanes=lanes,
                    ints=ints,
                    floats=floats,
                    bools=bools,
                    bitvecs=bitvecs,
                    specials=specials)

    @staticmethod
    def singleton(typ):
        # type: (types.ValueType) -> TypeVar
        """Create a type variable that can only assume a single type."""
        scalar = None  # type: types.ValueType
        # Decompose `typ` into a scalar lane type plus a lane-count interval.
        if isinstance(typ, types.VectorType):
            scalar = typ.base
            lanes = (typ.lanes, typ.lanes)
        elif isinstance(typ, types.LaneType):
            scalar = typ
            lanes = (1, 1)
        elif isinstance(typ, types.SpecialType):
            # Special types have no lane structure; they live in `specials`.
            return TypeVar(typ.name, typ.__doc__, specials=[typ])
        else:
            assert isinstance(typ, types.BVType)
            scalar = typ
            lanes = (1, 1)

        ints = None
        floats = None
        bools = None
        bitvecs = None

        # Pin the matching base-type interval to exactly `scalar.bits` bits.
        if isinstance(scalar, types.IntType):
            ints = (scalar.bits, scalar.bits)
        elif isinstance(scalar, types.FloatType):
            floats = (scalar.bits, scalar.bits)
        elif isinstance(scalar, types.BoolType):
            bools = (scalar.bits, scalar.bits)
        elif isinstance(scalar, types.BVType):
            bitvecs = (scalar.bits, scalar.bits)

        tv = TypeVar(
                typ.name,
                typ.__doc__,
                ints=ints,
                floats=floats,
                bools=bools,
                bitvecs=bitvecs,
                simd=lanes)
        return tv

    def __str__(self):
        # type: () -> str
        return "`{}`".format(self.name)

    def __repr__(self):
        # type: () -> str
        if self.is_derived:
            return (
                    'TypeVar({}, base={}, derived_func={})'
                    .format(self.name, self.base, self.derived_func))
        else:
            return (
                    'TypeVar({}, {})'
                    .format(self.name, self.type_set))

    def __hash__(self):
        # type: () -> int
        # Free vars hash (and compare, below) by identity; derived vars hash
        # structurally so equal derivations of the same base collide.
        if (not self.is_derived):
            return object.__hash__(self)

        return hash((self.derived_func, self.base))

    def __eq__(self, other):
        # type: (object) -> bool
        if not isinstance(other, TypeVar):
            return False
        if self.is_derived and other.is_derived:
            return (
                    self.derived_func == other.derived_func and
                    self.base == other.base)
        else:
            # Matches __hash__: free vars are only equal to themselves.
            return self is other

    def __ne__(self, other):
        # type: (object) -> bool
        return not self.__eq__(other)

    # Supported functions for derived type variables.
    # The names here must match the method names on `ir::types::Type`.
    # The camel_case of the names must match `enum OperandConstraint` in
    # `instructions.rs`.
    LANEOF = 'lane_of'
    ASBOOL = 'as_bool'
    HALFWIDTH = 'half_width'
    DOUBLEWIDTH = 'double_width'
    HALFVECTOR = 'half_vector'
    DOUBLEVECTOR = 'double_vector'
    TOBITVEC = 'to_bitvec'

    @staticmethod
    def is_bijection(func):
        # type: (str) -> bool
        # Only the width/lane halving-doubling pairs are invertible;
        # lane_of/as_bool/to_bitvec lose information.
        return func in [
            TypeVar.HALFWIDTH,
            TypeVar.DOUBLEWIDTH,
            TypeVar.HALFVECTOR,
            TypeVar.DOUBLEVECTOR]

    @staticmethod
    def inverse_func(func):
        # type: (str) -> str
        return {
            TypeVar.HALFWIDTH: TypeVar.DOUBLEWIDTH,
            TypeVar.DOUBLEWIDTH: TypeVar.HALFWIDTH,
            TypeVar.HALFVECTOR: TypeVar.DOUBLEVECTOR,
            TypeVar.DOUBLEVECTOR: TypeVar.HALFVECTOR
        }[func]

    @staticmethod
    def derived(base, derived_func):
        # type: (TypeVar, str) -> TypeVar
        """Create a type variable that is a function of another."""

        # Safety checks to avoid over/underflows.
        ts = base.get_typeset()

        assert len(ts.specials) == 0, "Can't derive from special types"

        if derived_func == TypeVar.HALFWIDTH:
            # Every member of the base set must be halvable; the minimum
            # widths here (8 for ints/bools, 32 for floats) are the smallest
            # representable lane widths of each class.
            if len(ts.ints) > 0:
                assert min(ts.ints) > 8, "Can't halve all integer types"
            if len(ts.floats) > 0:
                assert min(ts.floats) > 32, "Can't halve all float types"
            if len(ts.bools) > 0:
                assert min(ts.bools) > 8, "Can't halve all boolean types"
        elif derived_func == TypeVar.DOUBLEWIDTH:
            if len(ts.ints) > 0:
                assert max(ts.ints) < MAX_BITS,\
                    "Can't double all integer types."
            if len(ts.floats) > 0:
                assert max(ts.floats) < MAX_BITS,\
                    "Can't double all float types."
            if len(ts.bools) > 0:
                assert max(ts.bools) < MAX_BITS, "Can't double all bool types."
        elif derived_func == TypeVar.HALFVECTOR:
            assert min(ts.lanes) > 1, "Can't halve a scalar type"
        elif derived_func == TypeVar.DOUBLEVECTOR:
            assert max(ts.lanes) < MAX_LANES, "Can't double 256 lanes."

        return TypeVar(None, None, base=base, derived_func=derived_func)

    @staticmethod
    def from_typeset(ts):
        # type: (TypeSet) -> TypeVar
        """ Create a type variable from a type set."""
        # Anonymous free var wrapping an existing TypeSet (not copied).
        tv = TypeVar(None, None)
        tv.type_set = ts
        return tv

    def lane_of(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that is the scalar lane type of this
        type variable.

        When this type variable assumes a scalar type, the derived type will
        be the same scalar type.
        """
        return TypeVar.derived(self, self.LANEOF)

    def as_bool(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that has the same vector geometry as
        this type variable, but with boolean lanes. Scalar types map to `b1`.
        """
        return TypeVar.derived(self, self.ASBOOL)

    def half_width(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that has the same number of vector
        lanes as this one, but the lanes are half the width.
        """
        return TypeVar.derived(self, self.HALFWIDTH)

    def double_width(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that has the same number of vector
        lanes as this one, but the lanes are double the width.
        """
        return TypeVar.derived(self, self.DOUBLEWIDTH)

    def half_vector(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that has half the number of vector
        lanes as this one, with the same lane type.
        """
        return TypeVar.derived(self, self.HALFVECTOR)

    def double_vector(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that has twice the number of vector
        lanes as this one, with the same lane type.
        """
        return TypeVar.derived(self, self.DOUBLEVECTOR)

    def to_bitvec(self):
        # type: () -> TypeVar
        """
        Return a derived type variable that represent a flat bitvector with
        the same size as self
        """
        return TypeVar.derived(self, self.TOBITVEC)

    def singleton_type(self):
        # type: () -> types.ValueType
        """
        If the associated typeset has a single type return it. Otherwise
        return None
        """
        ts = self.get_typeset()
        if ts.size() != 1:
            return None

        return ts.get_singleton()

    def free_typevar(self):
        # type: () -> TypeVar
        """
        Get the free type variable controlling this one.
        """
        if self.is_derived:
            # Walk the derivation chain down to the underlying free var.
            return self.base.free_typevar()
        elif self.singleton_type() is not None:
            # A singleton type variable is not a proper free variable.
            return None
        else:
            return self

    def rust_expr(self):
        # type: () -> str
        """
        Get a Rust expression that computes the type of this type variable.
        """
        if self.is_derived:
            return '{}.{}()'.format(
                    self.base.rust_expr(), self.derived_func)
        elif self.singleton_type():
            return self.singleton_type().rust_name()
        else:
            return self.name

    def constrain_types_by_ts(self, ts):
        # type: (TypeSet) -> None
        """
        Constrain the range of types this variable can assume to a subset of
        those in the typeset ts.
        """
        if not self.is_derived:
            self.type_set &= ts
        else:
            # Push the constraint through the derivation by constraining the
            # base var to the preimage of `ts` under the derived function.
            self.base.constrain_types_by_ts(ts.preimage(self.derived_func))

    def constrain_types(self, other):
        # type: (TypeVar) -> None
        """
        Constrain the range of types this variable can assume to a subset of
        those `other` can assume.
        """
        if self is other:
            return

        self.constrain_types_by_ts(other.get_typeset())

    def get_typeset(self):
        # type: () -> TypeSet
        """
        Returns the typeset for this TV. If the TV is derived, computes it
        recursively from the derived function and the base's typeset.
        """
        if not self.is_derived:
            return self.type_set
        else:
            return self.base.get_typeset().image(self.derived_func)

    def get_fresh_copy(self, name):
        # type: (str) -> TypeVar
        """
        Get a fresh copy of self. Can only be called on free typevars.
        """
        assert not self.is_derived
        # Copy the TypeSet so constraining the copy never aliases self.
        tv = TypeVar.from_typeset(self.type_set.copy())
        tv.name = name
        return tv
sunfishcode/cretonne
lib/cretonne/meta/cdsl/typevar.py
Python
apache-2.0
30,326
"""Tests for batchy's batching coroutines.

Covers plain ``@batch_coroutine`` functions, ``@class_batch_coroutine``
methods (including batching across multiple client instances), and
exception propagation out of batched calls.
"""
import itertools

from batchy.runloop import coro_return, runloop_coroutine
from batchy.batch_coroutine import batch_coroutine, class_batch_coroutine

from . import BaseTestCase

# Number of times a batched implementation body has actually run; each test
# resets it (see BatchTests.setup) to assert batching collapsed the calls.
CALL_COUNT = 0

@batch_coroutine()
def increment(arg_lists):
    # Receives the accumulated (args, kwargs) pairs of every pending call.
    def increment_single(n):
        return n + 1

    global CALL_COUNT
    CALL_COUNT += 1
    coro_return([increment_single(*ar, **kw) for ar, kw in arg_lists])
    yield

@batch_coroutine(accepts_kwargs=False)
def increment_nokwargs(arg_lists):
    # With accepts_kwargs=False the batch receives bare positional-arg tuples.
    global CALL_COUNT
    CALL_COUNT += 1
    coro_return(list(itertools.starmap(lambda _n: _n + 1, arg_lists)))
    yield

class BatchClient(object):
    """Fake service client whose methods count how often each batch runs."""

    def __init__(self):
        self.get_call_count = 0
        self.set_call_count = 0
        self.run_call_count = 0
        self.throw_count = 0

    @class_batch_coroutine(1)
    def get(self, arg_lists):
        self.get_call_count += 1
        yield self.run()
        coro_return([0] * len(arg_lists))

    @class_batch_coroutine(1)
    def set(self, _):
        self.set_call_count += 1
        yield self.run()

    @class_batch_coroutine(0)
    def run(self, _):
        self.run_call_count += 1
        yield

    @class_batch_coroutine(0)
    def throw(self, _):
        self.throw_count += 1
        raise ValueError()
        yield  # pylint: disable-msg=W0101

    @class_batch_coroutine(2)
    def throw_sooner(self, _):
        self.throw_count += 1
        raise ValueError()
        yield  # pylint: disable-msg=W0101

    def reset(self):
        # Zero every counter so one client can be reused across scenarios.
        self.get_call_count = self.set_call_count = self.run_call_count = self.throw_count = 0

class BatchTests(BaseTestCase):
    def setup(self):
        # Fresh module-level counter for every test.
        global CALL_COUNT
        CALL_COUNT = 0

    def test_simple_batch(self):
        # Three concurrent increments must be served by ONE batch execution.
        @runloop_coroutine()
        def test():
            a, b, c = yield increment(1), increment(2), increment(3)
            coro_return((a, b, c))

        self.assert_equals((2, 3, 4), test())
        self.assert_equals(1, CALL_COUNT)

    def test_batch_no_kwargs(self):
        @runloop_coroutine()
        def test():
            a, b, c = yield increment_nokwargs(1), increment_nokwargs(2), increment_nokwargs(3)
            coro_return((a, b, c))

        self.assert_equals((2, 3, 4), test())
        self.assert_equals(1, CALL_COUNT)

    def test_multi_clients(self):
        # Batching is per-instance: two clients batch independently, while
        # two coroutines sharing one client collapse into a single batch.
        client1, client2 = BatchClient(), BatchClient()

        @runloop_coroutine()
        def sub_1(client):
            rv = yield client.get()
            yield client.set()
            coro_return(rv)

        @runloop_coroutine()
        def sub_2(client):
            rv = yield client.get()
            yield client.set()
            coro_return(rv)

        @runloop_coroutine()
        def test1():
            rv = yield sub_1(client1), sub_2(client2)
            coro_return(rv)

        test1()
        self.assert_equal(1, client1.get_call_count)
        self.assert_equal(1, client1.set_call_count)
        self.assert_equal(2, client1.run_call_count)
        self.assert_equal(1, client2.get_call_count)
        self.assert_equal(1, client2.set_call_count)
        self.assert_equal(2, client2.run_call_count)

        client1.reset()
        client2.reset()

        @runloop_coroutine()
        def test2():
            rv = yield sub_1(client1), sub_2(client1)
            coro_return(rv)

        test2()
        self.assert_equal(1, client1.get_call_count)
        self.assert_equal(1, client1.set_call_count)
        self.assert_equal(2, client1.run_call_count)
        self.assert_equal(0, client2.get_call_count)
        self.assert_equal(0, client2.set_call_count)
        self.assert_equal(0, client2.run_call_count)

    def test_exception(self):
        # An exception raised inside a batch must surface from the runloop.
        client = BatchClient()

        @runloop_coroutine()
        def action_1():
            yield client.throw()

        @runloop_coroutine()
        def action_2():
            yield client.get('a')
            yield client.throw()

        @runloop_coroutine()
        def test():
            yield action_1(), action_1(), action_2()

        self.assert_raises(ValueError, test)

    def test_exception_sooner(self):
        # Same as above, but the throwing batch has higher priority (2).
        client = BatchClient()

        @runloop_coroutine()
        def action_1():
            yield client.throw_sooner()

        @runloop_coroutine()
        def action_2():
            yield client.get('a')
            yield client.throw_sooner()

        @runloop_coroutine()
        def test():
            yield action_1(), action_1(), action_2()

        self.assert_raises(ValueError, test)
mikekap/batchy
tests/batch_tests.py
Python
apache-2.0
4,519
#!/usr/bin/env python3 import docker def pull_ncapture(): d_client = docker.from_env() d_client.images.pull('cyberreboot/vent-ncapture', tag='master') if __name__ == '__main__': # pragma: no cover pull_ncapture()
Jeff-Wang93/vent
vent/core/network_tap/ncontrol/prestart.py
Python
apache-2.0
230
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers shared by netman's Flask API layer: response/serialization
wrappers, exception-to-HTTP-status translation, and routing utilities."""
from functools import wraps
import json
import logging

from flask import make_response, request, Response, current_app
from werkzeug.routing import BaseConverter

from netman.api import NETMAN_API_VERSION
from netman.core.objects.exceptions import UnknownResource, Conflict, InvalidValue


def to_response(fn):
    """Decorator for API methods: turn a ``(code, data)`` result into a JSON
    response and map domain exceptions onto HTTP status codes
    (400/404/409/501, anything else 500)."""
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        try:
            result = fn(self, *args, **kwargs)
            if isinstance(result, Response):
                # The handler built its own response; pass it through as-is.
                return result
            else:
                code, data = result
                if data is not None:
                    response = json_response(data, code)
                else:
                    response = make_response("", code)
        except InvalidValue as e:
            response = exception_to_response(e, 400)
        except UnknownResource as e:
            response = exception_to_response(e, 404)
        except Conflict as e:
            response = exception_to_response(e, 409)
        except NotImplementedError as e:
            response = exception_to_response(e, 501)
        except Exception as e:
            # Unexpected failure: log the traceback, answer 500.
            logging.exception(e)
            response = exception_to_response(e, 500)

        # `self` is the API resource instance; assumes it has a `.logger`.
        self.logger.info("Responding {} : {}".format(response.status_code, response.data))
        if 'Netman-Max-Version' in request.headers:
            # Negotiate down to the client's maximum supported API version.
            response.headers['Netman-Version'] = min(
                float(request.headers['Netman-Max-Version']), NETMAN_API_VERSION)
        return response
    return wrapper


def exception_to_response(exception, code):
    """Serialize ``exception`` as a JSON error body with HTTP status ``code``.

    With the "Netman-Verbose-Errors" request header, the exception's module
    and class name are included so remote callers can re-raise it."""
    data = {'error': str(exception)}
    if "Netman-Verbose-Errors" in request.headers:
        if hasattr(exception, "__module__"):
            data["error-module"] = exception.__module__
        data["error-class"] = exception.__class__.__name__
    else:
        # Non-verbose mode: make sure a message-less exception still yields
        # a human-readable error string.
        if data['error'] == "":
            if hasattr(exception, "__module__"):
                data['error'] = "Unexpected error: {}.{}".format(exception.__module__, exception.__class__.__name__)
            else:
                data['error'] = "Unexpected error: {}".format(exception.__class__.__name__)

    response = json_response(data, code)
    response.status_code = code
    return response


def json_response(data, code):
    """Build a Flask response with ``data`` as compact JSON and status ``code``."""
    json_data = json.dumps(data, indent=None)
    response = current_app.response_class(json_data, mimetype='application/json; charset=UTF-8')
    response.status_code = code
    return response


class RegexConverter(BaseConverter):
    """URL-map converter whose pattern is given inline in the route, e.g.
    ``/switches/<regex('[^/]+'):hostname>``."""
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # First converter argument is the regex to match for this URL part.
        self.regex = items[0]


class BadRequest(InvalidValue):
    # Subclass of InvalidValue so to_response maps it to HTTP 400.
    pass


class MultiContext(object):
    """Instantiate several context objects against one switch API and enter/
    exit them together as a single context manager."""
    def __init__(self, switch_api, parameters, *contexts):
        self.context_instances = []
        for context in contexts:
            obj = context(switch_api)
            # Each context extracts what it needs from the shared parameters.
            obj.process(parameters)
            self.context_instances.append(obj)
        self.parameters = parameters

    def __enter__(self):
        # Returns the list of values yielded by each inner context.
        return [(obj.__enter__()) for obj in self.context_instances]

    def __exit__(self, type_, value, traceback):
        for context in self.context_instances:
            context.__exit__(type_, value, traceback)
idjaw/netman
netman/api/api_utils.py
Python
apache-2.0
3,822
d = 1 e = 2 f = 3
I-Valchev/UrPas
coverage-3.7.1/tests/modules/pkg1/sub/ps1a.py
Python
apache-2.0
18
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.tpu_v2alpha1.services.tpu import pagers from google.cloud.tpu_v2alpha1.types import cloud_tpu from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import TpuTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import TpuGrpcAsyncIOTransport from .client import TpuClient class TpuAsyncClient: """Manages TPU nodes and other resources TPU API v2alpha1 """ _client: TpuClient DEFAULT_ENDPOINT = TpuClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = TpuClient.DEFAULT_MTLS_ENDPOINT accelerator_type_path = 
staticmethod(TpuClient.accelerator_type_path) parse_accelerator_type_path = staticmethod(TpuClient.parse_accelerator_type_path) node_path = staticmethod(TpuClient.node_path) parse_node_path = staticmethod(TpuClient.parse_node_path) runtime_version_path = staticmethod(TpuClient.runtime_version_path) parse_runtime_version_path = staticmethod(TpuClient.parse_runtime_version_path) common_billing_account_path = staticmethod(TpuClient.common_billing_account_path) parse_common_billing_account_path = staticmethod( TpuClient.parse_common_billing_account_path ) common_folder_path = staticmethod(TpuClient.common_folder_path) parse_common_folder_path = staticmethod(TpuClient.parse_common_folder_path) common_organization_path = staticmethod(TpuClient.common_organization_path) parse_common_organization_path = staticmethod( TpuClient.parse_common_organization_path ) common_project_path = staticmethod(TpuClient.common_project_path) parse_common_project_path = staticmethod(TpuClient.parse_common_project_path) common_location_path = staticmethod(TpuClient.common_location_path) parse_common_location_path = staticmethod(TpuClient.parse_common_location_path) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: TpuAsyncClient: The constructed client. """ return TpuClient.from_service_account_info.__func__(TpuAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. 
Returns: TpuAsyncClient: The constructed client. """ return TpuClient.from_service_account_file.__func__(TpuAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @classmethod def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[ClientOptions] = None ): """Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the client cert source is None. (2) if `client_options.client_cert_source` is provided, use the provided one; if the default client cert source exists, use the default one; otherwise the client cert source is None. The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the default mTLS endpoint; if the environment variabel is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. More details can be found at https://google.aip.dev/auth/4114. Args: client_options (google.api_core.client_options.ClientOptions): Custom options for the client. Only the `api_endpoint` and `client_cert_source` properties may be used in this method. Returns: Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the client cert source to use. Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ return TpuClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore @property def transport(self) -> TpuTransport: """Returns the transport used by the client instance. Returns: TpuTransport: The transport used by the client instance. 
""" return self._client.transport get_transport_class = functools.partial( type(TpuClient).get_transport_class, type(TpuClient) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, TpuTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the tpu client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.TpuTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. 
""" self._client = TpuClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def list_nodes( self, request: Union[cloud_tpu.ListNodesRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListNodesAsyncPager: r"""Lists nodes. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_list_nodes(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.ListNodesRequest( parent="parent_value", ) # Make the request page_result = client.list_nodes(request=request) # Handle the response for response in page_result: print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.ListNodesRequest, dict]): The request object. Request for [ListNodes][google.cloud.tpu.v2alpha1.Tpu.ListNodes]. parent (:class:`str`): Required. The parent resource name. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.services.tpu.pagers.ListNodesAsyncPager: Response for [ListNodes][google.cloud.tpu.v2alpha1.Tpu.ListNodes]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = cloud_tpu.ListNodesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_nodes, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListNodesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def get_node( self, request: Union[cloud_tpu.GetNodeRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloud_tpu.Node: r"""Gets the details of a node. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_get_node(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.GetNodeRequest( name="name_value", ) # Make the request response = client.get_node(request=request) # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.GetNodeRequest, dict]): The request object. Request for [GetNode][google.cloud.tpu.v2alpha1.Tpu.GetNode]. name (:class:`str`): Required. The resource name. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.types.Node: A TPU instance. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.GetNodeRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_node, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def create_node( self, request: Union[cloud_tpu.CreateNodeRequest, dict] = None, *, parent: str = None, node: cloud_tpu.Node = None, node_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a node. .. 
code-block:: python from google.cloud import tpu_v2alpha1 def sample_create_node(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) node = tpu_v2alpha1.Node() node.accelerator_type = "accelerator_type_value" node.runtime_version = "runtime_version_value" request = tpu_v2alpha1.CreateNodeRequest( parent="parent_value", node=node, ) # Make the request operation = client.create_node(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.CreateNodeRequest, dict]): The request object. Request for [CreateNode][google.cloud.tpu.v2alpha1.Tpu.CreateNode]. parent (:class:`str`): Required. The parent resource name. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. node (:class:`google.cloud.tpu_v2alpha1.types.Node`): Required. The node. This corresponds to the ``node`` field on the ``request`` instance; if ``request`` is provided, this should not be set. node_id (:class:`str`): The unqualified resource name. This corresponds to the ``node_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.tpu_v2alpha1.types.Node` A TPU instance. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, node, node_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.CreateNodeRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if node is not None: request.node = node if node_id is not None: request.node_id = node_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_node, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, cloud_tpu.Node, metadata_type=cloud_tpu.OperationMetadata, ) # Done; return the response. return response async def delete_node( self, request: Union[cloud_tpu.DeleteNodeRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a node. .. 
code-block:: python from google.cloud import tpu_v2alpha1 def sample_delete_node(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.DeleteNodeRequest( name="name_value", ) # Make the request operation = client.delete_node(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.DeleteNodeRequest, dict]): The request object. Request for [DeleteNode][google.cloud.tpu.v2alpha1.Tpu.DeleteNode]. name (:class:`str`): Required. The resource name. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.tpu_v2alpha1.types.Node` A TPU instance. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.DeleteNodeRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_node, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, cloud_tpu.Node, metadata_type=cloud_tpu.OperationMetadata, ) # Done; return the response. return response async def stop_node( self, request: Union[cloud_tpu.StopNodeRequest, dict] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Stops a node. This operation is only available with single TPU nodes. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_stop_node(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.StopNodeRequest( ) # Make the request operation = client.stop_node(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.StopNodeRequest, dict]): The request object. Request for [StopNode][google.cloud.tpu.v2alpha1.Tpu.StopNode]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.tpu_v2alpha1.types.Node` A TPU instance. 
""" # Create or coerce a protobuf request object. request = cloud_tpu.StopNodeRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.stop_node, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, cloud_tpu.Node, metadata_type=cloud_tpu.OperationMetadata, ) # Done; return the response. return response async def start_node( self, request: Union[cloud_tpu.StartNodeRequest, dict] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Starts a node. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_start_node(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.StartNodeRequest( ) # Make the request operation = client.start_node(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.StartNodeRequest, dict]): The request object. Request for [StartNode][google.cloud.tpu.v2alpha1.Tpu.StartNode]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.tpu_v2alpha1.types.Node` A TPU instance. """ # Create or coerce a protobuf request object. request = cloud_tpu.StartNodeRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.start_node, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, cloud_tpu.Node, metadata_type=cloud_tpu.OperationMetadata, ) # Done; return the response. return response async def update_node( self, request: Union[cloud_tpu.UpdateNodeRequest, dict] = None, *, node: cloud_tpu.Node = None, update_mask: field_mask_pb2.FieldMask = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates the configurations of a node. .. 
code-block:: python from google.cloud import tpu_v2alpha1 def sample_update_node(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) node = tpu_v2alpha1.Node() node.accelerator_type = "accelerator_type_value" node.runtime_version = "runtime_version_value" request = tpu_v2alpha1.UpdateNodeRequest( node=node, ) # Make the request operation = client.update_node(request=request) print("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.UpdateNodeRequest, dict]): The request object. Request for [UpdateNode][google.cloud.tpu.v2alpha1.Tpu.UpdateNode]. node (:class:`google.cloud.tpu_v2alpha1.types.Node`): Required. The node. Only fields specified in update_mask are updated. This corresponds to the ``node`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. Mask of fields from [Node][Tpu.Node] to update. Supported fields: None. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.tpu_v2alpha1.types.Node` A TPU instance. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([node, update_mask]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.UpdateNodeRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if node is not None: request.node = node if update_mask is not None: request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_node, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("node.name", request.node.name),) ), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, cloud_tpu.Node, metadata_type=cloud_tpu.OperationMetadata, ) # Done; return the response. return response async def generate_service_identity( self, request: Union[cloud_tpu.GenerateServiceIdentityRequest, dict] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloud_tpu.GenerateServiceIdentityResponse: r"""Generates the Cloud TPU service identity for the project. .. 
code-block:: python from google.cloud import tpu_v2alpha1 def sample_generate_service_identity(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.GenerateServiceIdentityRequest( parent="parent_value", ) # Make the request response = client.generate_service_identity(request=request) # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.GenerateServiceIdentityRequest, dict]): The request object. Request for [GenerateServiceIdentity][google.cloud.tpu.v2alpha1.Tpu.GenerateServiceIdentity]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.types.GenerateServiceIdentityResponse: Response for [GenerateServiceIdentity][google.cloud.tpu.v2alpha1.Tpu.GenerateServiceIdentity]. """ # Create or coerce a protobuf request object. request = cloud_tpu.GenerateServiceIdentityRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.generate_service_identity, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response async def list_accelerator_types( self, request: Union[cloud_tpu.ListAcceleratorTypesRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAcceleratorTypesAsyncPager: r"""Lists accelerator types supported by this API. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_list_accelerator_types(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.ListAcceleratorTypesRequest( parent="parent_value", ) # Make the request page_result = client.list_accelerator_types(request=request) # Handle the response for response in page_result: print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.ListAcceleratorTypesRequest, dict]): The request object. Request for [ListAcceleratorTypes][google.cloud.tpu.v2alpha1.Tpu.ListAcceleratorTypes]. parent (:class:`str`): Required. The parent resource name. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.services.tpu.pagers.ListAcceleratorTypesAsyncPager: Response for [ListAcceleratorTypes][google.cloud.tpu.v2alpha1.Tpu.ListAcceleratorTypes]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.ListAcceleratorTypesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_accelerator_types, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListAcceleratorTypesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def get_accelerator_type( self, request: Union[cloud_tpu.GetAcceleratorTypeRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloud_tpu.AcceleratorType: r"""Gets AcceleratorType. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_get_accelerator_type(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.GetAcceleratorTypeRequest( name="name_value", ) # Make the request response = client.get_accelerator_type(request=request) # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.GetAcceleratorTypeRequest, dict]): The request object. 
Request for [GetAcceleratorType][google.cloud.tpu.v2alpha1.Tpu.GetAcceleratorType]. name (:class:`str`): Required. The resource name. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.types.AcceleratorType: A accelerator type that a Node can be configured with. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.GetAcceleratorTypeRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_accelerator_type, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response async def list_runtime_versions( self, request: Union[cloud_tpu.ListRuntimeVersionsRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListRuntimeVersionsAsyncPager: r"""Lists runtime versions supported by this API. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_list_runtime_versions(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.ListRuntimeVersionsRequest( parent="parent_value", ) # Make the request page_result = client.list_runtime_versions(request=request) # Handle the response for response in page_result: print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.ListRuntimeVersionsRequest, dict]): The request object. Request for [ListRuntimeVersions][google.cloud.tpu.v2alpha1.Tpu.ListRuntimeVersions]. parent (:class:`str`): Required. The parent resource name. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.services.tpu.pagers.ListRuntimeVersionsAsyncPager: Response for [ListRuntimeVersions][google.cloud.tpu.v2alpha1.Tpu.ListRuntimeVersions]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.ListRuntimeVersionsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_runtime_versions, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListRuntimeVersionsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def get_runtime_version( self, request: Union[cloud_tpu.GetRuntimeVersionRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloud_tpu.RuntimeVersion: r"""Gets a runtime version. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_get_runtime_version(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.GetRuntimeVersionRequest( name="name_value", ) # Make the request response = client.get_runtime_version(request=request) # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.GetRuntimeVersionRequest, dict]): The request object. 
Request for [GetRuntimeVersion][google.cloud.tpu.v2alpha1.Tpu.GetRuntimeVersion]. name (:class:`str`): Required. The resource name. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.types.RuntimeVersion: A runtime version that a Node can be configured with. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloud_tpu.GetRuntimeVersionRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_runtime_version, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response async def get_guest_attributes( self, request: Union[cloud_tpu.GetGuestAttributesRequest, dict] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloud_tpu.GetGuestAttributesResponse: r"""Retrieves the guest attributes for the node. .. code-block:: python from google.cloud import tpu_v2alpha1 def sample_get_guest_attributes(): # Create a client client = tpu_v2alpha1.TpuClient() # Initialize request argument(s) request = tpu_v2alpha1.GetGuestAttributesRequest( name="name_value", ) # Make the request response = client.get_guest_attributes(request=request) # Handle the response print(response) Args: request (Union[google.cloud.tpu_v2alpha1.types.GetGuestAttributesRequest, dict]): The request object. Request for [GetGuestAttributes][google.cloud.tpu.v2alpha1.Tpu.GetGuestAttributes]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tpu_v2alpha1.types.GetGuestAttributesResponse: Response for [GetGuestAttributes][google.cloud.tpu.v2alpha1.Tpu.GetGuestAttributes]. """ # Create or coerce a protobuf request object. request = cloud_tpu.GetGuestAttributesRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_guest_attributes, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
        # Tail of ``get_guest_attributes``: hand the RPC response back to
        # the caller.
        return response

    async def __aenter__(self):
        """Enter the async context manager; returns the client itself."""
        return self

    async def __aexit__(self, exc_type, exc, tb):
        """Exit the async context manager, closing the underlying transport."""
        await self.transport.close()


# Report the installed client library version in outgoing requests (via the
# per-method ClientInfo); fall back to a blank ClientInfo when the package
# distribution metadata is unavailable, e.g. when running from a source
# checkout that was never pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-tpu",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("TpuAsyncClient",)
googleapis/python-tpu
google/cloud/tpu_v2alpha1/services/tpu/async_client.py
Python
apache-2.0
54,857
# Clustalw modules """ A set of classes to interact with the multiple alignment command line program clustalw. Clustalw is the command line version of the graphical Clustalx aligment program. This requires clustalw available from: ftp://ftp-igbmc.u-strasbg.fr/pub/ClustalW/. functions: o parse_file o do_alignment classes: o ClustalAlignment o _AlignCreator o MultipleAlignCL""" # standard library import os import sys import string #Obsolete - we should switch to using string object methods instead! # biopython from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from Bio import Alphabet from Bio.Alphabet import IUPAC import clustal_format from Bio.Align.Generic import Alignment # PyXML package from xml.sax import saxutils from xml.sax import handler def parse_file(file_name, alphabet = IUPAC.unambiguous_dna, debug_level = 0): """Parse the given file into a clustal aligment object. Arguments: o file_name - The name of the file to parse. o alphabet - The type of alphabet to use for the alignment sequences. This should correspond to the type of information contained in the file. Defaults to be unambiguous_dna sequence. """ align_handler = _AlignCreator(Alphabet.Gapped(alphabet)) parser = clustal_format.format.make_parser(debug_level) parser.setContentHandler(align_handler) parser.setErrorHandler(handler.ErrorHandler()) to_parse = open(file_name, 'r') parser.parseFile(to_parse) to_parse.close() return align_handler.align def do_alignment(command_line, alphabet=None): """Perform an alignment with the given command line. Arguments: o command_line - A command line object that can give out the command line we will input into clustalw. o alphabet - the alphabet to use in the created alignment. If not specified IUPAC.unambiguous_dna and IUPAC.protein will be used for dna and protein alignment respectively. Returns: o A clustal alignment object corresponding to the created alignment. If the alignment type was not a clustal object, None is returned. 
    """
    # Run clustalw as a subprocess; os.popen().close() returns the raw
    # termination status (or None on success).
    run_clust = os.popen(str(command_line))
    status = run_clust.close()

    # The exit status is the second byte of the termination status
    # TODO - Check this holds on win32...
    # NOTE(review): `status / 256` relies on Python 2 integer division;
    # under `from __future__ import division` this would become a float.
    value = 0
    if status:
        value = status / 256
    # check the return value for errors, as on 1.81 the return value
    # from Clustalw is actually helpful for figuring out errors
    # 1 => bad command line option
    if value == 1:
        raise ValueError("Bad command line option in the command: %s"
                         % str(command_line))
    # 2 => can't open sequence file
    elif value == 2:
        raise IOError("Cannot open sequence file %s"
                      % command_line.sequence_file)
    # 3 => wrong format in sequence file
    elif value == 3:
        raise IOError("Sequence file %s has an invalid format."
                      % command_line.sequence_file)
    # 4 => sequence file only has one sequence
    elif value == 4:
        raise IOError("Sequence file %s has only one sequence present."
                      % command_line.sequence_file)

    # if an output file was specified, we need to grab it
    if command_line.output_file:
        out_file = command_line.output_file
    # otherwise clustalw writes next to the input file with an .aln extension
    else:
        out_file = os.path.splitext(command_line.sequence_file)[0] + '.aln'

    # if we can't deal with the format, just return None
    if command_line.output_type and command_line.output_type != 'CLUSTAL':
        return None
    # otherwise parse it into a ClustalAlignment object
    else:
        if not alphabet:
            # tuple-index trick: picks protein iff type == 'PROTEIN',
            # otherwise defaults to DNA
            alphabet = (IUPAC.unambiguous_dna, IUPAC.protein)[
                command_line.type == 'PROTEIN']

        # check if the outfile exists before parsing
        if not(os.path.exists(out_file)):
            raise IOError("Output .aln file %s not produced, commandline: %s"
                          % (out_file, command_line))

        return parse_file(out_file, alphabet)

class ClustalAlignment(Alignment):
    """Work with the clustal alignment format.

    This format is the default output from clustal -- these files normally
    have an extension of .aln.
    """
    # the default clustal version to report if the parsed file did not set one
    DEFAULT_VERSION = '1.81'

    def __init__(self, alphabet = Alphabet.Gapped(IUPAC.ambiguous_dna)):
        """Create an empty alignment with the given (gapped) alphabet."""
        Alignment.__init__(self, alphabet)

        # the consensus line of stars/colons/dots from the aln output format
        self._star_info = ''

        # the clustal version string parsed out of the file header
        self._version = ''

    def __str__(self):
        """Print out the alignment so it looks pretty.

        The output produced from this should also be formatted in valid
        clustal format.
        """
        # if the version isn't set, we need to use the default
        if self._version == '':
            self._version = self.DEFAULT_VERSION

        output = "CLUSTAL X (%s) multiple sequence alignment\n\n\n" % \
                 self._version

        cur_char = 0
        max_length = len(self._records[0].seq)

        # keep displaying sequences, 50 residues per block, until the end
        while cur_char != max_length:
            # calculate the number of residues to show, which will
            # be less if we are at the end of the sequence
            if (cur_char + 50) > max_length:
                show_num = max_length - cur_char
            else:
                show_num = 50

            # go through all of the records and print out the sequences
            # when we output, we do a nice 80 column output, although this
            # may result in truncation of the ids.
            for record in self._records:
                # id field padded to 36 columns; ids longer than 30 are cut
                line = record.description[0:30].ljust(36)
                line = line + record.seq.data[cur_char:(cur_char + show_num)]
                output = output + line + "\n"

            # now we need to print out the star info, if we've got it
            if self._star_info != '':
                output = output + (" " * 36) + \
                         self._star_info[cur_char:(cur_char + show_num)] + "\n"

            output = output + "\n"
            cur_char = cur_char + show_num

        # there is an extra trailing newline; strip and re-add exactly one
        return string.rstrip(output) + "\n"

    def _add_star_info(self, stars):
        """Add all of the stars, which indicate consensus sequence.
        """
        self._star_info = stars

    def _add_version(self, version):
        """Add the version information about the clustal file being read.
        """
        self._version = version

class _AlignCreator(handler.ContentHandler):
    """Handler to create a ClustalAlignment object from clustal file info.
This handler is used to accept events coming from a Martel parsing stream, and acts like a normal SAX handler. After parsing, the alignment object created is available as the align attribute of the class. """ def __init__(self, alphabet): """Create a new handler ready to deal with output from Martel parsing. Arguments: o alphabet - The alphabet to create all of the new sequences with. """ self.align = ClustalAlignment(alphabet) # store sequence info in a dictionary self.all_info = {} self.all_keys = [] # the current id we are working with self.cur_id = None # info so we know how big the ids and sequences are self.id_size = 0 self.space_size = 0 self.seq_size = 0 # flags so we can keep track of where we are during the parse self.in_version = 0 self.in_stars = 0 self.in_seq_id = 0 self.in_space = 0 self.in_seq = 0 self.all_star_info = '' def startElement(self, name, attrs): """Check the various tags for the info we are interested in.""" if name == "version": self.in_version = 1 self.version_info = '' elif name == "seq_id": self.in_seq_id = 1 self.seq_id_info = '' elif name == "seq_space": self.in_space = 1 self.space_info = '' elif name == "seq_info": self.in_seq = 1 self.seq_info = '' elif name == "match_stars": self.in_stars = 1 self.star_info = '' def characters(self, content): if self.in_version: self.version_info = self.version_info + content elif self.in_seq_id: self.seq_id_info = self.seq_id_info + content elif self.in_space: self.space_info = self.space_info + content elif self.in_seq: self.seq_info = self.seq_info + content elif self.in_stars: self.star_info = self.star_info + content def endElement(self, name): if name == "version": self.in_version = 0 self.align._add_version(string.strip(self.version_info)) elif name == "seq_id": self.in_seq_id = 0 self.id_size = len(self.seq_id_info) self.cur_id = self.seq_id_info elif name == "seq_space": self.in_space = 0 self.space_size = len(self.space_info) elif name == "seq_info": self.in_seq = 0 self.seq_size = 
len(self.seq_info) # if the id is already there, add the sequence info if self.cur_id in self.all_info.keys(): self.all_info[self.cur_id] = self.all_info[self.cur_id] + \ self.seq_info else: self.all_info[self.cur_id] = self.seq_info self.all_keys.append(self.cur_id) elif name == "match_stars": id_length = self.id_size + self.space_size line_length = id_length + self.seq_size self.all_star_info = self.all_star_info + \ self.star_info[id_length:line_length] def endDocument(self): # when we are done parsing add all of the info we need self.align._add_star_info(self.all_star_info) for id in self.all_keys: self.align.add_sequence(id, self.all_info[id]) class MultipleAlignCL: """Represent a clustalw multiple alignment command line. This is meant to make it easy to code the command line options you want to submit to clustalw. Clustalw has a ton of options and things to do but this is set up to represent a clustalw mutliple alignment. Warning: I don't use all of these options personally, so if you find one to be broken for any reason, please let us know! """ # set the valid options for different parameters OUTPUT_TYPES = ['GCG', 'GDE', 'PHYLIP', 'PIR', 'NEXUS', 'FASTA'] OUTPUT_ORDER = ['INPUT', 'ALIGNED'] OUTPUT_CASE = ['LOWER', 'UPPER'] OUTPUT_SEQNOS = ['OFF', 'ON'] RESIDUE_TYPES = ['PROTEIN', 'DNA'] PROTEIN_MATRIX = ['BLOSUM', 'PAM', 'GONNET', 'ID'] DNA_MATRIX = ['IUB', 'CLUSTALW'] def __init__(self, sequence_file, command = 'clustalw'): """Initialize some general parameters that can be set as attributes. Arguments: o sequence_file - The file to read the sequences for alignment from. o command - The command used to run clustalw. This defaults to just 'clustalw' (ie. assumes you have it on your path somewhere). General attributes that can be set: o is_quick - if set as 1, will use a fast algorithm to create the alignment guide tree. o allow_negative - allow negative values in the alignment matrix. 
Multiple alignment attributes that can be set as attributes: o gap_open_pen - Gap opening penalty o gap_ext_pen - Gap extension penalty o is_no_end_pen - A flag as to whether or not there should be a gap separation penalty for the ends. o gap_sep_range - The gap separation penalty range. o is_no_pgap - A flag to turn off residue specific gaps o is_no_hgap - A flag to turn off hydrophilic gaps o h_gap_residues - A list of residues to count a hydrophilic o max_div - A percent identity to use for delay (? - I don't undertand this!) o trans_weight - The weight to use for transitions """ self.sequence_file = sequence_file self.command = command self.is_quick = None self.allow_negative = None self.gap_open_pen = None self.gap_ext_pen = None self.is_no_end_pen = None self.gap_sep_range = None self.is_no_pgap = None self.is_no_hgap = None self.h_gap_residues = [] self.max_div = None self.trans_weight = None # other attributes that should be set via various functions # 1. output parameters self.output_file = None self.output_type = None self.output_order = None self.change_case = None self.add_seqnos = None # 2. a guide tree to use self.guide_tree = None self.new_tree = None # 3. matrices self.protein_matrix = None self.dna_matrix = None # 4. 
type of residues self.type = None def __str__(self): """Write out the command line as a string.""" if sys.platform <> "win32" : #On Linux with clustalw 1.83, you can do: #clustalw input.faa #clustalw /full/path/input.faa #clustalw -INFILE=input.faa #clustalw -INFILE=/full/path/input.faa # #Note these fail (using DOS style slashes): # #clustalw /INFILE=input.faa #clustalw /INFILE=/full/path/input.faa # #To keep things simple, and follow the original #behaviour of Bio.Clustalw use this: cline = self.command + " " + self.sequence_file else : #On Windows XP with clustalw.exe 1.83, these work at #the command prompt: # #clustalw.exe input.faa #clustalw.exe /INFILE=input.faa #clustalw.exe /INFILE="input.faa" #clustalw.exe /INFILE="with space.faa" #clustalw.exe /INFILE=C:\full\path\input.faa #clustalw.exe /INFILE="C:\full path\with spaces.faa" # #Sadly these fail: #clustalw.exe "input.faa" #clustalw.exe "with space.faa" #clustalw.exe C:\full\path\input.faa #clustalw.exe "C:\full path\with spaces.faa" # #These also fail but a minus/dash does seem to #work with other options (!): #clustalw.exe -INFILE=input.faa #clustalw.exe -INFILE=C:\full\path\input.faa # #Also these fail: #clustalw.exe "/INFILE=input.faa" #clustalw.exe "/INFILE=C:\full\path\input.faa" # #Thanks to Emanuel Hey for flagging this on the mailing list. # #In addtion, both self.command and self.sequence_file #may contain spaces, so should be quoted. But clustalw #is fussy. 
if self.command.count(" ") > 0 : cline = '"%s"' % self.command else : cline = self.command if self.sequence_file.count(" ") > 0 : cline += ' /INFILE="%s"' % self.sequence_file else : cline += ' /INFILE=%s' % self.sequence_file # general options if self.type: cline += " -TYPE=%s" % self.type if self.is_quick == 1: #Some versions of clustalw are case sensitive, #and require -quicktree rather than -QUICKTREE cline += " -quicktree" if self.allow_negative == 1: cline += " -NEGATIVE" # output options if self.output_file: cline += " -OUTFILE=%s" % self.output_file if self.output_type: cline += " -OUTPUT=%s" % self.output_type if self.output_order: cline += " -OUTORDER=%s" % self.output_order if self.change_case: cline += " -CASE=%s" % self.change_case if self.add_seqnos: cline += " -SEQNOS=%s" % self.add_seqnos if self.new_tree: # clustal does not work if -align is written -ALIGN cline += " -NEWTREE=%s -align" % self.new_tree # multiple alignment options if self.guide_tree: cline += " -USETREE=%s" % self.guide_tree if self.protein_matrix: cline += " -MATRIX=%s" % self.protein_matrix if self.dna_matrix: cline += " -DNAMATRIX=%s" % self.dna_matrix if self.gap_open_pen: cline += " -GAPOPEN=%s" % self.gap_open_pen if self.gap_ext_pen: cline += " -GAPEXT=%s" % self.gap_ext_pen if self.is_no_end_pen == 1: cline += " -ENDGAPS" if self.gap_sep_range: cline += " -GAPDIST=%s" % self.gap_sep_range if self.is_no_pgap == 1: cline += " -NOPGAP" if self.is_no_hgap == 1: cline += " -NOHGAP" if len(self.h_gap_residues) != 0: # stick the list of residues together as one big list o' residues residue_list = '' for residue in self.h_gap_residues: residue_list = residue_list + residue cline += " -HGAPRESIDUES=%s" % residue_list if self.max_div: cline += " -MAXDIV=%s" % self.max_div if self.trans_weight: cline += " -TRANSWEIGHT=%s" % self.trans_weight return cline def set_output(self, output_file, output_type = None, output_order = None, change_case = None, add_seqnos = None): """Set the output 
parameters for the command line. """ self.output_file = output_file if output_type: output_type = string.upper(output_type) if output_type not in self.OUTPUT_TYPES: raise ValueError("Invalid output type %s. Valid choices are %s" % (output_type, self.OUTPUT_TYPES)) else: self.output_type = output_type if output_order: output_order = string.upper(output_order) if output_order not in self.OUTPUT_ORDER: raise ValueError("Invalid output order %s. Valid choices are %s" % (output_order, self.OUTPUT_ORDER)) else: self.output_order = output_order if change_case: change_case = string.upper(change_case) if output_type != "GDE": raise ValueError("Change case only valid for GDE output.") elif change_case not in self.CHANGE_CASE: raise ValueError("Invalid change case %s. Valid choices are %s" % (change_case, self.CHANGE_CASE)) else: self.change_case = change_case if add_seqnos: add_seqnos = string.upper(add_seqnos) if output_type: raise ValueError("Add SeqNos only valid for CLUSTAL output.") elif add_seqnos not in self.OUTPUT_SEQNOS: raise ValueError("Invalid seqnos option %s. Valid choices: %s" % (add_seqnos, self.OUTPUT_SEQNOS)) else: self.add_seqnos = add_seqnos def set_guide_tree(self, tree_file): """Provide a file to use as the guide tree for alignment. Raises: o IOError - If the tree_file doesn't exist.""" if not(os.path.exists(tree_file)): raise IOError("Could not find the guide tree file %s." % tree_file) else: self.guide_tree = tree_file def set_new_guide_tree(self, tree_file): """Set the name of the guide tree file generated in the alignment. """ self.new_tree = tree_file def set_protein_matrix(self, protein_matrix): """Set the type of protein matrix to use. Protein matrix can be either one of the defined types (blosum, pam, gonnet or id) or a file with your own defined matrix. 
""" if string.upper(protein_matrix) in self.PROTEIN_MATRIX: self.protein_matrix = string.upper(protein_matrix) elif os.path.exists(protein_matrix): self.protein_matrix = protein_matrix else: raise ValueError("Invalid matrix %s. Options are %s or a file." % (string.upper(protein_matrix), self.PROTEIN_MATRIX)) def set_dna_matrix(self, dna_matrix): """Set the type of DNA matrix to use. The dna_matrix can either be one of the defined types (iub or clustalw) or a file with the matrix to use.""" if string.upper(dna_matrix) in self.DNA_MATRIX: self.dna_matrix = string.upper(dna_matrix) elif os.path.exists(dna_matrix): self.dna_matrix = dna_matrix else: raise ValueError("Invalid matrix %s. Options are %s or a file." % (dna_matrix, self.DNA_MATRIX)) def set_type(self, residue_type): """Set the type of residues within the file. Clustal tries to guess whether the info is protein or DNA based on the number of GATCs, but this can be wrong if you have a messed up protein or DNA you are working with, so this allows you to set it explicitly. """ residue_type = string.upper(residue_type) if residue_type in self.RESIDUE_TYPES: self.type = residue_type else: raise ValueError("Invalid residue type %s. Valid choices are %s" % (residue_type, self.RESIDUE_TYPES))
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/Clustalw/__init__.py
Python
apache-2.0
22,263
from django.shortcuts import render


def index_view(request):
    """Render the site landing page (home/index.html).

    Uses ``render()``, which applies a ``RequestContext`` automatically.
    The previous ``render_to_response(..., context_instance=RequestContext(request))``
    idiom was deprecated in Django 1.8 and removed in Django 2.0.
    """
    return render(request, 'home/index.html')
caballerojavier13/python-mda
pythonMDA/apps/home/views.py
Python
apache-2.0
253
"""The Minecraft Server sensor platform."""
from __future__ import annotations

from typing import Any

from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TIME_MILLISECONDS
from homeassistant.core import HomeAssistant

from . import MinecraftServer, MinecraftServerEntity
from .const import (
    ATTR_PLAYERS_LIST,
    DOMAIN,
    ICON_LATENCY_TIME,
    ICON_PLAYERS_MAX,
    ICON_PLAYERS_ONLINE,
    ICON_PROTOCOL_VERSION,
    ICON_VERSION,
    NAME_LATENCY_TIME,
    NAME_PLAYERS_MAX,
    NAME_PLAYERS_ONLINE,
    NAME_PROTOCOL_VERSION,
    NAME_VERSION,
    UNIT_PLAYERS_MAX,
    UNIT_PLAYERS_ONLINE,
    UNIT_PROTOCOL_VERSION,
    UNIT_VERSION,
)


async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the Minecraft Server sensor platform."""
    # The MinecraftServer wrapper was stored by the integration setup,
    # keyed by the config entry's unique id.
    server = hass.data[DOMAIN][config_entry.unique_id]

    # Create entities list.
    entities = [
        MinecraftServerVersionSensor(server),
        MinecraftServerProtocolVersionSensor(server),
        MinecraftServerLatencyTimeSensor(server),
        MinecraftServerPlayersOnlineSensor(server),
        MinecraftServerPlayersMaxSensor(server),
    ]

    # Add sensor entities.
    # The True flag requests an update before the entities are first added.
    async_add_entities(entities, True)


class MinecraftServerSensorEntity(MinecraftServerEntity, SensorEntity):
    """Representation of a Minecraft Server sensor base entity."""

    def __init__(
        self,
        server: MinecraftServer,
        type_name: str,
        icon: str | None = None,
        unit: str | None = None,
        device_class: str | None = None,
    ) -> None:
        """Initialize sensor base entity."""
        super().__init__(server, type_name, icon, device_class)
        # Cached value exposed through the `state` property; each subclass
        # refreshes it in its async_update implementation.
        self._state = None
        self._unit = unit

    @property
    def available(self) -> bool:
        """Return sensor availability."""
        # Availability mirrors the server wrapper's last known online status.
        return self._server.online

    @property
    def state(self) -> Any:
        """Return sensor state."""
        return self._state

    @property
    def unit_of_measurement(self) -> str:
        """Return sensor measurement unit."""
        return self._unit


class MinecraftServerVersionSensor(MinecraftServerSensorEntity):
    """Representation of a Minecraft Server version sensor."""

    def __init__(self, server: MinecraftServer) -> None:
        """Initialize version sensor."""
        super().__init__(
            server=server, type_name=NAME_VERSION, icon=ICON_VERSION, unit=UNIT_VERSION
        )

    async def async_update(self) -> None:
        """Update version."""
        self._state = self._server.version


class MinecraftServerProtocolVersionSensor(MinecraftServerSensorEntity):
    """Representation of a Minecraft Server protocol version sensor."""

    def __init__(self, server: MinecraftServer) -> None:
        """Initialize protocol version sensor."""
        super().__init__(
            server=server,
            type_name=NAME_PROTOCOL_VERSION,
            icon=ICON_PROTOCOL_VERSION,
            unit=UNIT_PROTOCOL_VERSION,
        )

    async def async_update(self) -> None:
        """Update protocol version."""
        self._state = self._server.protocol_version


class MinecraftServerLatencyTimeSensor(MinecraftServerSensorEntity):
    """Representation of a Minecraft Server latency time sensor."""

    def __init__(self, server: MinecraftServer) -> None:
        """Initialize latency time sensor."""
        super().__init__(
            server=server,
            type_name=NAME_LATENCY_TIME,
            icon=ICON_LATENCY_TIME,
            unit=TIME_MILLISECONDS,
        )

    async def async_update(self) -> None:
        """Update latency time."""
        self._state = self._server.latency_time


class MinecraftServerPlayersOnlineSensor(MinecraftServerSensorEntity):
    """Representation of a Minecraft Server online players sensor."""

    def __init__(self, server: MinecraftServer) -> None:
        """Initialize online players sensor."""
        super().__init__(
            server=server,
            type_name=NAME_PLAYERS_ONLINE,
            icon=ICON_PLAYERS_ONLINE,
            unit=UNIT_PLAYERS_ONLINE,
        )

    async def async_update(self) -> None:
        """Update online players state and device state attributes."""
        self._state = self._server.players_online

        extra_state_attributes = None
        players_list = self._server.players_list

        # Only expose a players-list attribute when there actually are players.
        if players_list is not None and len(players_list) != 0:
            extra_state_attributes = {ATTR_PLAYERS_LIST: self._server.players_list}

        # NOTE(review): _extra_state_attributes is first assigned here, in
        # async_update. This assumes an update runs before the property below
        # is read (async_add_entities is called with update-before-add=True),
        # or that MinecraftServerEntity initializes the attribute — confirm.
        self._extra_state_attributes = extra_state_attributes

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return players list in device state attributes."""
        return self._extra_state_attributes


class MinecraftServerPlayersMaxSensor(MinecraftServerSensorEntity):
    """Representation of a Minecraft Server maximum number of players sensor."""

    def __init__(self, server: MinecraftServer) -> None:
        """Initialize maximum number of players sensor."""
        super().__init__(
            server=server,
            type_name=NAME_PLAYERS_MAX,
            icon=ICON_PLAYERS_MAX,
            unit=UNIT_PLAYERS_MAX,
        )

    async def async_update(self) -> None:
        """Update maximum number of players."""
        self._state = self._server.players_max
kennedyshead/home-assistant
homeassistant/components/minecraft_server/sensor.py
Python
apache-2.0
5,431
import gc
import pprint


class Graph:
    """A trivially linkable node, used to build a reference cycle."""

    def __init__(self, name):
        self.name = name
        self.next = None

    def set_next(self, next):
        """Point this node at *next*, announcing the new link."""
        message = 'Linking nodes {}.next = {}'.format(self, next)
        print(message)
        self.next = next

    def __repr__(self):
        class_name = self.__class__.__name__
        return '{}({})'.format(class_name, self.name)


# Build three nodes and link them into a cycle: one -> two -> three -> one.
one = Graph('one')
two = Graph('two')
three = Graph('three')
one.set_next(two)
two.set_next(three)
three.set_next(one)

# Ask the garbage collector which objects `three` directly references
# (its instance __dict__ and its class).
print()
print('three refers to:')
for referent in gc.get_referents(three):
    pprint.pprint(referent)
jasonwee/asus-rt-n14uhp-mrtg
src/lesson_runtime_features/gc_get_referents.py
Python
apache-2.0
582
import tensorflow as tf

# tf.pow(x, y, name=None): element-wise power, x ** y.
# (The original bare-string note below, in Chinese, says: computes each
# element of x raised to the corresponding power in y; x and y may be
# tensors of type float32/float64/int32/int64/complex64/complex128.)
"""tf.pow(x,y,name=None)
功能:计算x各元素的y次方。
输入:x,y为张量,可以为`float32`, `float64`, `int32`, `int64`,`complex64`,`complex128`类型。"""

# y has shape (1, 3) and broadcasts against x's shape (2, 3).
x = tf.constant([[2, 3, 5], [2, 3, 5]], tf.float64)
y = tf.constant([[2, 3, 4]], tf.float64)
z = tf.pow(x, y)

# NOTE(review): tf.Session is TensorFlow 1.x API (removed in TF 2.x's
# default eager mode) — this snippet only runs against TF1.
sess = tf.Session()
print(sess.run(z))
sess.close()

# Expected output:
"""[[   4.   27.  625.]
 [   4.   27.  625.]]"""
Asurada2015/TFAPI_translation
math_ops_basicoperation/tf_pow.py
Python
apache-2.0
416
import numpy as np

from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import UrbRunoffLiter


class TestUrbanRunoffLiter(VariableUnitTest):
    """Regression test for the urban runoff (liters) calculation."""

    # NOTE(review): by apparent project convention the `_f` suffix denotes
    # the vectorized rewrite and the bare name the reference implementation
    # — confirm against gwlfe.MultiUse_Fxns.Runoff.UrbRunoffLiter.
    def test_UrbanRunoffLiter(self):
        """Both UrbRunoffLiter implementations must agree to 7 decimal places."""
        z = self.z  # shared watershed fixture provided by VariableUnitTest
        np.testing.assert_array_almost_equal(
            UrbRunoffLiter.UrbRunoffLiter_f(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb,
                                            z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
                                            z.ISRR, z.ISRA),
            UrbRunoffLiter.UrbRunoffLiter(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb,
                                          z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
                                          z.ISRR, z.ISRA),
            decimal=7)
WikiWatershed/gwlf-e
test/unittests/test_UrbanRunoffLiter.py
Python
apache-2.0
753
import bisect


class Solution:
    def numMatchingSubseq(self, S: str, words):
        """Count how many strings in `words` are subsequences of `S`.

        Builds an index mapping each character of S to its (sorted) list of
        positions, then matches every word greedily: for each character,
        binary-search for its first occurrence after the previous match.
        Runs in O(len(S) + sum(len(w) * log len(S)) for w in words).

        Improvement over the original: counts matches directly with a
        `for/else` instead of accumulating matched words in a throwaway
        list (and a manual `result` flag) only to return its length.
        """
        positions = {}
        for i, ch in enumerate(S):
            positions.setdefault(ch, []).append(i)

        count = 0
        for word in words:
            prev = -1  # index in S of the last matched character
            for ch in word:
                if ch not in positions:
                    break
                idx_list = positions[ch]
                nxt = bisect.bisect_left(idx_list, prev + 1)
                if nxt >= len(idx_list):
                    break  # no occurrence of ch after prev
                prev = idx_list[nxt]
            else:
                # Loop completed without break: every character was matched.
                count += 1
        return count


print(Solution().numMatchingSubseq("abcde", ["a", "bb", "acd", "ace"]))
zuun77/givemegoogletshirts
leetcode/python/792_number-of-matching-subsequences.py
Python
apache-2.0
716
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

"""Shared infrastructure for Python checkstyle plugins.

Provides PythonFile (a tokenized/AST-parsed view of a source file with
1-indexed line access), Nit (a single style finding), and the
CheckstylePlugin interface that concrete checkers implement.
"""

from __future__ import (absolute_import, division, generators, nested_scopes,
                        print_function, unicode_literals, with_statement)

import ast
import codecs
import itertools
import re
import textwrap
import tokenize
from abc import abstractmethod
from collections import Sequence

from twitter.common.lang import Compatibility, Interface


__all__ = (
  'CheckstylePlugin',
  'PythonFile',
)


class OffByOneList(Sequence):
  """A Sequence wrapper indexed from 1 instead of 0, matching line numbers.

  Index 0 is invalid; negative indices keep their usual Python meaning.
  Slices are translated the same way (a 0 bound raises IndexError).
  """

  def __init__(self, iterator):
    # Make sure we properly handle unicode chars in code files.
    self._list = list(iterator)

  def __iter__(self):
    return iter(self._list)

  def __reversed__(self):
    return reversed(self._list)

  def __len__(self):
    return len(self._list)

  def __getitem__(self, element_id):
    if isinstance(element_id, Compatibility.integer):
      return self.__get_list_item(element_id)
    elif isinstance(element_id, slice):
      return self.__getslice(element_id)
    raise TypeError('__getitem__ only supports integers and slices')

  def __getslice(self, sl):
    # Translate a 1-indexed slice to the underlying 0-indexed list.
    if sl.start == 0 or sl.stop == 0:
      raise IndexError
    new_slice = slice(sl.start - 1 if sl.start > 0 else sl.start,
                      sl.stop - 1 if sl.stop > 0 else sl.stop)
    return self._list[new_slice]

  def __get_list_item(self, item):
    # Positive indices shift down by one; negatives pass through unchanged.
    if item == 0:
      raise IndexError
    if item < 0:
      return self._list[item]
    return self._list[item - 1]

  def index(self, value):
    # Return the 1-indexed position of value.
    return self._list.index(value) + 1


class PythonFile(object):
  """Checkstyle wrapper for Python source files."""

  # Token types that never contribute text to a logical line.
  SKIP_TOKENS = frozenset((tokenize.COMMENT, tokenize.NL, tokenize.DEDENT))

  def _remove_coding_header(self, blob):
    """
    There is a bug in ast.parse that cause it to throw a syntax error if
    you have a header similar to...
    # coding=utf-8,

    we replace this line with something else to bypass the bug.
    :param blob: file text contents
    :return: adjusted blob
    """
    # Remove the # coding=utf-8 to avoid AST erroneous parse errors
    # https://bugs.python.org/issue22221
    lines = blob.split('\n')
    if lines and 'coding=utf-8' in lines[0]:
      lines[0] = '#remove coding'
    return '\n'.join(lines).encode('ascii', errors='replace')

  def __init__(self, blob, filename='<expr>'):
    # Keep both an AST and a 1-indexed raw-line view of the same blob.
    self._blob = self._remove_coding_header(blob)
    self.tree = ast.parse(self._blob, filename)
    self.lines = OffByOneList(self._blob.split('\n'))
    self.filename = filename
    # Maps a logical line's starting physical line -> (start, stop, indent).
    self.logical_lines = dict((start, (start, stop, indent))
                              for start, stop, indent
                              in self.iter_logical_lines(self._blob))

  def __iter__(self):
    return iter(self.lines)

  def __getitem__(self, line_number):
    # Return the whole logical line containing this physical line number.
    return self.lines[self.line_range(line_number)]

  def __str__(self):
    # NOTE(review): the format string has no {filename} placeholder, so the
    # filename= keyword is silently ignored and every instance prints the
    # same text — presumably this was 'PythonFile({filename})' originally.
    return 'PythonFile((unknown))'.format(filename=self.filename)

  @classmethod
  def parse(cls, filename):
    """Read filename as UTF-8 and wrap its contents in a PythonFile."""
    with codecs.open(filename, encoding='utf-8') as fp:
      blob = fp.read()
    return cls(blob, filename)

  @classmethod
  def from_statement(cls, statement):
    """A helper to construct a PythonFile from a triple-quoted string, for testing.

    :param statement: Python file contents
    :return: Instance of PythonFile
    """
    # Drop the first (blank) line of the triple-quoted literal and dedent.
    return cls('\n'.join(textwrap.dedent(statement).split('\n')[1:]))

  @classmethod
  def iter_tokens(cls, blob):
    """Iterate over tokens found in blob contents.

    :param blob: Input string with python file contents
    :return: token iterator
    """
    return tokenize.generate_tokens(Compatibility.StringIO(blob).readline)

  @property
  def tokens(self):
    """An iterator over tokens for this Python file from the tokenize module."""
    return self.iter_tokens(self._blob)

  @staticmethod
  def translate_logical_line(start, end, contents, indent_stack, endmarker=False):
    """Translate raw contents to logical lines"""
    # Remove leading blank lines.
    while contents[0] == '\n':
      start += 1
      contents.pop(0)
    # Remove trailing blank lines.
    while contents[-1] == '\n':
      end -= 1
      contents.pop()
    # Indent is taken from the innermost open INDENT token, if any.
    indent = len(indent_stack[-1]) if indent_stack else 0
    if endmarker:
      indent = len(contents[0])
    # stop is exclusive, hence end + 1.
    return start, end + 1, indent

  def iter_logical_lines(self, blob):
    """Returns an iterator of (start_line, stop_line, indent) for logical lines"""
    indent_stack = []
    contents = []
    line_number_start = None
    for token in self.iter_tokens(blob):
      token_type, token_text, token_start = token[0:3]
      # Track the current indentation depth via INDENT/DEDENT pairs.
      if token_type == tokenize.INDENT:
        indent_stack.append(token_text)
      if token_type == tokenize.DEDENT:
        indent_stack.pop()
      if token_type in self.SKIP_TOKENS:
        continue
      contents.append(token_text)
      if line_number_start is None:
        line_number_start = token_start[0]
      elif token_type in (tokenize.NEWLINE, tokenize.ENDMARKER):
        # A NEWLINE (or the final ENDMARKER) terminates a logical line.
        yield self.translate_logical_line(
            line_number_start,
            token_start[0] + (1 if token_type is tokenize.NEWLINE else -1),
            list(filter(None, contents)),
            indent_stack,
            endmarker=token_type == tokenize.ENDMARKER)
        contents = []
        line_number_start = None

  def line_range(self, line_number):
    """Return a slice for the given line number"""
    if line_number <= 0 or line_number > len(self.lines):
      raise IndexError('NOTE: Python file line numbers are offset by 1.')
    if line_number not in self.logical_lines:
      # Not the start of a logical line: return just this physical line.
      return slice(line_number, line_number + 1)
    else:
      start, stop, _ = self.logical_lines[line_number]
      return slice(start, stop)

  def enumerate(self):
    """Return an enumeration of line_number, line pairs."""
    # Start at 1 to match the OffByOneList indexing.
    return enumerate(self, 1)


class Nit(object):
  """Encapsulate a Style faux pas.

  The general taxonomy of nits:
  Prefix
    F => Flake8 errors
    E => PEP8 error
    W => PEP8 warning
    T => Twitter error

  Prefix:
    0 Naming
    1 Indentation
    2 Whitespace
    3 Blank line
    4 Import
    5 Line length
    6 Deprecation
    7 Statement
    8 Flake / Logic
    9 Runtime
  """

  COMMENT = 0
  WARNING = 1
  ERROR = 2

  SEVERITY = {
    COMMENT: 'COMMENT',
    WARNING: 'WARNING',
    ERROR: 'ERROR'
  }

  @staticmethod
  def flatten_lines(*line_or_line_list):
    # Chain together any mix of single lines and line lists.
    return itertools.chain(*line_or_line_list)

  def __init__(self, code, severity, python_file, message, line_number=None):
    if severity not in self.SEVERITY:
      raise ValueError('Severity should be one of {}'.format(' '.join(self.SEVERITY.values())))
    self.python_file = python_file
    # Codes look like "T402": one capital prefix letter plus three digits.
    if not re.match(r'[A-Z]\d{3}', code):
      raise ValueError('Code must contain a prefix letter followed by a 3 digit number')
    self.code = code
    self.severity = severity
    self._message = message
    self._line_number = line_number

  def __str__(self):
    """convert ascii for safe terminal output"""
    flat = list(self.flatten_lines([self.message], self.lines))
    return '\n |'.join(flat).encode('ascii', errors='replace')

  @property
  def line_number(self):
    # Render the logical line span as "SSS" or "SSS-EEE"; None when unset.
    if self._line_number:
      line_range = self.python_file.line_range(self._line_number)
      if line_range.stop - line_range.start > 1:
        return '%03d-%03d' % (line_range.start, line_range.stop - 1)
      else:
        return '%03d' % line_range.start

  @property
  def message(self):
    # NOTE(review): "(unknown)" sits where a {filename} placeholder is
    # expected; the filename= keyword below is currently unused — likely
    # mangled from '{code}:{severity:<7} {filename}:{linenum} {message}'.
    return '{code}:{severity:<7} (unknown):{linenum} {message}'.format(
        code=self.code,
        severity=self.SEVERITY[self.severity],
        filename=self.python_file.filename,
        linenum=self.line_number or '*',
        message=self._message)

  @property
  def lines(self):
    # The source lines this nit refers to (empty when no line is attached).
    return self.python_file[self._line_number] if self._line_number else []


class CheckstylePlugin(Interface):
  """Interface for checkstyle plugins."""

  def __init__(self, python_file):
    if not isinstance(python_file, PythonFile):
      raise TypeError('CheckstylePlugin takes PythonFile objects.')
    self.python_file = python_file

  def iter_ast_types(self, ast_type):
    """Yield every node of the given AST type in the wrapped file's tree."""
    for node in ast.walk(self.python_file.tree):
      if isinstance(node, ast_type):
        yield node

  @abstractmethod
  def nits(self):
    """Returns an iterable of Nit pertinent to the enclosed python file."""

  def __iter__(self):
    for nit in self.nits():
      yield nit

  def errors(self):
    """Yield only the ERROR-severity nits."""
    for nit in self:
      if nit.severity is Nit.ERROR:
        yield nit

  def nit(self, code, severity, message, line_number_or_ast=None):
    """Construct a Nit, accepting either a line number or an AST node."""
    line_number = None
    if isinstance(line_number_or_ast, Compatibility.integer):
      line_number = line_number_or_ast
    elif isinstance(line_number_or_ast, ast.AST):
      line_number = getattr(line_number_or_ast, 'lineno', None)
    return Nit(code, severity, self.python_file, message, line_number)

  def comment(self, code, message, line_number_or_ast=None):
    return self.nit(code, Nit.COMMENT, message, line_number_or_ast)

  def warning(self, code, message, line_number_or_ast=None):
    return self.nit(code, Nit.WARNING, message, line_number_or_ast)

  def error(self, code, message, line_number_or_ast=None):
    return self.nit(code, Nit.ERROR, message, line_number_or_ast)
jessrosenfield/pants
src/python/pants/backend/python/tasks/checkstyle/common.py
Python
apache-2.0
9,330
# Copyright (c) 2015 Sachi King
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Chunked download cache: fetches remote objects in fixed-size chunks on
demand and serves arbitrary byte ranges from the cached chunk files."""

import eventlet
import math
import os
import re

#from oslo_config import cfg

from smashcache.cache import filler
from smashcache.pages import errors

# FIX(review): this opts list references `cfg`, whose import is commented out
# above — evaluating it raised NameError on import, so the module could not
# be imported at all.  Commented out alongside the other oslo.config
# remnants; restore together with the cfg import.
#opts = [
#    cfg.StrOpt('chunk_storage_path',
#               default='/tmp/smashcache',
#               help="Location to download chunked target data"),
#    cfg.IntOpt('chunk_size',
#               default=8,
#               help="Size in megabytes to chunk at a time"),
#    cfg.StrOpt('proxy_host_url',
#               help="URL to remote host")
#]

UNITS_Ki = 1024
UNITS_Mi = 1024 ** 2

#CONF = cfg.CONF
#CONF.register_opts(opts)
#CONF(project='smashcache', default_config_files=None)

# Yes commenting out in git... :(
# uwsgi, which I wanted to use, freaked out with oslo config, so fake it


class conf(object):
    """Hard-coded stand-in for oslo.config CONF (see note above)."""
    chunk_storage_path = '/tmp/smashcache'
    chunk_size = 8  # megabytes per chunk
    proxy_host_url = 'http://s3.amz.com/place'


CONF = conf

# Chunk size in bytes.
CHUNKSIZE = CONF.chunk_size * UNITS_Mi


class CacheObject(object):
    """Storage of known objects.

    Tracks one remote object: its size/content-type (from upstream HEAD
    headers), which chunks are cached on disk, and which are being fetched.
    """

    # Splits "/dir/sub/name.ext" into ("dir/sub/", "name.ext").
    # (raw string: the original '\/' was an invalid escape in a normal string)
    path_file_re = re.compile(r'^\/(.+/)?(.+\..+)$')

    def __init__(self, object_uri):
        if not isinstance(object_uri, str):
            raise errors.error500()
        r = self.path_file_re.match(object_uri)
        if r:
            object_path = "" if r.group(1) is None else r.group(1)
            object_name = r.group(2)
        else:
            print("Invalid file name %s" % object_uri)
            raise errors.error404()
        self.origin_url = (CONF.proxy_host_url + object_uri)
        self._headerValues()
        self._ensurePathsExist(object_path)
        self.stored_object_path = ("%s/%s/%s" % (CONF.chunk_storage_path,
                                                 object_path, object_name))
        self.total_chunks = math.ceil(self.object_size / CHUNKSIZE)
        # The final chunk is whatever is left after the full-size chunks.
        self.last_chunk_size = (self.object_size -
                                (self.total_chunks - 1) * CHUNKSIZE)
        # chunks[i]: chunk i fully cached; chunk_load[i]: fetch in progress.
        self.chunks = []
        self.chunk_load = []
        for _ in range(self.total_chunks):
            self.chunks.append(False)
            self.chunk_load.append(False)

    def _ensurePathsExist(self, object_path):
        # TODO: Directory transversal
        paths = ['']
        if object_path != '':
            paths.extend(object_path.strip('/').split('/'))
        path = CONF.chunk_storage_path
        for p in paths:
            path = ("%s/%s" % (path, p))
            if not os.path.exists(path):
                os.makedirs(path)

    def _headerValues(self):
        """Fetch object size and content type from the upstream headers."""
        upstream_headers = filler.getHeaders(self.origin_url)
        self.object_size = int(upstream_headers.get('content-length'))
        self.content_type = upstream_headers.get('content-type')
        if not self.object_size:
            raise errors.error502()

    def getRangeIterable(self, byte_start, byte_end):
        """Yield the bytes in [byte_start, byte_end) in <=256 KiB pieces,
        pulling chunks from upstream as needed."""
        initial_chunk = math.floor(byte_start / CHUNKSIZE)
        current_chunk = initial_chunk
        start_offset = byte_start - initial_chunk * CHUNKSIZE
        total_bytes = byte_end - byte_start
        remaining_bytes = total_bytes
        max_read_bytes = 256 * UNITS_Ki
        bytes_to_read = max_read_bytes
        while True:
            if remaining_bytes == 0:
                break
            self.getOrWaitChunk(current_chunk)
            with open(self._chunk_path(current_chunk), 'rb') as f:
                if current_chunk == initial_chunk:
                    f.seek(start_offset)
                while True:
                    if remaining_bytes < max_read_bytes:
                        bytes_to_read = remaining_bytes
                    read_bytes = f.read(bytes_to_read)
                    remaining_bytes -= len(read_bytes)
                    yield read_bytes
                    # A short read means this chunk file is exhausted (or we
                    # are done); advance to the next chunk.
                    if len(read_bytes) != max_read_bytes:
                        current_chunk += 1
                        break

    def getOrWaitChunk(self, chunk_number):
        """Ensure a chunk is on disk: fetch it, or wait for an in-flight fetch."""
        if not self.chunks[chunk_number] and not self.chunk_load[chunk_number]:
            self.chunk_load[chunk_number] = True
            self._fetchChunk(chunk_number)
            self.chunks[chunk_number] = True
        elif self.chunks[chunk_number]:
            pass
        elif self.chunk_load[chunk_number]:
            # Another greenthread is fetching this chunk; yield until done.
            while not self.chunks[chunk_number]:
                eventlet.sleep()
        else:
            raise errors.error500()

    def _fetchChunk(self, chunk_number):
        byte_range = (chunk_number * CHUNKSIZE,
                      (chunk_number + 1) * CHUNKSIZE - 1)
        # Skip the download when a complete chunk file already exists.
        if self._validChunkExists(chunk_number):
            return
        filler.fetchRangeToFile(self.origin_url, byte_range,
                                self._chunk_path(chunk_number))

    def _validChunkExists(self, chunk_number):
        """True when the chunk file exists with exactly the expected size."""
        chunk_path = self._chunk_path(chunk_number)
        expected_size = CHUNKSIZE
        if chunk_number == self.total_chunks - 1:
            expected_size = self.last_chunk_size
        return (os.path.isfile(chunk_path) and
                os.path.getsize(chunk_path) == expected_size)

    def _chunk_path(self, chunk_number):
        return ("%s.%s" % (self.stored_object_path, chunk_number))


class Cache(object):
    """Front-end mapping of URIs to CacheObjects, with WSGI-style helpers."""

    def __init__(self):
        self.objects = {}

    def headers(self, uri):
        """Return the Content-Type header pair for uri, creating the entry."""
        if uri not in self.objects.keys():
            self.objects[uri] = CacheObject(uri)
        return [('Content-Type', self.objects[uri].content_type)]

    def headersContentLength(self, uri):
        """Return the full-object Content-Length header pair for uri."""
        if uri not in self.objects.keys():
            self.objects[uri] = CacheObject(uri)
        return [('Content-Length', str(self.objects[uri].object_size))]

    def getIterator(self, uri, headers, start=0, end=None):
        """Return a byte iterator for [start, end), extending `headers`
        in place with Content-Range/Content-Length as appropriate."""
        if uri not in self.objects.keys():
            self.objects[uri] = CacheObject(uri)
        if not end or end > self.objects[uri].object_size:
            end = self.objects[uri].object_size
        if start > end:
            raise errors.error400()
        if start == 0 and end == self.objects[uri].object_size:
            content_length = self.objects[uri].object_size
        else:
            # Sigh, so because 0 is "send first byte" and there are 20 bytes
            # we're sending bytes 0-19. If we tell chrome we are sending
            # 0-20, that's 21 bytes and chrome freaks and sends a load of RST
            #
            # Todo: Look into how I handle send bytes and try to make this
            # less of a case-by-case modification to headers. It's confusing
            headers.extend([('Content-Range',
                             ("bytes %s-%s/%s" %
                              (start, end - 1,
                               self.objects[uri].object_size)))])
            content_length = end - start
        headers.extend([('Content-Length', str(content_length))])
        return self.objects[uri].getRangeIterable(start, end)
nakato/smashcache
smashcache/cache/cache.py
Python
apache-2.0
7,314
import TownLoader
import DDStreet
from toontown.suit import Suit


class DDTownLoader(TownLoader.TownLoader):
    """TownLoader specialization for the Donald's Dock streets.

    Supplies the DD-specific street class, music tracks and storage DNA,
    and keeps the matching suit asset group loaded for the hood's lifetime.
    """

    def __init__(self, hood, parentFSM, doneEvent):
        TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
        self.streetClass = DDStreet.DDStreet
        self.musicFile = 'phase_6/audio/bgm/DD_SZ.ogg'
        self.activityMusicFile = 'phase_6/audio/bgm/DD_SZ_activity.ogg'
        self.townStorageDNAFile = 'phase_6/dna/storage_DD_town.pdna'

    def load(self, zoneId):
        TownLoader.TownLoader.load(self, zoneId)
        # NOTE(review): 2 presumably selects the Donald's Dock suit asset
        # group — confirm against Suit.loadSuits.
        Suit.loadSuits(2)
        # canonicalBranchZone is set by the base class load() above.
        dnaFile = 'phase_6/dna/donalds_dock_' + str(self.canonicalBranchZone) + '.pdna'
        self.createHood(dnaFile)

    def unload(self):
        # Release the suit assets acquired in load() before tearing down.
        Suit.unloadSuits(2)
        TownLoader.TownLoader.unload(self)

    def enter(self, requestStatus):
        TownLoader.TownLoader.enter(self, requestStatus)

    def exit(self):
        TownLoader.TownLoader.exit(self)
silly-wacky-3-town-toon/SOURCE-COD
toontown/town/DDTownLoader.py
Python
apache-2.0
951
# 400. Nth Digit
#
# The decimal digits of 1, 2, 3, ... written as one endless stream:
# 123456789101112...
# Return the digit at (1-based) position n.
# n is positive and fits in a 32-bit signed integer.
#
# Example: n = 11 -> 0, the second digit of the number 10.


class Solution(object):
    def findNthDigit(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Digits come in groups by number length: 9 one-digit numbers,
        # 90 two-digit numbers, 900 three-digit numbers, and so on.
        # Skip whole groups until n falls inside one.
        group_size = 9
        digits = 1
        while n > group_size * digits:
            n -= group_size * digits
            group_size *= 10
            digits += 1

        # n is now an offset into the block of `digits`-digit numbers,
        # whose first member is 10 ** (digits - 1).
        offset, remainder = divmod(n, digits)
        number = 10 ** (digits - 1) + offset
        if remainder == 0:
            # n lands exactly on the last digit of the previous number.
            return (number - 1) % 10
        return int(str(number)[remainder - 1])


print(Solution().findNthDigit(11))
gengwg/leetcode
400_nth_digit.py
Python
apache-2.0
1,132
'''
Have the function MostFreeTime(strArr) read the strArr parameter being passed which will represent a full day and will be filled with events that span from time X to time Y in the day. The format of each event will be hh:mmAM/PM-hh:mmAM/PM. For example, strArr may be ["10:00AM-12:30PM", "02:00PM-02:45PM","09:10AM-09:50AM"]. Your program will have to output the longest amount of free time available between the start of your first event and the end of your last event in the format: hh:mm. The start event should be the earliest event in the day and the latest event should be the latest event in the day. The output for the previous input would therefore be 01:30 (with the earliest event in the day starting at 09:10AM and the latest event ending at 02:45PM). The input will contain at least 3 events and the events may be out of order.
'''
# NOTE(review): this file is Python 2 (print statements, sorted(cmp=...),
# integer '/'); it will not run under Python 3 without porting.


def min_bef(hh, mm, pm):
    # Minutes remaining between hh:mm and midnight.
    # pm is 1 for PM times, 0 for AM.
    # NOTE(review): for 12:xxAM this returns a flat 1440 and ignores mm.
    if (pm == 0) and hh == 12:
        return 1440
    elif (pm == 1):
        if(hh < 12):
            hh += 12  # convert to 24-hour clock (12PM stays 12)
    mm = 60 - mm
    return ((24 - (hh + 1)) * 60) + mm


def min_aft(hh, mm, pm):
    # Minutes elapsed since midnight at hh:mm (pm: 1 for PM, 0 for AM).
    # NOTE(review): for 12:xxAM this returns 0 and ignores mm.
    if (pm == 0) and hh == 12:
        return 0
    elif (pm == 1):
        if(hh < 12):
            hh += 12  # convert to 24-hour clock
    return (hh * 60) + mm


def CountingMinutesI(sen):
    # Duration in minutes of an interval "hh:mmAM/PM-hh:mmAM/PM".
    # Rewrites the string so it splits into six integers:
    # [h1, m1, pm1, h2, m2, pm2] where pm is 1 for PM, 0 for AM.
    sen = sen.replace('M', '')
    sen = sen.replace(':', ' ')
    sen = sen.replace('P', ' 1')
    sen = sen.replace('A', ' 0')
    lit = sen.split("-")
    lit = lit[0].split(" ") + lit[1].split(" ")
    lita = []
    for i in lit:
        lita.append(int(i))
    #print lita
    if(lita[2] == lita[5]):
        # Same half of the day: difference of minutes-before-midnight;
        # a negative result means the interval wraps around the clock.
        res = min_bef(lita[0], lita[1], lita[2]) - min_bef(lita[3], lita[4], lita[5])
        if res < 0:
            return 1440 + res
        else:
            return res
    elif(lita[2] < lita[5]):
        # AM start, PM end.
        return min_bef(lita[0], lita[1], lita[2]) - min_bef(lita[3], lita[4], lita[5])
    elif(lita[2] > lita[5]):
        # PM start, AM end: wraps past midnight.
        return min_bef(lita[0], lita[1], lita[2]) + min_aft(lita[3], lita[4], lita[5])


def sort_times(times):
    # Partition events by the AM/PM of their start time, sort each half,
    # and concatenate (all-AM events first, then PM).
    ams = []
    pms = []
    for i in times:
        if i[5].lower() == 'p' and int(i[0:2]) < 12:
            pms.append(i)
        else:
            ams.append(i)
    full = sort_timeList(ams) + sort_timeList(pms)
    return full


def sort_timeList(times):
    # Sort one AM-or-PM bucket by start time using the comparator below.
    sortedTimes = sorted(times, cmp=make_comparator(less_than), reverse=False)#[::-1]
    return sortedTimes


def less_than(x, y):
    # Compares "hh:mm..." strings: hours first, then minutes.
    # NOTE(review): this falls through to the minute comparison even when
    # x's hour is GREATER than y's (e.g. "11:20" vs "10:30" compares
    # 20 < 30 and reports True), so buckets can be mis-sorted — verify.
    if(int(x[0:2]) < int(y[0:2])):
        #print int(x[0:2]),int(y[0:2])
        return True
    elif (int(x[3:5]) < int(y[3:5])):
        #print int(x[3:5]),int(y[3:5])
        return True
    else:
        return False


def make_comparator(less_than):
    # Adapt a boolean less-than predicate to a Python 2 cmp function.
    def compare(x, y):
        if less_than(x, y):
            return -1
        elif less_than(y, x):
            return 1
        else:
            return 0
    return compare


def MostFreeTime(times):
    # Longest gap between consecutive events, formatted "hh:mm".
    times = sort_times(times)
    #print times
    maxt = 0
    for i in range(0, len(times) - 1):
        # Gap = end of event i ("...-hh:mmXM"[8:]) to start of event i+1.
        free = times[i][8:] + "-" + times[i+1][:7]
        maxt = max(maxt, CountingMinutesI(free))
        #print maxt
    #print "FInal result: "+str(maxt )
    # Python 2 integer division gives whole hours here.
    return str(maxt/60).zfill(2) + ":" + str(maxt % 60).zfill(2)


sen = ["12:15PM-02:00PM", "09:00AM-12:11PM", "02:02PM-04:00PM"]
print MostFreeTime(sen)
FA810/My_Codes
Python/most_free_time.py
Python
apache-2.0
2,924
from mtools.util.logevent import LogEvent
from mtools.util.input_source import InputSource
from math import ceil
from datetime import datetime
import time
import re


class LogFile(InputSource):
    """ wrapper class for log files, either as open file streams of from stdin.

    Most metadata (start/end datetimes, file size, line count, restarts) is
    computed lazily and cached in the underscore-prefixed attributes below.
    """

    def __init__(self, filehandle):
        """ provide logfile as open file stream or stdin. """
        self.filehandle = filehandle
        self.name = filehandle.name
        self.from_stdin = filehandle.name == "<stdin>"
        # lazily-computed caches, filled by _calculate_bounds() / _iterate_lines()
        self._start = None
        self._end = None
        self._filesize = None
        self._num_lines = None
        self._restarts = None
        self._binary = None
        self._timezone = None
        self._datetime_format = None
        self._year_rollover = None

        # make sure bounds are calculated before starting to iterate, including potential year rollovers
        self._calculate_bounds()

    @property
    def start(self):
        """ lazy evaluation of start and end of logfile. Returns None for stdin input currently. """
        if not self._start:
            self._calculate_bounds()
        return self._start

    @property
    def end(self):
        """ lazy evaluation of start and end of logfile. Returns None for stdin input currently. """
        if not self._end:
            self._calculate_bounds()
        return self._end

    @property
    def timezone(self):
        """ lazy evaluation of timezone of logfile. """
        if not self._timezone:
            self._calculate_bounds()
        return self._timezone

    @property
    def filesize(self):
        """ lazy evaluation of the file size in bytes. Returns None for stdin input currently. """
        if self.from_stdin:
            return None
        if not self._filesize:
            self._calculate_bounds()
        return self._filesize

    @property
    def datetime_format(self):
        """ lazy evaluation of the datetime format. """
        if not self._datetime_format:
            self._calculate_bounds()
        return self._datetime_format

    @property
    def year_rollover(self):
        """ lazy evaluation of the year-rollover marker.

        False if no rollover was detected, otherwise the end datetime
        (see _calculate_bounds).
        """
        # NOTE(review): `== None` works but `is None` is idiomatic; left unchanged.
        if self._year_rollover == None:
            self._calculate_bounds()
        return self._year_rollover

    @property
    def num_lines(self):
        """ lazy evaluation of the number of lines. Returns None for stdin input currently. """
        if self.from_stdin:
            return None
        if not self._num_lines:
            self._iterate_lines()
        return self._num_lines

    @property
    def restarts(self):
        """ lazy evaluation of all restarts (list of (version, LogEvent) tuples). """
        if not self._num_lines:
            self._iterate_lines()
        return self._restarts

    @property
    def binary(self):
        """ lazy evaluation of the binary name ('mongos' or 'mongod'). """
        if not self._num_lines:
            self._iterate_lines()
        return self._binary

    @property
    def versions(self):
        """ return all version changes.

        Collapses consecutive restarts with the same version into one entry.
        """
        versions = []
        for v, _ in self.restarts:
            if len(versions) == 0 or v != versions[-1]:
                versions.append(v)
        return versions

    def next(self):
        """ get next line, adjust for year rollover and hint datetime format. """
        # use readline here because next() iterator uses internal readahead buffer so seek position is wrong
        line = self.filehandle.readline()
        if line == '':
            raise StopIteration
        line = line.rstrip('\n')

        le = LogEvent(line)

        # hint format and nextpos from previous line
        # NOTE(review): _datetime_nextpos is only assigned in _calculate_bounds()
        # and below; the `self._datetime_format and` guard short-circuits before
        # it is ever read unassigned (e.g. for stdin input).
        if self._datetime_format and self._datetime_nextpos != None:
            ret = le.set_datetime_hint(self._datetime_format, self._datetime_nextpos, self.year_rollover)
            if not ret:
                # logevent indicates timestamp format has changed, invalidate hint info
                self._datetime_format = None
                self._datetime_nextpos = None
        elif le.datetime:
            # print "not hinting"
            # gather new hint info from another logevent
            self._datetime_format = le.datetime_format
            self._datetime_nextpos = le._datetime_nextpos

        return le

    def __iter__(self):
        """ iteration over LogFile object will return a LogEvent object for each line (generator) """
        le = None
        while True:
            try:
                le = self.next()
            except StopIteration as e:
                # end of log file, get end date
                if not self.end and self.from_stdin:
                    if le and le.datetime:
                        self._end = le.datetime

                # future iterations start from the beginning
                if not self.from_stdin:
                    self.filehandle.seek(0)

                # now raise StopIteration exception
                raise e

            # get start date for stdin input
            if not self.start and self.from_stdin:
                if le and le.datetime:
                    self._start = le.datetime

            yield le

    def __len__(self):
        """ return the number of lines in a log file. """
        return self.num_lines

    def _iterate_lines(self):
        """ count number of lines (can be expensive).

        Also collects restart markers (version strings) and the binary name
        as a side effect, then rewinds the file handle.
        """
        self._num_lines = 0
        self._restarts = []

        l = 0
        for l, line in enumerate(self.filehandle):

            # find version string
            if "version" in line:

                restart = None
                # differentiate between different variations
                if "mongos" in line or "MongoS" in line:
                    self._binary = 'mongos'
                elif "db version v" in line:
                    self._binary = 'mongod'
                else:
                    continue

                version = re.search(r'(\d\.\d\.\d+)', line)
                if version:
                    version = version.group(1)
                    restart = (version, LogEvent(line))
                    self._restarts.append(restart)

        # enumerate is 0-based, so the line count is last index + 1
        self._num_lines = l+1

        # reset logfile
        self.filehandle.seek(0)

    def _calculate_bounds(self):
        """ calculate beginning and end of logfile.

        Returns False for stdin (bounds cannot be pre-computed on a stream),
        True otherwise. Sets _start, _end, _timezone, _filesize, the datetime
        format hints and the year-rollover marker.
        """
        if self.from_stdin:
            return False

        # get start datetime
        for line in self.filehandle:
            logevent = LogEvent(line)
            if logevent.datetime:
                self._start = logevent.datetime
                self._timezone = logevent.datetime.tzinfo
                self._datetime_format = logevent.datetime_format
                self._datetime_nextpos = logevent._datetime_nextpos
                break

        # get end datetime (lines are at most 10k, go back 30k at most to make sure we catch one)
        self.filehandle.seek(0, 2)
        self._filesize = self.filehandle.tell()
        self.filehandle.seek(-min(self._filesize, 30000), 2)

        for line in reversed(self.filehandle.readlines()):
            logevent = LogEvent(line)
            if logevent.datetime:
                self._end = logevent.datetime
                break

        # if there was a roll-over, subtract 1 year from start time
        if self._end < self._start:
            self._start = self._start.replace(year=self._start.year-1)
            self._year_rollover = self._end
        else:
            self._year_rollover = False

        # reset logfile
        self.filehandle.seek(0)

        return True

    def _find_curr_line(self, prev=False):
        """ internal helper function that finds the current (or previous if
            prev=True) line in a log file based on the current seek position.

        Returns the next LogEvent that carries a datetime, or None when the
        end of the file is reached.
        """
        curr_pos = self.filehandle.tell()
        line = None

        # jump back 15k characters (at most) and find last newline char
        jump_back = min(self.filehandle.tell(), 15000)
        self.filehandle.seek(-jump_back, 1)
        buff = self.filehandle.read(jump_back)
        self.filehandle.seek(curr_pos, 0)

        newline_pos = buff.rfind('\n')
        if prev:
            newline_pos = buff[:newline_pos].rfind('\n')

        # move back to last newline char
        if newline_pos == -1:
            # no newline found in the window: fall back to start of file
            self.filehandle.seek(0)
            return self.next()

        self.filehandle.seek(newline_pos - jump_back + 1, 1)

        # roll forward until we found a line with a datetime
        try:
            logevent = self.next()
            while not logevent.datetime:
                logevent = self.next()

            return logevent
        except StopIteration:
            # reached end of file
            return None

    def fast_forward(self, start_dt):
        """ Fast-forward a log file to the given start_dt datetime object using binary search.
            Only fast for files. Streams need to be forwarded manually, and it will miss the
            first line that would otherwise match (as it consumes the log line).
        """
        if self.from_stdin:
            # skip lines until start_dt is reached
            return

        else:
            # fast bisection path
            min_mark = 0
            max_mark = self.filesize
            step_size = max_mark

            # check if start_dt is already smaller than first datetime
            self.filehandle.seek(0)
            le = self.next()
            if le.datetime and le.datetime >= start_dt:
                self.filehandle.seek(0)
                return

            le = None
            self.filehandle.seek(0)

            # search for lower bound: halve the step each round and flip its
            # sign depending on which side of start_dt the probed line falls
            while abs(step_size) > 100:
                step_size = ceil(step_size / 2.)

                self.filehandle.seek(step_size, 1)
                le = self._find_curr_line()
                if not le:
                    break

                if le.datetime >= start_dt:
                    step_size = -abs(step_size)
                else:
                    step_size = abs(step_size)

            if not le:
                return

            # now walk backwards until we found a truely smaller line
            while le and self.filehandle.tell() >= 2 and le.datetime >= start_dt:
                self.filehandle.seek(-2, 1)

                le = self._find_curr_line(prev=True)
corymintz/mtools
mtools/util/logfile.py
Python
apache-2.0
10,383
from robot.libraries.BuiltIn import BuiltIn


def fail_with_traceback(traceback_message):
    """Fail the currently running Robot Framework test.

    Delegates to the BuiltIn library's ``fail`` keyword, passing
    *traceback_message* through unchanged as the failure message.
    """
    builtin_library = BuiltIn()
    builtin_library.fail(traceback_message)
allure-framework/allure-python
allure-robotframework/examples/status/status_library.py
Python
apache-2.0
129
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from kuryr_libnetwork.schemata import commons REQUEST_POOL_SCHEMA = { u'links': [{ u'method': u'POST', u'href': u'/IpamDriver.RequestPool', u'description': u'Allocate pool of ip addresses', u'rel': u'self', u'title': u'Create' }], u'title': u'Create pool', u'required': [u'AddressSpace', u'Pool', u'SubPool', u'V6'], u'definitions': {u'commons': {}}, u'$schema': u'http://json-schema.org/draft-04/hyper-schema', u'type': u'object', u'properties': { u'AddressSpace': { u'description': u'The name of the address space.', u'type': u'string', u'example': u'foo', }, u'Pool': { u'description': u'A range of IP Addresses represented in ' u'CIDR format address/mask.', u'$ref': u'#/definitions/commons/definitions/cidr' }, u'SubPool': { u'description': u'A subset of IP range from Pool in' u'CIDR format address/mask.', u'$ref': u'#/definitions/commons/definitions/cidr' }, u'Options': { u'type': [u'object', u'null'], u'description': u'Options', u'example': {}, }, u'V6': { u'description': u'If set to "True", requesting IPv6 pool and ' u'vice-versa.', u'type': u'boolean', u'example': False } } } REQUEST_POOL_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS
celebdor/kuryr-libnetwork
kuryr_libnetwork/schemata/request_pool.py
Python
apache-2.0
2,102
# coding: utf-8
# pylint: disable= arguments-differ
"""Inception, implemented in Gluon."""
__all__ = ['Inception3', 'inception_v3']

from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ..custom_layers import HybridConcurrent

# Helpers
def _make_basic_conv(**kwargs):
    # conv (no bias) -> batch norm -> ReLU; the standard Inception conv unit
    out = nn.HybridSequential(prefix='')
    out.add(nn.Conv2D(use_bias=False, **kwargs))
    out.add(nn.BatchNorm(epsilon=0.001))
    out.add(nn.Activation('relu'))
    return out

def _make_branch(use_pool, *conv_settings):
    """Build one branch of an Inception block.

    use_pool: None, 'avg' (3x3 stride-1 avg pool) or 'max' (3x3 stride-2
    max pool) prepended before the convolutions.
    Each element of conv_settings is a (channels, kernel_size, strides,
    padding) tuple; None entries fall back to the Conv2D defaults.
    """
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool2D(pool_size=3, strides=2))
    setting_names = ['channels', 'kernel_size', 'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(**kwargs))
    return out

def _make_A(pool_features, prefix):
    # Inception block variant A: 1x1 / 5x5 / double-3x3 / pooled branches,
    # concatenated along the channel axis.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (64, 1, None, None)))
        out.add(_make_branch(None,
                             (48, 1, None, None),
                             (64, 5, None, 2)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, None, 1)))
        out.add(_make_branch('avg',
                             (pool_features, 1, None, None)))
    return out

def _make_B(prefix):
    # Inception block variant B: stride-2 branches, downsamples the feature map.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (384, 3, 2, None)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, 2, None)))
        out.add(_make_branch('max'))
    return out

def _make_C(channels_7x7, prefix):
    # Inception block variant C: factorized 7x7 convolutions (1x7 then 7x1).
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None)))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0))))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out

def _make_D(prefix):
    # Inception block variant D: another stride-2 (downsampling) block.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (320, 3, 2, None)))
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (192, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0)),
                             (192, 3, 2, None)))
        out.add(_make_branch('max'))
    return out

def _make_E(prefix):
    # Inception block variant E: branches that themselves split into
    # parallel 1x3/3x1 sub-branches before concatenation.
    out = HybridConcurrent(concat_dim=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None,
                                    (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(concat_dim=1, prefix='')
        branch_3x3_split.add(_make_branch(None,
                                          (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None,
                                          (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None,
                                       (448, 1, None, None),
                                       (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(concat_dim=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (3, 1), None, (1, 0))))

        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out

def make_aux(classes):
    # Auxiliary classifier head.
    # NOTE(review): defined but not wired into Inception3 below (the
    # use_aux_logits hook in __init__ is commented out).
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(channels=128, kernel_size=1))
    out.add(_make_basic_conv(channels=768, kernel_size=5))
    out.add(nn.Flatten())
    out.add(nn.Dense(classes))
    return out

# Net
class Inception3(HybridBlock):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self, classes=1000, **kwargs):
        super(Inception3, self).__init__(**kwargs)
        # self.use_aux_logits = use_aux_logits
        with self.name_scope():
            # stem convolutions + A/B/C inception stages
            self.features = nn.HybridSequential(prefix='')
            self.features.add(_make_basic_conv(channels=32, kernel_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=32, kernel_size=3))
            self.features.add(_make_basic_conv(channels=64, kernel_size=3, padding=1))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=80, kernel_size=1))
            self.features.add(_make_basic_conv(channels=192, kernel_size=3))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_A(32, 'A1_'))
            self.features.add(_make_A(64, 'A2_'))
            self.features.add(_make_A(64, 'A3_'))
            self.features.add(_make_B('B_'))
            self.features.add(_make_C(128, 'C1_'))
            self.features.add(_make_C(160, 'C2_'))
            self.features.add(_make_C(160, 'C3_'))
            self.features.add(_make_C(192, 'C4_'))

            # D/E stages, global pooling and the dense classification layer
            self.classifier = nn.HybridSequential(prefix='')
            self.classifier.add(_make_D('D_'))
            self.classifier.add(_make_E('E1_'))
            self.classifier.add(_make_E('E2_'))
            self.classifier.add(nn.AvgPool2D(pool_size=8))
            self.classifier.add(nn.Dropout(0.5))
            self.classifier.add(nn.Flatten())
            self.classifier.add(nn.Dense(classes))

    def hybrid_forward(self, F, x):
        # feature extractor followed by the classifier head
        x = self.features(x)
        x = self.classifier(x)
        return x

# Constructor
def inception_v3(pretrained=False, ctx=cpu(), **kwargs):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    net = Inception3(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        net.load_params(get_model_file('inceptionv3'), ctx=ctx)
    return net
hesseltuinhof/mxnet
python/mxnet/gluon/model_zoo/vision/inception.py
Python
apache-2.0
7,902
import timeit

# pyximport compiles .pyx sources on import, so the Cython module mod2
# can be imported directly without a setup.py build step.
import pyximport; pyximport.install()
from mod2 import cysum, cysum2


def pysum(start, step, count):
    """Pure-Python baseline: add *step* to *start*, *count* times.

    Mirrors the Cython implementations (cysum / cysum2) so the three
    timings below compare like for like.
    """
    ret = start
    for i in range(count):
        ret += step
    return ret


# timeit runs each statement 1,000,000 times by default; the setup string
# imports the function under test into timeit's namespace.
print('Python', timeit.timeit('pysum(0, 1, 100)', 'from __main__ import pysum'))
print('Cython', timeit.timeit('cysum(0, 1, 100)', 'from __main__ import cysum'))
print('Cython with types', timeit.timeit('cysum2(0, 1, 100)', 'from __main__ import cysum2'))
asvetlov/optimization-kaunas-2017
2.py
Python
apache-2.0
463
# SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2015, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from env import TestEnv from wlgen import RTA, Periodic, Step import trappy import shutil import os import unittest import logging import json import logging logging.basicConfig(level=logging.INFO) # Read the config file and update the globals CONF_FILE = os.path.join( os.path.dirname( os.path.abspath(__file__)), "hmp_parity.config") with open(CONF_FILE, "r") as fh: conf_vars = json.load(fh) globals().update(conf_vars) def local_setup(env): env.target.cpufreq.set_all_governors("performance") if ENABLE_EAS: env.target.execute( "echo ENERGY_AWARE > /sys/kernel/debug/sched_features") def between_threshold_pct(a, b): THRESHOLD_PERCENT = 3 lower = b - THRESHOLD_PERCENT upper = b + THRESHOLD_PERCENT if a >= lower and a <= upper: return True return False def between_threshold_abs(a, b): THRESHOLD = 0.25 lower = b - THRESHOLD upper = b + THRESHOLD if a >= lower and a <= upper: return True return False SMALL_WORKLOAD = { "duty_cycle_pct": SMALL_DCYCLE, "duration_s": WORKLOAD_DURATION_S, "period_ms": WORKLOAD_PERIOD_MS, } BIG_WORKLOAD = { "duty_cycle_pct": BIG_DCYCLE, "duration_s": WORKLOAD_DURATION_S, "period_ms": WORKLOAD_PERIOD_MS, } STEP_WORKLOAD = { "start_pct": STEP_LOW_DCYCLE, "end_pct": STEP_HIGH_DCYCLE, "time_s": WORKLOAD_DURATION_S, "loops": 2 } from bart.sched.SchedAssert import SchedAssert from bart.sched.SchedMultiAssert import 
SchedMultiAssert import operator import json def log_result(data, log_fh): result_str = json.dumps(data, indent=3) logging.info(result_str) log_fh.write(result_str) class ForkMigration(unittest.TestCase): """ Goal ==== Check that newly created threads start on a big CPU Detailed Description ==================== The test spawns as many threads as there are little cores. It then checks that all threads started on a big core. Expected Behaviour ================== The threads start on a big core. """ @classmethod def setUpClass(cls): cls.params = {} cls.task_prefix = "fmig" cls.env = TestEnv(test_conf=TEST_CONF) cls.trace_file = os.path.join(cls.env.res_dir, "fork_migration.dat") cls.log_file = os.path.join(cls.env.res_dir, "fork_migration.json") cls.populate_params() cls.tasks = cls.params.keys() cls.num_tasks = len(cls.tasks) local_setup(cls.env) cls.run_workload() cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") @classmethod def tearDownClass(cls): cls.log_fh.close() @classmethod def populate_params(cls): for idx in range(len(cls.env.target.bl.bigs)): task = cls.task_prefix + str(idx) cls.params[task] = Periodic(**BIG_WORKLOAD).get() for idx in range(len(cls.env.target.bl.littles)): task = cls.task_prefix + str(idx) cls.params[task] = Periodic(**SMALL_WORKLOAD).get() @classmethod def run_workload(cls): wload = RTA( cls.env.target, "fork_migration", calibration=cls.env.calibration()) wload.conf(kind="profile", params=cls.params) cls.env.ftrace.start() wload.run( out_dir=cls.env.res_dir, background=False) cls.env.ftrace.stop() trace = cls.env.ftrace.get_trace(cls.trace_file) def test_first_cpu(self): "Fork Migration: Test First CPU" logging.info("Fork Migration: Test First CPU") f_assert = SchedMultiAssert( self.trace_file, self.env.topology, execnames=self.tasks) log_result( f_assert.getFirstCpu(), self.log_fh) self.assertTrue( f_assert.assertFirstCpu( self.env.target.bl.bigs, rank=self.num_tasks), msg="Not all the new generated tasks started on a 
big CPU") class SmallTaskPacking(unittest.TestCase): """ Goal ==== Many small tasks are packed in little cpus Detailed Description ==================== The tests spawns as many tasks as there are cpus in the system. The tasks are small, so none of them should be run on big cpus, the scheduler should pack them on little cpus. Expected Behaviour ================== All tasks run on little cpus. """ @classmethod def setUpClass(cls): cls.params = {} cls.task_prefix = "stp" cls.env = TestEnv(test_conf=TEST_CONF) cls.trace_file = os.path.join( cls.env.res_dir, "small_task_packing.dat") cls.log_file = os.path.join(cls.env.res_dir, "small_task_packing.json") cls.num_tasks = len(cls.env.target.bl.bigs + cls.env.target.bl.littles) cls.populate_params() cls.tasks = cls.params.keys() local_setup(cls.env) cls.run_workload() cls.s_assert = SchedMultiAssert( cls.trace_file, cls.env.topology, execnames=cls.tasks) cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") @classmethod def tearDownClass(cls): cls.log_fh.close() @classmethod def populate_params(cls): for i in range(cls.num_tasks): task = cls.task_prefix + str(i) cls.params[task] = Periodic(**SMALL_WORKLOAD).get() @classmethod def run_workload(cls): wload = RTA( cls.env.target, "small_task_packing", calibration=cls.env.calibration()) wload.conf(kind="profile", params=cls.params) cls.env.ftrace.start() wload.run( out_dir=cls.env.res_dir, background=False) cls.env.ftrace.stop() trace = cls.env.ftrace.get_trace(cls.trace_file) def test_small_task_pack_first_cpu(self): "Small Task Packing: First CPU: BIG" logging.info("Small Task Packing: First CPU: BIG\n") log_result(self.s_assert.getFirstCpu(), self.log_fh) self.assertTrue( self.s_assert.assertFirstCpu( self.env.target.bl.bigs, rank=self.num_tasks), msg="Not all the new generated tasks started on a big CPU") def test_small_task_residency(self): "Small Task Packing: Test Residency (Little Cluster)" logging.info("Small Task Packing: Test Residency (Little 
Cluster)") log_result( self.s_assert.getResidency( "cluster", self.env.target.bl.littles, percent=True), self.log_fh) self.assertTrue( self.s_assert.assertResidency( "cluster", self.env.target.bl.littles, EXPECTED_RESIDENCY_PCT, operator.ge, percent=True, rank=self.num_tasks), msg="Not all tasks are running on LITTLE cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) class OffloadMigrationAndIdlePull(unittest.TestCase): """ Goal ==== Big cpus pull big tasks from little cpus when they become idle Detailed Description ==================== This test runs twice as many tasks are there are big cpus. All these tasks are big tasks. Half of them are called "early_starter" and the other half "migrator". The migrator tasks start 1 second after the early_starter tasks. The test checks that when the big cpus finish executing the early starter tasks, the scheduler moves the migrator tasks to the big cpus. Expected Behaviour ================== As there are as many early_starter tasks as there are big cpus, the early_starter tasks should run in the big cpus until they finish. When the migrator tasks start, there is no spare capacity in the big cpus so they run on the little cpus. Once the big cpus finish with the early_starters, they should pull the migrator tasks and run them. 
""" @classmethod def setUpClass(cls): cls.params = {} cls.env = TestEnv(test_conf=TEST_CONF) cls.trace_file = os.path.join(cls.env.res_dir, "offload_idle_pull.dat") cls.log_file = os.path.join(cls.env.res_dir, "offload_idle_pull.json") cls.early_starters = [] cls.migrators = [] cls.num_tasks = len(cls.env.target.bl.bigs) cls.populate_tasks() local_setup(cls.env) cls.run_workload() cls.offset = cls.get_offset(cls.early_starters[0]) cls.m_assert = SchedMultiAssert( cls.trace_file, cls.env.topology, execnames=cls.migrators) cls.e_assert = SchedMultiAssert( cls.trace_file, cls.env.topology, execnames=cls.early_starters) cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") @classmethod def tearDownClass(cls): cls.log_fh.close() @classmethod def populate_tasks(cls): migrator_workload = BIG_WORKLOAD.copy() migrator_workload["duration_s"] = 9 migrator_workload["delay_s"] = 1 for idx in range(cls.num_tasks): task = "early_starters" + str(idx) cls.params[task] = Periodic(**BIG_WORKLOAD).get() cls.early_starters.append(task) # Tasks that will be idle pulled task = "migrator" + str(idx) cls.params[task] = Periodic(**migrator_workload).get() cls.migrators.append(task) @classmethod def run_workload(cls): wload = RTA( cls.env.target, "offload_idle_pull", calibration=cls.env.calibration()) wload.conf(kind="profile", params=cls.params) cls.env.ftrace.start() wload.run( out_dir=cls.env.res_dir, background=False) cls.env.ftrace.stop() trace = cls.env.ftrace.get_trace(cls.trace_file) @classmethod def get_offset(cls, task_name): return SchedAssert( cls.trace_file, cls.env.topology, execname=task_name).getStartTime() def test_first_cpu_early_starters(self): """Offload Migration and Idle Pull: Test First CPU (Early Starters)""" logging.info( "Offload Migration and Idle Pull: Test First CPU (Early Starters)") log_result( self.e_assert.getFirstCpu(), self.log_fh) self.assertTrue( self.e_assert.assertFirstCpu( self.env.target.bl.bigs, rank=self.num_tasks), msg="Not all the 
new 'early starter' tasks started on a big CPU") def test_first_cpu_migrators(self): "Offload Migration and Idle Pull: Test First CPU (Migrators)" logging.info( "Offload Migration and Idle Pull: Test First CPU (Migrators)") log_result( self.m_assert.getFirstCpu(), self.log_fh) self.assertTrue( self.m_assert.assertFirstCpu( self.env.target.bl.bigs, rank=self.num_tasks), msg="Not all the new 'migrator' tasks started on a big CPU") def test_little_res_migrators(self): "Offload Migration and Idle Pull: Test Little Residency (Migrators)" little_residency_window = (self.offset + 1, self.offset + 5) logging.info( "Offload Migration and Idle Pull: Test Little Residency (Migrators)") log_result( self.m_assert.getResidency( "cluster", self.env.target.bl.littles, percent=True, window=little_residency_window ), self.log_fh) self.assertTrue( self.m_assert.assertResidency( "cluster", self.env.target.bl.littles, EXPECTED_RESIDENCY_PCT, operator.ge, percent=True, window=little_residency_window, rank=self.num_tasks), msg="Not all 'migrator' tasks are running on LITTLE cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) def test_big_res_migrators(self): "Offload Migration and Idle Pull: Test Big Residency (Migrators)" big_residency_window = (self.offset + 5, self.offset + 10) logging.info( "Offload Migration and Idle Pull: Test Big Residency (Migrators)") log_result( self.m_assert.getResidency( "cluster", self.env.target.bl.bigs, percent=True, window=big_residency_window ), self.log_fh) self.assertTrue( self.m_assert.assertResidency( "cluster", self.env.target.bl.bigs, EXPECTED_RESIDENCY_PCT, operator.ge, percent=True, window=big_residency_window, rank=self.num_tasks), msg="Not all 'migrator' tasks are running on big cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) def test_migrators_switch(self): "Offload Migration and Idle Pull: Test LITTLE -> BIG Idle Pull Switch (Migrators)" switch_window = (self.offset + 4.5, 
self.offset + 5.5) logging.info( "Offload Migration and Idle Pull: Test LITTLE -> BIG Idle Pull Switch (Migrators)") log_result( self.m_assert.assertSwitch( "cluster", self.env.target.bl.littles, self.env.target.bl.bigs, window=switch_window), self.log_fh) self.assertTrue( self.m_assert.assertSwitch( "cluster", self.env.target.bl.littles, self.env.target.bl.bigs, window=switch_window, rank=self.num_tasks), msg="Not all 'migrator' tasks are pulled by idle big cores when expected") def test_big_res_early_starters(self): """Offload Migration and Idle Pull: Test Big Residency (EarlyStarters)""" logging.info( "Offload Migration and Idle Pull: Test Big Residency (EarlyStarters)") big_residency_window = (self.offset, self.offset + 5) log_result(self.e_assert.getResidency( "cluster", self.env.target.bl.bigs, percent=True, window=big_residency_window), self.log_fh) self.assertTrue( self.e_assert.assertResidency( "cluster", self.env.target.bl.bigs, EXPECTED_RESIDENCY_PCT, operator.ge, percent=True, window=big_residency_window, rank=self.num_tasks), msg="Not all 'early starter' tasks are running on big cores for at least {}% of their execution time"\ .format(EXPECTED_RESIDENCY_PCT)) class WakeMigration(unittest.TestCase): """ Goal ==== A task that switches between being big and small moves to big and little cores accordingly Detailed Description ==================== This test creates two tasks that alternate between being big and small workload. They start being small load for 5 seconds, they become big for another 5 seconds, then small for another 5 seconds and finally big for the last 5 seconds. Expected Behaviour ================== The tasks should run on the litlle cpus when they are small and in the big cpus when they are big. 
""" @classmethod def setUpClass(cls): cls.params = {} cls.env = TestEnv(test_conf=TEST_CONF) cls.task_prefix = "wmig" cls.trace_file = os.path.join(cls.env.res_dir, "wake_migration.dat") cls.log_file = os.path.join(cls.env.res_dir, "wake_migration.json") cls.populate_params() cls.tasks = cls.params.keys() cls.num_tasks = len(cls.tasks) local_setup(cls.env) cls.run_workload() cls.s_assert = SchedMultiAssert( cls.trace_file, cls.env.topology, execnames=cls.tasks) cls.offset = cls.get_offset(cls.tasks[0]) cls.log_fh = open(os.path.join(cls.env.res_dir, cls.log_file), "w") @classmethod def tearDownClass(cls): cls.log_fh.close() @classmethod def populate_params(cls): cls.params[cls.task_prefix] = Step(**STEP_WORKLOAD).get() cls.params[cls.task_prefix + "1"] = Step(**STEP_WORKLOAD).get() @classmethod def run_workload(cls): wload = RTA( cls.env.target, "wake_migration", calibration=cls.env.calibration()) wload.conf(kind="profile", params=cls.params) cls.env.ftrace.start() wload.run( out_dir=cls.env.res_dir, background=False) cls.env.ftrace.stop() trace = cls.env.ftrace.get_trace(cls.trace_file) @classmethod def get_offset(cls, task_name): return SchedAssert( cls.trace_file, cls.env.topology, execname=task_name).getStartTime() def test_first_cpu(self): """Wake Migration: Test First CPU""" logging.info("Wake Migration: Test First CPU") log_result(self.s_assert.getFirstCpu(), self.log_fh) self.assertTrue( self.s_assert.assertFirstCpu( self.env.target.bl.bigs, rank=self.num_tasks), msg="Not all the new generated tasks started on a big CPU") def test_little_big_switch1(self): """Wake Migration: LITTLE -> BIG: 1""" expected_time = self.offset + 5 switch_window = ( expected_time - SWITCH_WINDOW_HALF, expected_time + SWITCH_WINDOW_HALF) logging.info( "Wake Migration: LITTLE -> BIG Window: {}".format(switch_window)) log_result( self.s_assert.assertSwitch( "cluster", self.env.target.bl.littles, self.env.target.bl.bigs, window=switch_window), self.log_fh) self.assertTrue( 
self.s_assert.assertSwitch( "cluster", self.env.target.bl.littles, self.env.target.bl.bigs, rank=self.num_tasks, window=switch_window), msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ .format(switch_window)) def test_little_big_switch2(self): """Wake Migration: LITTLE -> BIG: 2""" expected_time = self.offset + 15 switch_window = ( expected_time - SWITCH_WINDOW_HALF, expected_time + SWITCH_WINDOW_HALF) logging.info( "Wake Migration: LITTLE -> BIG Window: {}".format(switch_window)) log_result( self.s_assert.assertSwitch( "cluster", self.env.target.bl.littles, self.env.target.bl.bigs, window=switch_window), self.log_fh) self.assertTrue( self.s_assert.assertSwitch( "cluster", self.env.target.bl.littles, self.env.target.bl.bigs, rank=self.num_tasks, window=switch_window), msg="Not all tasks are wake-migrated to big cores in the expected window: {}"\ .format(switch_window)) def test_big_little_switch1(self): """Wake Migration: BIG -> LITLLE: 1""" expected_time = self.offset switch_window = ( max(expected_time - SWITCH_WINDOW_HALF, 0), expected_time + SWITCH_WINDOW_HALF) logging.info( "Wake Migration: BIG -> LITTLE Window: {}".format(switch_window)) log_result( self.s_assert.assertSwitch( "cluster", self.env.target.bl.bigs, self.env.target.bl.littles, window=switch_window), self.log_fh) self.assertTrue( self.s_assert.assertSwitch( "cluster", self.env.target.bl.bigs, self.env.target.bl.littles, rank=self.num_tasks, window=switch_window), msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ .format(switch_window)) def test_big_little_switch2(self): """Wake Migration: BIG -> LITLLE: 2""" expected_time = self.offset + 10 switch_window = ( expected_time - SWITCH_WINDOW_HALF, expected_time + SWITCH_WINDOW_HALF) logging.info( "Wake Migration: BIG -> LITTLE Window: {}".format(switch_window)) log_result( self.s_assert.assertSwitch( "cluster", self.env.target.bl.bigs, self.env.target.bl.littles, window=switch_window), 
self.log_fh) self.assertTrue( self.s_assert.assertSwitch( "cluster", self.env.target.bl.bigs, self.env.target.bl.littles, rank=self.num_tasks, window=switch_window), msg="Not all tasks are wake-migrated to LITTLE cores in the expected window: {}"\ .format(switch_window))
JaviMerino/lisa
tests/eas/hmp_parity.py
Python
apache-2.0
22,434
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys

from stratuslab.commandbase.AuthnCommand import AuthnCommand

# The system-wide StratusLab python libraries live outside the default
# interpreter path; make them importable before the remaining imports.
sys.path.append('/var/lib/stratuslab/python')

from stratuslab.CloudConnectorFactory import CloudConnectorFactory
from stratuslab.Util import printError
from stratuslab.commandbase.StorageCommand import StorageCommand
from stratuslab.volume_manager.volume_manager_factory import VolumeManagerFactory
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.Authn import AuthnFactory
from stratuslab.Exceptions import OneException

# initialize console logging
import stratuslab.api.LogUtil as LogUtil

LogUtil.get_console_logger()


class MainProgram(AuthnCommand, StorageCommand):
    """A command-line program to detach a persistent disk."""

    def __init__(self):
        # NOTE(review): the base-class __init__ presumably drives the
        # parse/checkOptions/doWork lifecycle -- verify in AuthnCommand.
        super(MainProgram, self).__init__()

    def parse(self):
        # Declare the usage string, positional arguments (volume UUIDs) and
        # command-line options, then delegate to the base class and finally
        # run optparse to split options from the UUID arguments.
        self.parser.usage = '%prog [options] volume-uuid ...'

        self.parser.description = '''
Detach one or more persistent volumes (disks) that were dynamically
attached to a running virtual machine.  The volume-uuid arguments are
the unique identifiers of volumes to detach.
'''

        # NOTE(review): default=0 means a missing --instance silently
        # targets VM ID 0; the later check only rejects negative IDs.
        self.parser.add_option('-i', '--instance', dest='instance',
                               help='The ID of the instance to which the volume attaches',
                               metavar='VM_ID',
                               default=0, type='int')

        StorageCommand.addPDiskEndpointOptions(self.parser)
        AuthnCommand.addCloudEndpointOptions(self.parser)

        super(MainProgram, self).parse()

        self.options, self.uuids = self.parser.parse_args()

    def checkOptions(self):
        # Validate the parsed command line; printError presumably exits the
        # program on failure (TODO confirm against stratuslab.Util).
        super(MainProgram, self).checkOptions()

        if not self.uuids:
            printError('Please provide at least one persistent disk UUID to detach')

        if self.options.instance < 0:
            printError('Please provide a VM ID on which to detach disk')

        try:
            self._retrieveVmNode()
        except OneException, e:
            printError(e)

    def _retrieveVmNode(self):
        # Resolve the physical node hosting the target VM via the cloud
        # connector; stores the result on self.node for later use.
        credentials = AuthnFactory.getCredentials(self.options)
        self.options.cloud = CloudConnectorFactory.getCloud(credentials)
        self.options.cloud.setEndpoint(self.options.endpoint)
        self.node = self.options.cloud.getVmNode(self.options.instance)

    def doWork(self):
        # Hot-detach each requested volume from the VM, reporting per-disk
        # failures without aborting the remaining detachments.
        configHolder = ConfigHolder(self.options.__dict__, self.config or {})
        configHolder.pdiskProtocol = "https"
        pdisk = VolumeManagerFactory.create(configHolder)
        for uuid in self.uuids:
            try:
                target = pdisk.hotDetach(self.options.instance, uuid)
                print 'DETACHED %s from VM %s on /dev/%s' % (uuid, self.options.instance, target)
            except Exception, e:
                printError('DISK %s: %s' % (uuid, e), exit=False)


def main():
    """Entry point: run the program, translating Ctrl-C into a clean exit."""
    try:
        MainProgram()
    except KeyboardInterrupt:
        print '\n\nExecution interrupted by the user... goodbye!'
        return 0
StratusLab/client
cli/user/code/main/python/stratuslab/cmd/stratus_detach_volume.py
Python
apache-2.0
3,765
import re
import pscheduler
import pprint

logger = pscheduler.Log(quiet=True)


# A whole bunch of pattern matching against the output of the "iperf" tool
# client output. Builds up an object of interesting bits from it.
def parse_output(lines):
    """Parse iperf2 client output lines into a result dict.

    Args:
        lines: iterable of lines of iperf client output.

    Returns:
        A dict with:
          succeeded: False when a failure marker was seen or no stream data
              was found, in which case "error" holds the reason.
          intervals: per-interval {"streams": [...], "summary": {...}}
              entries, sorted by interval start time.
          summary: the whole-run interval (any interval longer than 5 s).
          tcp-window-size / requested-tcp-window-size / mss / mtu: optional
              ints, present when the matching header lines were seen.
    """
    results = {}
    results['succeeded'] = True

    streams = {}
    # NOTE(review): the connection endpoints are parsed but not currently
    # reported in the results.
    dest_ip = None
    dest_port = None
    src_ip = None
    src_port = None

    for line in lines:
        # Ignore bogus sessions.  BUG FIX: these markers appear mid-line,
        # and re.match only anchors at the start of the string, so the
        # original re.match guards could never fire; use re.search.
        if re.search(r'\(nan%\)', line):
            results["succeeded"] = False
            results["error"] = "Found NaN result"
            break

        if re.search(r'read failed: Connection refused', line):
            results["succeeded"] = False
            results["error"] = "Connection refused"
            break

        # Example line:
        # [  3] local 10.0.2.4 port 50338 connected with 10.0.2.15 port 5001
        test = re.search(r'local ([^ ]+) port (\d+) connected with ([^ ]+) port (\d+)', line)
        if test:
            dest_ip = test.group(1)
            dest_port = test.group(2)
            src_ip = test.group(3)
            src_port = test.group(4)

        # Example lines
        # TCP window size:  244 KByte (WARNING: requested 64.0 MByte)
        # TCP window size: 19800 Byte (default)
        test = re.match(r'TCP window size:\s+(\d+(\.\d+)?) (\S)?Byte(.*\(WARNING:\s+requested\s+(\d+(\.\d+)?) (\S)?Byte)?', line)
        if test:
            window_size = test.group(1)
            window_si = test.group(3)
            request_size = test.group(5)
            request_si = test.group(7)

            # Normalize SI-suffixed sizes (KByte, MByte, ...) to plain bytes.
            if window_si:
                window_size = pscheduler.si_as_number("%s%s" % (window_size, window_si))
            if request_size:
                if request_si:
                    request_size = pscheduler.si_as_number("%s%s" % (request_size, request_si))
                results["requested-tcp-window-size"] = int(request_size)
            results["tcp-window-size"] = int(window_size)

        # Example line
        # [ 3] MSS size 1448 bytes (MTU 1500 bytes, ethernet)
        test = re.match(r'.*MSS size (\d+) bytes \(MTU (\d+) bytes', line)
        if test:
            results['mss'] = int(test.group(1))
            results['mtu'] = int(test.group(2))

        stream_id = None
        interval_start = None
        throughput_bytes = None
        si_bytes = None
        throughput_bits = None
        si_bits = None
        jitter = None
        lost = None
        sent = None

        # Example line
        # [  3] 16.0-17.0 sec  37355520 Bytes  298844160 bits/sec
        test = re.match(r'\[\s*(\d+|SUM)\s*\]\s+([0-9\.]+)\s*\-\s*([0-9\.]+)\s+sec\s+(\d+(\.\d+)?)\s+(P|T|G|M|K)?Bytes\s+(\d+(\.\d+)?)\s+(P|T|G|M|K)?bits\/sec(\s+([0-9\.]+)\s+ms\s+(\d+)\/\s*(\d+)\s+)?', line)
        if test:
            stream_id = test.group(1)
            interval_start = float(test.group(2))
            interval_end = float(test.group(3))
            throughput_bytes = float(test.group(4))
            si_bytes = test.group(6)
            throughput_bits = float(test.group(7))
            si_bits = test.group(9)

            # These may or may not be present depending on versions.
            jitter = test.group(11)
            lost = test.group(12)
            # BUG FIX: this was assigned to a stray variable "send", so the
            # "sent" field recorded below was always None.
            sent = test.group(13)

        # If the output was in say GBytes convert back to regular Bytes for
        # ease of things later.
        if si_bytes:
            throughput_bytes = pscheduler.si_as_number("%s%s" % (throughput_bytes, si_bytes))
        if si_bits:
            throughput_bits = pscheduler.si_as_number("%s%s" % (throughput_bits, si_bits))

        # If we found a matching line, we can add this info to our streams.
        if stream_id:
            key = "%s-%s" % (interval_start, interval_end)

            # TODO: This would appear to not create a summary when the
            # duration is very short.

            # there has to be a better way than this...
            if interval_end - interval_start > 5:
                key = "summary"

            if key not in streams:
                streams[key] = []

            streams[key].append({"jitter": jitter,
                                 "lost": lost,
                                 "sent": sent,
                                 "throughput-bits": throughput_bits,
                                 "throughput-bytes": throughput_bytes,
                                 "start": interval_start,
                                 "end": interval_end,
                                 "stream-id": stream_id})

    if not streams:
        results["succeeded"] = False
        results["error"] = "No results found"
        return results

    summary_interval = None
    intervals = []
    for interval in streams:
        summary_stream = None
        interval_streams = []

        # try to find the SUM if possible
        for stream in streams[interval]:
            if stream['stream-id'] == "SUM":
                summary_stream = stream
            else:
                interval_streams.append(stream)

        # if we couldn't find it, there was probably
        # just the one line so use that
        if not summary_stream and len(interval_streams) == 1:
            summary_stream = interval_streams[0]

        finalized = {
            "streams": interval_streams,
            "summary": summary_stream
        }

        if interval == "summary":
            summary_interval = finalized
        else:
            intervals.append(finalized)

    logger.debug(intervals)

    # sort according to start interval
    intervals.sort(key=lambda x: x['summary']['start'])

    results["intervals"] = intervals
    results["summary"] = summary_interval

    return results


if __name__ == "__main__":

    # Test a "regular" output
    test_output = """
------------------------------------------------------------
Client connecting to 10.0.2.15, TCP port 5001
TCP window size: 19800 Byte (default)
------------------------------------------------------------
[ 3] local 10.0.2.4 port 50338 connected with 10.0.2.15 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 1.0 sec 224788480 Bytes 1798307840 bits/sec
[ 3] 1.0- 2.0 sec 222298112 Bytes 1778384896 bits/sec
[ 3] 2.0- 3.0 sec 150339584 Bytes 1202716672 bits/sec
[ 3] 3.0- 4.0 sec 210501632 Bytes 1684013056 bits/sec
[ 3] 4.0- 5.0 sec 218759168 Bytes 1750073344 bits/sec
[ 3] 5.0- 6.0 sec 222298112 Bytes 1778384896 bits/sec
[ 3] 6.0- 7.0 sec 233177088 Bytes 1865416704 bits/sec
[ 3] 7.0- 8.0 sec 230686720 Bytes 1845493760 bits/sec
[ 3] 8.0- 9.0 sec 229638144 Bytes 1837105152 bits/sec
[ 3] 9.0-10.0 sec 226492416 Bytes 1811939328 bits/sec
[ 3] 0.0-10.0 sec 2169110528 Bytes 1735167481 bits/sec
"""
    #result = parse_output(test_output.split("\n"))
    #pprint.PrettyPrinter(indent=4).pprint(result)

    test_output = """
------------------------------------------------------------
Client connecting to 10.0.2.4, TCP port 5001
TCP window size: 244 KByte (WARNING: requested 7.63 MByte)
------------------------------------------------------------
[ 5] local 10.0.2.15 port 42309 connected with 10.0.2.4 port 5001
[ 3] local 10.0.2.15 port 42307 connected with 10.0.2.4 port 5001
[ 4] local 10.0.2.15 port 42308 connected with 10.0.2.4 port 5001
[ ID] Interval Transfer Bandwidth
[ 5] 0.0- 1.0 sec 74.8 MBytes 627 Mbits/sec
[ 4] 0.0- 1.0 sec 67.0 MBytes 562 Mbits/sec
[ 3] 0.0- 1.0 sec 59.0 MBytes 495 Mbits/sec
[SUM] 0.0- 1.0 sec 201 MBytes 1.68 Gbits/sec
[ 5] 1.0- 2.0 sec 76.4 MBytes 641 Mbits/sec
[ 3] 1.0- 2.0 sec 68.1 MBytes 571 Mbits/sec
[ 4] 1.0- 2.0 sec 63.8 MBytes 535 Mbits/sec
[SUM] 1.0- 2.0 sec 208 MBytes 1.75 Gbits/sec
[ 5] 2.0- 3.0 sec 76.9 MBytes 645 Mbits/sec
[ 3] 2.0- 3.0 sec 61.8 MBytes 518 Mbits/sec
[ 4] 2.0- 3.0 sec 65.9 MBytes 553 Mbits/sec
[SUM] 2.0- 3.0 sec 204 MBytes 1.72 Gbits/sec
[ 5] 3.0- 4.0 sec 72.6 MBytes 609 Mbits/sec
[ 3] 3.0- 4.0 sec 68.8 MBytes 577 Mbits/sec
[ 4] 3.0- 4.0 sec 60.9 MBytes 511 Mbits/sec
[SUM] 3.0- 4.0 sec 202 MBytes 1.70 Gbits/sec
[ 5] 4.0- 5.0 sec 73.4 MBytes 616 Mbits/sec
[ 3] 4.0- 5.0 sec 71.5 MBytes 600 Mbits/sec
[ 4] 4.0- 5.0 sec 61.6 MBytes 517 Mbits/sec
[SUM] 4.0- 5.0 sec 206 MBytes 1.73 Gbits/sec
[ 3] 5.0- 6.0 sec 73.2 MBytes 614 Mbits/sec
[ 4] 5.0- 6.0 sec 67.0 MBytes 562 Mbits/sec
[ 5] 5.0- 6.0 sec 64.5 MBytes 541 Mbits/sec
[SUM] 5.0- 6.0 sec 205 MBytes 1.72 Gbits/sec
[ 5] 6.0- 7.0 sec 65.6 MBytes 551 Mbits/sec
[ 3] 6.0- 7.0 sec 75.0 MBytes 629 Mbits/sec
[ 4] 6.0- 7.0 sec 70.4 MBytes 590 Mbits/sec
[SUM] 6.0- 7.0 sec 211 MBytes 1.77 Gbits/sec
[ 3] 7.0- 8.0 sec 77.0 MBytes 646 Mbits/sec
[ 4] 7.0- 8.0 sec 65.9 MBytes 553 Mbits/sec
[ 5] 7.0- 8.0 sec 63.8 MBytes 535 Mbits/sec
[SUM] 7.0- 8.0 sec 207 MBytes 1.73 Gbits/sec
[ 3] 8.0- 9.0 sec 76.2 MBytes 640 Mbits/sec
[ 5] 8.0- 9.0 sec 65.0 MBytes 545 Mbits/sec
[ 4] 8.0- 9.0 sec 68.4 MBytes 574 Mbits/sec
[SUM] 8.0- 9.0 sec 210 MBytes 1.76 Gbits/sec
[ 5] 9.0-10.0 sec 67.6 MBytes 567 Mbits/sec
[ 5] 0.0-10.0 sec 701 MBytes 588 Mbits/sec
[ 3] 9.0-10.0 sec 71.1 MBytes 597 Mbits/sec
[ 3] 0.0-10.0 sec 702 MBytes 589 Mbits/sec
[ 4] 9.0-10.0 sec 72.2 MBytes 606 Mbits/sec
[SUM] 9.0-10.0 sec 211 MBytes 1.77 Gbits/sec
[ 4] 0.0-10.0 sec 663 MBytes 556 Mbits/sec
[SUM] 0.0-10.0 sec 2.02 GBytes 1.73 Gbits/sec
[ 3] MSS size 1448 bytes (MTU 1500 bytes, ethernet)
"""
    result = parse_output(test_output.split("\n"))
    pprint.PrettyPrinter(indent=4).pprint(result)
perfsonar/pscheduler
pscheduler-tool-iperf2/iperf2/iperf2_parser.py
Python
apache-2.0
9,825
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for pre-processing the data into individual, standardized formats."""

import collections
import datetime
import itertools
import os
import pathlib
import re
from typing import Callable, Dict, Optional, Set, Tuple

from absl import logging
from dm_c19_modelling.england_data import constants
import pandas as pd
import yaml

_PATH_FILENAME_REGEXES = "filename_regexes.yaml"

_COLUMNS = constants.Columns
_DATE_FORMAT = "%Y-%m-%d"


def _order_columns(df: pd.DataFrame) -> pd.DataFrame:
  """Orders the columns of the dataframe as: date, region, observations.

  Moves the date column to the front in place, then returns a re-indexed
  frame whose region columns precede the observation columns.  Callers must
  use the returned frame to get the full ordering.
  """
  df.insert(0, _COLUMNS.DATE.value, df.pop(_COLUMNS.DATE.value))
  reg_columns = []
  obs_columns = []
  for col in df.columns[1:]:
    if col.startswith(constants.REGION_PREFIX):
      reg_columns.append(col)
    elif col.startswith(constants.OBSERVATION_PREFIX):
      obs_columns.append(col)
    else:
      raise ValueError(f"Unknown column: '{col}'")
  columns = [_COLUMNS.DATE.value] + reg_columns + obs_columns
  return df[columns]


def _raw_data_formatter_daily_deaths(filepath: str) -> pd.DataFrame:
  """Loads and formats daily deaths data."""
  sheet_name = "Tab4 Deaths by trust"
  header = 15
  df = pd.read_excel(filepath, sheet_name=sheet_name, header=header)
  # Drop rows and columns which are all nans.
  df.dropna(axis=0, how="all", inplace=True)
  df.dropna(axis=1, how="all", inplace=True)
  # Drop unneeded columns and rows.
  drop_columns = ["Total", "Awaiting verification"]
  up_to_mar_1_index = "Up to 01-Mar-20"
  # Only drop the pre-March column when it carries no counts at all.
  if sum(i for i in df[up_to_mar_1_index] if isinstance(i, int)) == 0.0:
    drop_columns.append(up_to_mar_1_index)
  df.drop(columns=drop_columns, inplace=True)
  df = df[df["Code"] != "-"]
  # Melt the death counts by date into "Date" and "Death Count" columns.
  df = df.melt(
      id_vars=["NHS England Region", "Code", "Name"],
      var_name="Date",
      value_name="Death Count")
  # Rename the columns to their standard names.
  df.rename(
      columns={
          "Date": _COLUMNS.DATE.value,
          "Death Count": _COLUMNS.OBS_DEATHS.value,
          "Code": _COLUMNS.REG_TRUST_CODE.value,
          "Name": _COLUMNS.REG_TRUST_NAME.value,
          "NHS England Region": _COLUMNS.REG_NHSER_NAME.value,
      },
      inplace=True)
  # BUG FIX: the return value of _order_columns was previously discarded, so
  # the region/observation column grouping was never applied.
  df = _order_columns(df)
  df[_COLUMNS.DATE.value] = df[_COLUMNS.DATE.value].map(
      lambda x: x.strftime(_DATE_FORMAT))
  # Sort and clean up the indices before returning the final dataframe.
  df.sort_values([
      _COLUMNS.DATE.value,
      _COLUMNS.REG_TRUST_NAME.value,
      _COLUMNS.REG_TRUST_CODE.value,
  ],
                 inplace=True)
  df.reset_index(drop=True, inplace=True)
  if df.isna().any().any():
    raise ValueError("Formatted data 'daily_deaths' contains nans")
  return df


def _raw_data_formatter_daily_cases(filepath: str) -> pd.DataFrame:
  """Loads and formats daily cases data."""
  df = pd.read_csv(filepath)
  df.rename(columns={"Area type": "Area_type"}, inplace=True)
  # Keep only lower-tier local authority rows.
  df.query("Area_type == 'ltla'", inplace=True)
  # Drop unneeded columns and rows.
  drop_columns = [
      "Area_type", "Cumulative lab-confirmed cases",
      "Cumulative lab-confirmed cases rate"
  ]
  df.drop(columns=drop_columns, inplace=True)
  # Rename the columns to their standard names.
  df.rename(
      columns={
          "Area name": _COLUMNS.REG_LTLA_NAME.value,
          "Area code": _COLUMNS.REG_LTLA_CODE.value,
          "Specimen date": _COLUMNS.DATE.value,
          "Daily lab-confirmed cases": _COLUMNS.OBS_CASES.value,
      },
      inplace=True)
  # BUG FIX: use the returned, fully ordered frame (was discarded before).
  df = _order_columns(df)
  # Sort and clean up the indices before returning the final dataframe.
  df.sort_values([
      _COLUMNS.DATE.value,
      _COLUMNS.REG_LTLA_NAME.value,
      _COLUMNS.REG_LTLA_CODE.value,
  ],
                 inplace=True)
  df.reset_index(drop=True, inplace=True)
  if df.isna().any().any():
    raise ValueError("Formatted data 'daily_cases' contains nans")
  return df


def _raw_data_formatter_google_mobility(filepath: str) -> pd.DataFrame:
  """Loads and formats Google mobility data."""
  df = pd.read_csv(filepath)
  # Filter to UK.
  df.query("country_region_code == 'GB'", inplace=True)
  # Drop unneeded columns and rows.
  drop_columns = [
      "country_region_code", "country_region", "metro_area",
      "census_fips_code"
  ]
  df.drop(columns=drop_columns, inplace=True)
  # Fill missing region info with "na".
  # BUG FIX: `df[cols].fillna(..., inplace=True)` operated on a copy and
  # silently left the NaNs in place; assign the filled columns back instead.
  region_na_columns = ["sub_region_1", "sub_region_2", "iso_3166_2_code"]
  df[region_na_columns] = df[region_na_columns].fillna("na")
  # Rename the columns to their standard names.
  df.rename(
      columns={
          "sub_region_1": _COLUMNS.REG_SUB_REGION_1.value,
          "sub_region_2": _COLUMNS.REG_SUB_REGION_2.value,
          "iso_3166_2_code": _COLUMNS.REG_ISO_3166_2_CODE.value,
          "date": _COLUMNS.DATE.value,
          "retail_and_recreation_percent_change_from_baseline":
              _COLUMNS.OBS_MOBILITY_RETAIL_AND_RECREATION.value,
          "grocery_and_pharmacy_percent_change_from_baseline":
              _COLUMNS.OBS_MOBILITY_GROCERY_AND_PHARMACY.value,
          "parks_percent_change_from_baseline":
              _COLUMNS.OBS_MOBILITY_PARKS.value,
          "transit_stations_percent_change_from_baseline":
              _COLUMNS.OBS_MOBILITY_TRANSIT_STATIONS.value,
          "workplaces_percent_change_from_baseline":
              _COLUMNS.OBS_MOBILITY_WORKPLACES.value,
          "residential_percent_change_from_baseline":
              _COLUMNS.OBS_MOBILITY_RESIDENTIAL.value,
      },
      inplace=True)
  # BUG FIX: use the returned, fully ordered frame (was discarded before).
  df = _order_columns(df)
  # Sort and clean up the indices before returning the final dataframe.
  # Note: no nan check here -- mobility observations legitimately have gaps.
  df.sort_values([
      _COLUMNS.DATE.value,
      _COLUMNS.REG_SUB_REGION_1.value,
      _COLUMNS.REG_SUB_REGION_2.value,
      _COLUMNS.REG_ISO_3166_2_CODE.value,
  ],
                 inplace=True)
  df.reset_index(drop=True, inplace=True)
  return df


def _raw_data_formatter_online_111(filepath: str) -> pd.DataFrame:
  """Loads and formats online 111 data."""
  df = pd.read_csv(filepath)
  # Drop nans.
  df.dropna(subset=["ccgcode"], inplace=True)
  # Reformat dates, normalize names, and bucket sex/ageband values.
  remap_dict = {
      "journeydate":
          lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime(  # pylint: disable=g-long-lambda
              _DATE_FORMAT),
      "ccgname":
          lambda x: x.replace("&", "and"),
      "sex": {
          "Female": "f",
          "Male": "m",
          "Indeterminate": "u",
      },
      "ageband": {
          "0-18 years": "0",
          "19-69 years": "19",
          "70+ years": "70"
      }
  }
  for col, remap in remap_dict.items():
    df[col] = df[col].map(remap)
  journeydate_values = pd.date_range(
      df.journeydate.min(), df.journeydate.max()).strftime(_DATE_FORMAT)
  ccgcode_values = df.ccgcode.unique()
  df.sex.fillna("u", inplace=True)
  sex_values = ["f", "m", "u"]
  assert set(sex_values) >= set(df.sex.unique()), "unsupported sex value"
  df.ageband.fillna("u", inplace=True)
  ageband_values = ["0", "19", "70", "u"]
  assert set(ageband_values) >= set(
      df.ageband.unique()), "unsupported ageband value"
  ccg_code_name_map = df[["ccgcode", "ccgname"
                         ]].set_index("ccgcode")["ccgname"].drop_duplicates()
  # Some CCG codes have duplicate names, which differ by their commas. Keep the
  # longer ones.
  fn = lambda x: sorted(x["ccgname"].map(lambda y: (len(y), y)))[-1][1]
  ccg_code_name_map = ccg_code_name_map.reset_index().groupby("ccgcode").apply(
      fn)
  # Build the full cross product of dates/CCGs/sex/ageband so that
  # combinations with zero calls appear explicitly.
  df_full = pd.DataFrame(
      list(
          itertools.product(journeydate_values, ccgcode_values, sex_values,
                            ageband_values)),
      columns=["journeydate", "ccgcode", "sex", "ageband"])
  df = pd.merge(df_full, df, how="outer")
  # 0 calls don't have rows, so are nans.
  df["Total"].fillna(0, inplace=True)
  df["ccgname"] = df["ccgcode"].map(ccg_code_name_map)
  # Combine sex and ageband columns into a joint column.
  df["sex_ageband"] = df["sex"] + "_" + df["ageband"]
  df = df.pivot_table(
      index=["journeydate", "ccgcode", "ccgname"],
      columns="sex_ageband",
      values="Total").reset_index()
  df.columns.name = None
  # Rename the columns to their standard names.
  df.rename(
      columns={
          "ccgcode": _COLUMNS.REG_CCG_CODE.value,
          "ccgname": _COLUMNS.REG_CCG_NAME.value,
          "journeydate": _COLUMNS.DATE.value,
          "f_0": _COLUMNS.OBS_ONLINE_111_F_0.value,
          "f_19": _COLUMNS.OBS_ONLINE_111_F_19.value,
          "f_70": _COLUMNS.OBS_ONLINE_111_F_70.value,
          "f_u": _COLUMNS.OBS_ONLINE_111_F_U.value,
          "m_0": _COLUMNS.OBS_ONLINE_111_M_0.value,
          "m_19": _COLUMNS.OBS_ONLINE_111_M_19.value,
          "m_70": _COLUMNS.OBS_ONLINE_111_M_70.value,
          "m_u": _COLUMNS.OBS_ONLINE_111_M_U.value,
          "u_0": _COLUMNS.OBS_ONLINE_111_U_0.value,
          "u_19": _COLUMNS.OBS_ONLINE_111_U_19.value,
          "u_70": _COLUMNS.OBS_ONLINE_111_U_70.value,
          "u_u": _COLUMNS.OBS_ONLINE_111_U_U.value,
      },
      inplace=True)
  # BUG FIX: use the returned, fully ordered frame (was discarded before).
  df = _order_columns(df)
  # Sort and clean up the indices before returning the final dataframe.
  df.sort_values([
      _COLUMNS.DATE.value,
      _COLUMNS.REG_CCG_NAME.value,
      _COLUMNS.REG_CCG_CODE.value,
  ],
                 inplace=True)
  df.reset_index(drop=True, inplace=True)
  if df.isna().any().any():
    raise ValueError("Formatted data 'online_111' contains nans")
  return df


def _raw_data_formatter_calls_111_999(filepath: str) -> pd.DataFrame:
  """Loads and formats 111 & 999 calls data."""
  df = pd.read_csv(filepath)
  # Drop unneeded columns and rows.
  drop_columns = []
  df.drop(columns=drop_columns, inplace=True)
  # Drop nans.
  df.dropna(subset=["CCGCode", "CCGName"], inplace=True)
  # Reformat values.
  df["AgeBand"].fillna("u", inplace=True)
  remap_dict = {
      "Call Date":
          lambda x: datetime.datetime.strptime(x, "%d/%m/%Y").strftime(  # pylint: disable=g-long-lambda
              "%Y-%m-%d"),
      "CCGName":
          lambda x: x.replace("&", "and"),
      "SiteType":
          lambda x: str(int(x)),
      "Sex": {
          "Female": "f",
          "Male": "m",
          "Unknown": "u",
      },
      "AgeBand": {
          "0-18 years": "0",
          "19-69 years": "19",
          "70-120 years": "70",
          "u": "u",
      }
  }
  for col, remap in remap_dict.items():
    df[col] = df[col].map(remap)
  call_date_values = pd.date_range(df["Call Date"].min(),
                                   df["Call Date"].max()).strftime(_DATE_FORMAT)
  ccgcode_values = df["CCGCode"].unique()
  sitetype_values = ["111", "999"]
  assert set(sitetype_values) >= set(
      df.SiteType.unique()), "unsupported sitetype value"
  sex_values = ["f", "m", "u"]
  assert set(sex_values) >= set(df.Sex.unique()), "unsupported sex value"
  ageband_values = ["0", "19", "70", "u"]
  assert set(ageband_values) >= set(
      df.AgeBand.unique()), "unsupported ageband value"
  ccg_code_name_map = df[["CCGCode", "CCGName"
                         ]].set_index("CCGCode")["CCGName"].drop_duplicates()
  # Build the full cross product so combinations with zero calls appear.
  df_full = pd.DataFrame(
      list(
          itertools.product(call_date_values, ccgcode_values, sitetype_values,
                            sex_values, ageband_values)),
      columns=["Call Date", "CCGCode", "SiteType", "Sex", "AgeBand"])
  df = pd.merge(df_full, df, how="outer")
  # 0 calls don't have rows, so are nans.
  df["TriageCount"].fillna(0, inplace=True)
  df["CCGName"] = df["CCGCode"].map(ccg_code_name_map)
  # Combine SiteType, Sex, and AgeBand columns into a joint column.
  df["SiteType_Sex_AgeBand"] = (
      df["SiteType"] + "_" + df["Sex"] + "_" + df["AgeBand"])
  df = df.pivot_table(
      index=["Call Date", "CCGCode", "CCGName"],
      columns="SiteType_Sex_AgeBand",
      values="TriageCount").reset_index()
  df.columns.name = None
  # Rename the columns to their standard names.
  df.rename(
      columns={
          "CCGCode": _COLUMNS.REG_CCG_CODE.value,
          "CCGName": _COLUMNS.REG_CCG_NAME.value,
          "Call Date": _COLUMNS.DATE.value,
          "111_f_0": _COLUMNS.OBS_CALL_111_F_0.value,
          "111_f_19": _COLUMNS.OBS_CALL_111_F_19.value,
          "111_f_70": _COLUMNS.OBS_CALL_111_F_70.value,
          "111_f_u": _COLUMNS.OBS_CALL_111_F_U.value,
          "111_m_0": _COLUMNS.OBS_CALL_111_M_0.value,
          "111_m_19": _COLUMNS.OBS_CALL_111_M_19.value,
          "111_m_70": _COLUMNS.OBS_CALL_111_M_70.value,
          "111_m_u": _COLUMNS.OBS_CALL_111_M_U.value,
          "111_u_0": _COLUMNS.OBS_CALL_111_U_0.value,
          "111_u_19": _COLUMNS.OBS_CALL_111_U_19.value,
          "111_u_70": _COLUMNS.OBS_CALL_111_U_70.value,
          "111_u_u": _COLUMNS.OBS_CALL_111_U_U.value,
          "999_f_0": _COLUMNS.OBS_CALL_999_F_0.value,
          "999_f_19": _COLUMNS.OBS_CALL_999_F_19.value,
          "999_f_70": _COLUMNS.OBS_CALL_999_F_70.value,
          "999_f_u": _COLUMNS.OBS_CALL_999_F_U.value,
          "999_m_0": _COLUMNS.OBS_CALL_999_M_0.value,
          "999_m_19": _COLUMNS.OBS_CALL_999_M_19.value,
          "999_m_70": _COLUMNS.OBS_CALL_999_M_70.value,
          "999_m_u": _COLUMNS.OBS_CALL_999_M_U.value,
          "999_u_0": _COLUMNS.OBS_CALL_999_U_0.value,
          "999_u_19": _COLUMNS.OBS_CALL_999_U_19.value,
          "999_u_70": _COLUMNS.OBS_CALL_999_U_70.value,
          "999_u_u": _COLUMNS.OBS_CALL_999_U_U.value,
      },
      inplace=True)
  # BUG FIX: use the returned, fully ordered frame (was discarded before).
  df = _order_columns(df)
  # Sort and clean up the indices before returning the final dataframe.
  df.sort_values([
      _COLUMNS.DATE.value,
      _COLUMNS.REG_CCG_NAME.value,
      _COLUMNS.REG_CCG_CODE.value,
  ],
                 inplace=True)
  df.reset_index(drop=True, inplace=True)
  if df.isna().any().any():
    raise ValueError("Formatted data 'calls_111_999' contains nans")
  return df


_FORMATTER_FUNCTIONS = {
    "daily_deaths": _raw_data_formatter_daily_deaths,
    "daily_cases": _raw_data_formatter_daily_cases,
    "google_mobility": _raw_data_formatter_google_mobility,
    "online_111": _raw_data_formatter_online_111,
    "calls_111_999": _raw_data_formatter_calls_111_999,
}


def _get_raw_data_formatter_by_name(name: str) -> Callable[[str], pd.DataFrame]:
  """Returns the formatter function registered for `name`."""
  return _FORMATTER_FUNCTIONS[name]


def _merge_online_111_and_calls_111_999(
    df_online_111: pd.DataFrame,
    df_calls_111_999: pd.DataFrame) -> pd.DataFrame:
  """Merges the 111 online and 111/999 calls into a single dataframe."""
  df = pd.merge(
      df_online_111,
      df_calls_111_999,
      how="outer",
      on=[
          _COLUMNS.DATE.value,
          _COLUMNS.REG_CCG_CODE.value,
          _COLUMNS.REG_CCG_NAME.value,
      ])
  return df


def format_raw_data_files(
    paths_dict: Dict[str, str]) -> Dict[str, pd.DataFrame]:
  """Loads and formats the individual raw data files.

  Args:
    paths_dict: mapping from data names to filepaths.

  Returns:
    mapping from data names to formatted dataframes.
  """
  formatted_dfs = {}
  for name, path in paths_dict.items():
    logging.info("Formatting raw data: %s", name)
    formatter = _get_raw_data_formatter_by_name(name)
    formatted_dfs[name] = formatter(path)
  logging.info("Merging online 111 and 111/999 calls")
  # BUG FIX: the condition was `if "online_111" and "calls_111_999" in
  # formatted_dfs:` which only tested the second membership (a non-empty
  # string literal is always truthy), so it raised KeyError when only the
  # calls data was present.  Test both memberships explicitly.
  if "online_111" in formatted_dfs and "calls_111_999" in formatted_dfs:
    formatted_dfs[
        "online_111_and_calls_111_999"] = _merge_online_111_and_calls_111_999(
            formatted_dfs.pop("online_111"),
            formatted_dfs.pop("calls_111_999"))
  elif "online_111" in formatted_dfs:
    formatted_dfs["online_111_and_calls_111_999"] = formatted_dfs.pop(
        "online_111")
  elif "calls_111_999" in formatted_dfs:
    formatted_dfs["online_111_and_calls_111_999"] = formatted_dfs.pop(
        "calls_111_999")
  return formatted_dfs


def merge_formatted_data(
    formatted_data: Dict[str, pd.DataFrame]) -> pd.DataFrame:
  """Concatenates all formatted data into a single dataframe.

  Args:
    formatted_data: mapping from the data name to its dataframe.

  Returns:
    a dataframe containing all of the input dataframes.
  """
  logging.info("Merging all dataframes")
  dfs = []
  for name, df in formatted_data.items():
    df = df.copy()
    # Tag each row with the name of the dataset it came from.
    df.insert(1, _COLUMNS.OBSERVATION_TYPE.value, name)
    dfs.append(df)
  df_merged = pd.concat(dfs)
  reg_columns = [
      c for c in df_merged.columns if c.startswith(constants.REGION_PREFIX)
  ]
  df_merged.sort_values(
      [_COLUMNS.DATE.value, _COLUMNS.OBSERVATION_TYPE.value] + reg_columns,
      inplace=True)
  df_merged.reset_index(drop=True, inplace=True)
  return df_merged


def _load_filename_regexes() -> Dict[str, str]:
  """Gets a mapping from the data name to the regex for that data's filepath."""
  path = pathlib.Path(os.path.dirname(
      os.path.realpath(__file__))) / _PATH_FILENAME_REGEXES
  with open(path) as fid:
    return yaml.load(fid, Loader=yaml.SafeLoader)


def get_paths_for_given_date(
    raw_data_directory: str,
    scrape_date: str) -> Tuple[Dict[str, str], str, Set[str]]:
  """Get the raw data paths for a scrape date and filename regex.

  Args:
    raw_data_directory: the directory where the raw data is saved.
    scrape_date: the scrape date to use, in the form YYYYMMDD, or 'latest'.

  Returns:
    mapping of data names to filepaths
    the scrape date used
    names whose data was not found on disk
  """
  filename_regexes = _load_filename_regexes()
  if scrape_date == "latest":
    # Pick the lexicographically greatest YYYYMMDD-named subdirectory,
    # which for this date format is also the most recent.
    rx = re.compile("^[0-9]{8}$")
    directories = []
    for filename in os.listdir(raw_data_directory):
      if rx.match(filename) is None:
        continue
      path = pathlib.Path(raw_data_directory) / filename
      if not os.path.isdir(path):
        continue
      directories.append(path)
    if not directories:
      raise ValueError("Could not find latest scrape date directory")
    directory = max(directories)
    scrape_date_dirname = directory.parts[-1]
  else:
    try:
      datetime.datetime.strptime(scrape_date, "%Y%m%d")
    except ValueError:
      raise ValueError("Date must be formatted: YYYYMMDD")
    scrape_date_dirname = scrape_date
    directory = pathlib.Path(raw_data_directory) / scrape_date_dirname
  paths_dict = collections.defaultdict(lambda: None)
  for name, filename_regex in filename_regexes.items():
    rx = re.compile(f"^{filename_regex}$")
    for filename in os.listdir(directory):
      path = directory / filename
      if os.path.isdir(path):
        continue
      match = rx.match(filename)
      if match is None:
        continue
      if paths_dict[name] is not None:
        raise ValueError("There should only be 1 file per name")
      paths_dict[name] = str(path)
  missing_names = set(filename_regexes.keys()) - set(paths_dict.keys())
  return dict(paths_dict), scrape_date_dirname, missing_names


def load_population_dataframe(
    raw_data_directory: str) -> Optional[pd.DataFrame]:
  """Load population data from disk, and create a dataframe from it.

  Args:
    raw_data_directory: the directory where the raw data is saved.

  Returns:
    a dataframe containing population data, or None when the population
    spreadsheet is not present on disk.
  """
  filename = _load_filename_regexes()["population"]
  filepath = pathlib.Path(raw_data_directory) / filename
  kwargs = dict(header=0, skiprows=(0, 1, 2, 3, 4, 5, 7))
  try:
    pop_m = pd.read_excel(filepath, sheet_name="Mid-2019 Males", **kwargs)
    pop_f = pd.read_excel(filepath, sheet_name="Mid-2019 Females", **kwargs)
  except FileNotFoundError:
    return None
  # Remove lower resolution columns.
  columns_to_remove = ("STP20 Code", "STP20 Name", "NHSER20 Code",
                       "NHSER20 Name", "All Ages")
  for col in columns_to_remove:
    del pop_m[col]
    del pop_f[col]
  mapping = {"CCG Code": _COLUMNS.REG_CCG_CODE.value,
             "CCG Name": _COLUMNS.REG_CCG_NAME.value,
             "90+": 90}
  pop_m.rename(columns=mapping, inplace=True)
  pop_f.rename(columns=mapping, inplace=True)
  # This labels the male and female data uniquely so they can be merged.
  pop_m.rename(
      columns=lambda x: f"m_{str(x).lower()}" if isinstance(x, int) else x,
      inplace=True)
  pop_f.rename(
      columns=lambda x: f"f_{str(x).lower()}" if isinstance(x, int) else x,
      inplace=True)
  region_columns = [_COLUMNS.REG_CCG_NAME.value, _COLUMNS.REG_CCG_CODE.value]
  df = pd.merge(pop_m, pop_f, how="outer", on=tuple(region_columns))
  mapping = {
      f"{gender}_{age}":
      _COLUMNS.OBS_POPULATION_GENDER_AGE.value.format(gender=gender, age=age)
      for gender, age in itertools.product(("m", "f"), range(91))
  }
  df.rename(columns=mapping, inplace=True)
  return df
GoogleCloudPlatform/covid-19-open-data
src/england_data/standardize_data.py
Python
apache-2.0
21,282
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v9.enums", marshal="google.ads.googleads.v9", manifest={"FeedMappingStatusEnum",}, ) class FeedMappingStatusEnum(proto.Message): r"""Container for enum describing possible statuses of a feed mapping. """ class FeedMappingStatus(proto.Enum): r"""Possible statuses of a feed mapping.""" UNSPECIFIED = 0 UNKNOWN = 1 ENABLED = 2 REMOVED = 3 __all__ = tuple(sorted(__protobuf__.manifest))
googleads/google-ads-python
google/ads/googleads/v9/enums/types/feed_mapping_status.py
Python
apache-2.0
1,148
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import numpy as np from bigdl.ppml import FLServer from bigdl.ppml.algorithms.fgboost_regression import FGBoostRegression from bigdl.ppml.utils import init_fl_context class TestHflLogisticRegression(unittest.TestCase): def setUp(self) -> None: self.fl_server = FLServer() self.fl_server.build() self.fl_server.start() init_fl_context() def tearDown(self) -> None: self.fl_server.stop() def test_dummy_data(self): x, y = np.ones([2, 3]), np.ones([2]) if __name__ == '__main__': unittest.main()
intel-analytics/BigDL
python/ppml/test/bigdl/ppml/algorithms/test_hfl_logistic_regression.py
Python
apache-2.0
1,167
# Copyright 2021 The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import unittest from unittest import mock import pkg_resources from google.cloud.trace_v2.types import AttributeValue, BatchWriteSpansRequest from google.cloud.trace_v2.types import Span as ProtoSpan from google.cloud.trace_v2.types import TruncatableString from google.rpc import code_pb2 from google.rpc.status_pb2 import Status from opentelemetry.exporter.cloud_trace import ( MAX_EVENT_ATTRS, MAX_LINK_ATTRS, MAX_NUM_EVENTS, MAX_NUM_LINKS, CloudTraceSpanExporter, _extract_attributes, _extract_events, _extract_links, _extract_resources, _extract_span_kind, _extract_status, _format_attribute_value, _get_time_from_ns, _strip_characters, _truncate_str, ) from opentelemetry.exporter.cloud_trace.version import __version__ from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import Event from opentelemetry.sdk.trace import _Span as Span from opentelemetry.trace import Link, SpanContext, SpanKind from opentelemetry.trace.status import Status as SpanStatus from opentelemetry.trace.status import StatusCode # pylint: disable=too-many-public-methods class TestCloudTraceSpanExporter(unittest.TestCase): def setUp(self): self.client_patcher = mock.patch( "opentelemetry.exporter.cloud_trace.TraceServiceClient" ) self.client_patcher.start() def tearDown(self): self.client_patcher.stop() @classmethod def setUpClass(cls): cls.project_id = "PROJECT" cls.attributes_variety_pack 
= { "str_key": "str_value", "bool_key": False, "double_key": 1.421, "int_key": 123, } cls.extracted_attributes_variety_pack = ProtoSpan.Attributes( attribute_map={ "str_key": AttributeValue( string_value=TruncatableString( value="str_value", truncated_byte_count=0 ) ), "bool_key": AttributeValue(bool_value=False), "double_key": AttributeValue( string_value=TruncatableString( value="1.4210", truncated_byte_count=0 ) ), "int_key": AttributeValue(int_value=123), } ) cls.agent_code = _format_attribute_value( "opentelemetry-python {}; google-cloud-trace-exporter {}".format( _strip_characters( pkg_resources.get_distribution("opentelemetry-sdk").version ), _strip_characters(__version__), ) ) cls.example_trace_id = "6e0c63257de34c92bf9efcd03927272e" cls.example_span_id = "95bb5edabd45950f" cls.example_time_in_ns = 1589919268850900051 cls.example_time_stamp = _get_time_from_ns(cls.example_time_in_ns) cls.str_20kb = "a" * 20 * 1024 cls.str_16kb = "a" * 16 * 1024 cls.str_300 = "a" * 300 cls.str_256 = "a" * 256 cls.str_128 = "a" * 128 def test_constructor_default(self): exporter = CloudTraceSpanExporter(self.project_id) self.assertEqual(exporter.project_id, self.project_id) def test_constructor_explicit(self): client = mock.Mock() exporter = CloudTraceSpanExporter(self.project_id, client=client) self.assertIs(exporter.client, client) self.assertEqual(exporter.project_id, self.project_id) def test_export(self): resource_info = Resource( { "cloud.account.id": 123, "host.id": "host", "cloud.zone": "US", "cloud.provider": "gcp", "gcp.resource_type": "gce_instance", } ) span_datas = [ Span( name="span_name", context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(self.example_span_id, 16), is_remote=False, ), parent=None, kind=SpanKind.INTERNAL, resource=resource_info, attributes={"attr_key": "attr_value"}, ) ] cloud_trace_spans = { "name": "projects/{}/traces/{}/spans/{}".format( self.project_id, self.example_trace_id, self.example_span_id ), "span_id": 
self.example_span_id, "parent_span_id": None, "display_name": TruncatableString( value="span_name", truncated_byte_count=0 ), "attributes": ProtoSpan.Attributes( attribute_map={ "g.co/r/gce_instance/zone": _format_attribute_value("US"), "g.co/r/gce_instance/instance_id": _format_attribute_value( "host" ), "g.co/r/gce_instance/project_id": _format_attribute_value( "123" ), "g.co/agent": self.agent_code, "attr_key": _format_attribute_value("attr_value"), } ), "links": None, "status": None, "time_events": None, "start_time": None, "end_time": None, # pylint: disable=no-member "span_kind": ProtoSpan.SpanKind.INTERNAL, } client = mock.Mock() exporter = CloudTraceSpanExporter(self.project_id, client=client) exporter.export(span_datas) self.assertTrue(client.batch_write_spans.called) client.batch_write_spans.assert_called_with( request=BatchWriteSpansRequest( name="projects/{}".format(self.project_id), spans=[cloud_trace_spans], ) ) def test_extract_status_code_unset(self): self.assertIsNone( _extract_status(SpanStatus(status_code=StatusCode.UNSET)) ) def test_extract_status_code_ok(self): self.assertEqual( _extract_status(SpanStatus(status_code=StatusCode.OK)), Status(code=code_pb2.OK), ) def test_extract_status_code_error(self): self.assertEqual( _extract_status( SpanStatus( status_code=StatusCode.ERROR, description="error_desc", ) ), Status(code=code_pb2.UNKNOWN, message="error_desc"), ) def test_extract_status_code_future_added(self): self.assertEqual( _extract_status( SpanStatus( status_code=mock.Mock(), ) ), Status(code=code_pb2.UNKNOWN), ) def test_extract_empty_attributes(self): self.assertEqual( _extract_attributes({}, num_attrs_limit=4), ProtoSpan.Attributes(attribute_map={}), ) def test_extract_variety_of_attributes(self): self.assertEqual( _extract_attributes( self.attributes_variety_pack, num_attrs_limit=4 ), self.extracted_attributes_variety_pack, ) def test_extract_label_mapping_attributes(self): attributes_labels_mapping = { "http.scheme": "http", 
"http.host": "172.19.0.4:8000", "http.method": "POST", "http.request_content_length": 321, "http.response_content_length": 123, "http.route": "/fuzzy/search", "http.status_code": 200, "http.url": "http://172.19.0.4:8000/fuzzy/search", "http.user_agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36", } extracted_attributes_labels_mapping = ProtoSpan.Attributes( attribute_map={ "/http/client_protocol": AttributeValue( string_value=TruncatableString( value="http", truncated_byte_count=0 ) ), "/http/host": AttributeValue( string_value=TruncatableString( value="172.19.0.4:8000", truncated_byte_count=0 ) ), "/http/method": AttributeValue( string_value=TruncatableString( value="POST", truncated_byte_count=0 ) ), "/http/request/size": AttributeValue(int_value=321), "/http/response/size": AttributeValue(int_value=123), "/http/route": AttributeValue( string_value=TruncatableString( value="/fuzzy/search", truncated_byte_count=0 ) ), "/http/status_code": AttributeValue(int_value=200), "/http/url": AttributeValue( string_value=TruncatableString( value="http://172.19.0.4:8000/fuzzy/search", truncated_byte_count=0, ) ), "/http/user_agent": AttributeValue( string_value=TruncatableString( value="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36", truncated_byte_count=0, ) ), } ) self.assertEqual( _extract_attributes(attributes_labels_mapping, num_attrs_limit=9), extracted_attributes_labels_mapping, ) def test_ignore_invalid_attributes(self): self.assertEqual( _extract_attributes( {"illegal_attribute_value": {}, "legal_attribute": 3}, num_attrs_limit=4, ), ProtoSpan.Attributes( attribute_map={"legal_attribute": AttributeValue(int_value=3)}, dropped_attributes_count=1, ), ) def test_too_many_attributes(self): too_many_attrs = {} for attr_key in range(5): too_many_attrs[str(attr_key)] = 0 proto_attrs = _extract_attributes(too_many_attrs, num_attrs_limit=4) 
self.assertEqual(proto_attrs.dropped_attributes_count, 1) def test_add_agent_attribute(self): self.assertEqual( _extract_attributes({}, num_attrs_limit=4, add_agent_attr=True), ProtoSpan.Attributes( attribute_map={"g.co/agent": self.agent_code}, dropped_attributes_count=0, ), ) def test_agent_attribute_priority(self): # Drop existing attributes in favor of the agent attribute self.assertEqual( _extract_attributes( {"attribute_key": "attr_value"}, num_attrs_limit=1, add_agent_attr=True, ), ProtoSpan.Attributes( attribute_map={"g.co/agent": self.agent_code}, dropped_attributes_count=1, ), ) def test_attribute_value_truncation(self): # shouldn't truncate self.assertEqual( _format_attribute_value(self.str_300), AttributeValue( string_value=TruncatableString( value=self.str_300, truncated_byte_count=0, ) ), ) # huge string should truncate self.assertEqual( _format_attribute_value(self.str_20kb), AttributeValue( string_value=TruncatableString( value=self.str_16kb, truncated_byte_count=(20 - 16) * 1024, ) ), ) def test_list_attribute_value(self): self.assertEqual( _format_attribute_value(("one", "two")), AttributeValue( string_value=TruncatableString( value="one,two", truncated_byte_count=0 ) ), ) self.assertEqual( _format_attribute_value([True]), AttributeValue( string_value=TruncatableString( value="True", truncated_byte_count=0 ) ), ) self.assertEqual( _format_attribute_value((2, 5)), AttributeValue( string_value=TruncatableString( value="2,5", truncated_byte_count=0 ) ), ) self.assertEqual( _format_attribute_value([2.0, 0.5, 4.55]), AttributeValue( string_value=TruncatableString( value="2.0,0.5,4.55", truncated_byte_count=0 ) ), ) def test_attribute_key_truncation(self): self.assertEqual( _extract_attributes( {self.str_300: "attr_value"}, num_attrs_limit=4 ), ProtoSpan.Attributes( attribute_map={ self.str_128: AttributeValue( string_value=TruncatableString( value="attr_value", truncated_byte_count=0 ) ) } ), ) def test_extract_empty_events(self): 
self.assertIsNone(_extract_events([])) def test_too_many_events(self): event = Event( name="event", timestamp=self.example_time_in_ns, attributes={} ) too_many_events = [event] * (MAX_NUM_EVENTS + 5) self.assertEqual( _extract_events(too_many_events), ProtoSpan.TimeEvents( time_event=[ { "time": self.example_time_stamp, "annotation": { "description": TruncatableString( value="event", ), "attributes": {}, }, }, ] * MAX_NUM_EVENTS, dropped_annotations_count=len(too_many_events) - MAX_NUM_EVENTS, ), ) def test_too_many_event_attributes(self): event_attrs = {} for attr_key in range(MAX_EVENT_ATTRS + 5): event_attrs[str(attr_key)] = 0 proto_events = _extract_events( [ Event( name="a", attributes=event_attrs, timestamp=self.example_time_in_ns, ) ] ) self.assertEqual( len( proto_events.time_event[0].annotation.attributes.attribute_map ), MAX_EVENT_ATTRS, ) self.assertEqual( proto_events.time_event[ 0 ].annotation.attributes.dropped_attributes_count, len(event_attrs) - MAX_EVENT_ATTRS, ) def test_extract_multiple_events(self): event1 = Event( name="event1", attributes=self.attributes_variety_pack, timestamp=self.example_time_in_ns, ) event2_nanos = 1589919438550020326 event2 = Event( name="event2", attributes={"illegal_attr_value": dict()}, timestamp=event2_nanos, ) self.assertEqual( _extract_events([event1, event2]), ProtoSpan.TimeEvents( time_event=[ { "time": self.example_time_stamp, "annotation": { "description": TruncatableString( value="event1", truncated_byte_count=0 ), "attributes": self.extracted_attributes_variety_pack, }, }, { "time": _get_time_from_ns(event2_nanos), "annotation": { "description": TruncatableString( value="event2", truncated_byte_count=0 ), "attributes": ProtoSpan.Attributes( attribute_map={}, dropped_attributes_count=1 ), }, }, ] ), ) def test_event_name_truncation(self): event1 = Event( name=self.str_300, attributes={}, timestamp=self.example_time_in_ns ) self.assertEqual( _extract_events([event1]), ProtoSpan.TimeEvents( time_event=[ { "time": 
self.example_time_stamp, "annotation": { "description": TruncatableString( value=self.str_256, truncated_byte_count=300 - 256, ), "attributes": {}, }, }, ] ), ) def test_extract_empty_links(self): self.assertIsNone(_extract_links([])) def test_extract_multiple_links(self): span_id1 = "95bb5edabd45950f" span_id2 = "b6b86ad2915c9ddc" link1 = Link( context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(span_id1, 16), is_remote=False, ), attributes={}, ) link2 = Link( context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(span_id1, 16), is_remote=False, ), attributes=self.attributes_variety_pack, ) link3 = Link( context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(span_id2, 16), is_remote=False, ), attributes={"illegal_attr_value": dict(), "int_attr_value": 123}, ) self.assertEqual( _extract_links([link1, link2, link3]), ProtoSpan.Links( link=[ { "trace_id": self.example_trace_id, "span_id": span_id1, "type": "TYPE_UNSPECIFIED", "attributes": ProtoSpan.Attributes(attribute_map={}), }, { "trace_id": self.example_trace_id, "span_id": span_id1, "type": "TYPE_UNSPECIFIED", "attributes": self.extracted_attributes_variety_pack, }, { "trace_id": self.example_trace_id, "span_id": span_id2, "type": "TYPE_UNSPECIFIED", "attributes": { "attribute_map": { "int_attr_value": AttributeValue(int_value=123) }, }, }, ] ), ) def test_extract_link_with_none_attribute(self): link = Link( context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(self.example_span_id, 16), is_remote=False, ), attributes=None, ) self.assertEqual( _extract_links([link]), ProtoSpan.Links( link=[ { "trace_id": self.example_trace_id, "span_id": self.example_span_id, "type": "TYPE_UNSPECIFIED", "attributes": ProtoSpan.Attributes(attribute_map={}), }, ] ), ) def test_too_many_links(self): link = Link( context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(self.example_span_id, 16), is_remote=False, ), attributes={}, ) 
too_many_links = [link] * (MAX_NUM_LINKS + 5) self.assertEqual( _extract_links(too_many_links), ProtoSpan.Links( link=[ { "trace_id": self.example_trace_id, "span_id": self.example_span_id, "type": "TYPE_UNSPECIFIED", "attributes": {}, } ] * MAX_NUM_LINKS, dropped_links_count=len(too_many_links) - MAX_NUM_LINKS, ), ) def test_too_many_link_attributes(self): link_attrs = {} for attr_key in range(MAX_LINK_ATTRS + 1): link_attrs[str(attr_key)] = 0 attr_link = Link( context=SpanContext( trace_id=int(self.example_trace_id, 16), span_id=int(self.example_span_id, 16), is_remote=False, ), attributes=link_attrs, ) proto_link = _extract_links([attr_link]) self.assertEqual( len(proto_link.link[0].attributes.attribute_map), MAX_LINK_ATTRS ) def test_extract_empty_resources(self): self.assertEqual(_extract_resources(Resource.get_empty()), {}) def test_extract_resource_attributes_with_regex(self): resource_regex = re.compile(r"service\..*") resource = Resource( attributes={ "cloud.account.id": 123, "host.id": "host", "cloud.zone": "US", "cloud.provider": "gcp", "extra_info": "extra", "gcp.resource_type": "gce_instance", "not_gcp_resource": "value", "service.name": "my-app", "service.version": "1", } ) expected_extract = { "g.co/r/gce_instance/project_id": "123", "g.co/r/gce_instance/instance_id": "host", "g.co/r/gce_instance/zone": "US", "service.name": "my-app", "service.version": "1", } self.assertEqual( _extract_resources(resource, resource_regex), expected_extract ) def test_non_matching_regex(self): resource_regex = re.compile(r"this-regex-matches-nothing") resource = Resource( attributes={ "cloud.account.id": 123, "host.id": "host", "cloud.zone": "US", "cloud.provider": "gcp", "extra_info": "extra", "gcp.resource_type": "gce_instance", "not_gcp_resource": "value", } ) expected_extract = { "g.co/r/gce_instance/project_id": "123", "g.co/r/gce_instance/instance_id": "host", "g.co/r/gce_instance/zone": "US", } self.assertEqual( _extract_resources(resource, resource_regex), 
expected_extract ) def test_extract_well_formed_resources(self): resource = Resource( attributes={ "cloud.account.id": 123, "host.id": "host", "cloud.zone": "US", "cloud.provider": "gcp", "extra_info": "extra", "gcp.resource_type": "gce_instance", "not_gcp_resource": "value", } ) expected_extract = { "g.co/r/gce_instance/project_id": "123", "g.co/r/gce_instance/instance_id": "host", "g.co/r/gce_instance/zone": "US", } self.assertEqual(_extract_resources(resource), expected_extract) def test_extract_malformed_resources(self): # This resource doesn't have all the fields required for a gce_instance # Specifically its missing "host.id", "cloud.zone", "cloud.account.id" resource = Resource( attributes={ "gcp.resource_type": "gce_instance", "cloud.provider": "gcp", } ) # Should throw when passed a malformed GCP resource dict self.assertRaises(KeyError, _extract_resources, resource) def test_extract_unsupported_gcp_resources(self): # Unsupported gcp resources will be ignored resource = Resource( attributes={ "cloud.account.id": "123", "host.id": "host", "extra_info": "extra", "not_gcp_resource": "value", "gcp.resource_type": "unsupported_gcp_resource", "cloud.provider": "gcp", } ) self.assertEqual(_extract_resources(resource), {}) def test_extract_unsupported_provider_resources(self): # Resources with currently unsupported providers will be ignored resource = Resource( attributes={ "cloud.account.id": "123", "host.id": "host", "extra_info": "extra", "not_gcp_resource": "value", "cloud.provider": "aws", } ) self.assertEqual(_extract_resources(resource), {}) def test_truncate_string(self): """Cloud Trace API imposes limits on the length of many things, e.g. strings, number of events, number of attributes. We truncate these things before sending it to the API as an optimization. 
""" self.assertEqual(_truncate_str("aaaa", limit=1), ("a", 3)) self.assertEqual(_truncate_str("aaaa", limit=5), ("aaaa", 0)) self.assertEqual(_truncate_str("aaaa", limit=4), ("aaaa", 0)) self.assertEqual(_truncate_str("中文翻译", limit=4), ("中", 9)) def test_strip_characters(self): self.assertEqual("0.10.0", _strip_characters("0.10.0b")) self.assertEqual("1.20.5", _strip_characters("1.20.5")) self.assertEqual("3.1.0", _strip_characters("3.1.0beta")) self.assertEqual("4.2.0", _strip_characters("4b.2rc.0a")) self.assertEqual("6.20.15", _strip_characters("b6.20.15")) # pylint: disable=no-member def test_extract_span_kind(self): self.assertEqual( _extract_span_kind(SpanKind.INTERNAL), ProtoSpan.SpanKind.INTERNAL ) self.assertEqual( _extract_span_kind(SpanKind.CLIENT), ProtoSpan.SpanKind.CLIENT ) self.assertEqual( _extract_span_kind(SpanKind.SERVER), ProtoSpan.SpanKind.SERVER ) self.assertEqual( _extract_span_kind(SpanKind.CONSUMER), ProtoSpan.SpanKind.CONSUMER ) self.assertEqual( _extract_span_kind(SpanKind.PRODUCER), ProtoSpan.SpanKind.PRODUCER ) self.assertEqual( _extract_span_kind(-1), ProtoSpan.SpanKind.SPAN_KIND_UNSPECIFIED )
GoogleCloudPlatform/opentelemetry-operations-python
opentelemetry-exporter-gcp-trace/tests/test_cloud_trace_exporter.py
Python
apache-2.0
28,235
from base import BaseClient NURTURING_API_VERSION = '1' class NurturingClient(BaseClient): def _get_path(self, subpath): return 'nurture/v%s/%s' % (NURTURING_API_VERSION, subpath) def get_campaigns(self, **options): return self._call('campaigns', **options) def get_leads(self, campaign_guid, **options): return self._call('campaign/%s/list' % campaign_guid, **options) def get_history(self, lead_guid, **options): return self._call('lead/%s' % lead_guid, **options) def enroll_lead(self, campaign_guid, lead_guid, **options): return self._call('campaign/%s/add' % campaign_guid, data=lead_guid, method='POST', **options) def unenroll_lead(self, campaign_guid, lead_guid, **options): return self._call('campaign/%s/remove' % campaign_guid, data=lead_guid, method='POST', **options)
ack8006/hapipy
hapi/nurturing.py
Python
apache-2.0
890
""" def toLocal(dt, offset = 8): dt: datetime offset: default 8 china time """ import datetime def toLocal(dt, offset = 8): localDateTime = dt + datetime.timedelta(hours=offset) return localDateTime if __name__ == '__main__': now = datetime.datetime.utcnow() print now print toLocal(now) print now
pisceanfoot/py_easyXY
py_easyXY/datetime/tzone.py
Python
apache-2.0
309
#name: pygsf #created: July 2017 #by: p.kennedy@fugro.com #description: python module to read and write a Generic Sensor Formaty (GSF) file natively #notes: See main at end of script for example how to use this #based on GSF Version 3.05 # See readme.md for more details import sys from glob import glob import argparse import os.path import struct import pprint import time import datetime import math import random from datetime import datetime from datetime import timedelta from statistics import mean import mmap # for testing only... # import matplotlib.pyplot as plt import numpy as np #/* The high order 4 bits are used to define the field size for this array */ GSF_FIELD_SIZE_DEFAULT = 0x00 #/* Default values for field size are used used for all beam arrays */ GSF_FIELD_SIZE_ONE = 0x10 #/* value saved as a one byte value after applying scale and offset */ GSF_FIELD_SIZE_TWO = 0x20 #/* value saved as a two byte value after applying scale and offset */ GSF_FIELD_SIZE_FOUR = 0x40 #/* value saved as a four byte value after applying scale and offset */ GSF_MAX_PING_ARRAY_SUBRECORDS = 26 # Record Decriptions (See page 82) HEADER = 1 SWATH_BATHYMETRY = 2 SOUND_VELOCITY_PROFILE = 3 PROCESSING_PARAMETERS = 4 SENSOR_PARAMETERS = 5 COMMENT = 6 HISTORY = 7 NAVIGATION_ERROR = 8 SWATH_BATHY_SUMMARY = 9 SINGLE_BEAM_SOUNDING = 10 HV_NAVIGATION_ERROR = 11 ATTITUDE = 12 SNIPPET_NONE = 0 # extract the mean value from the snippet array SNIPPET_MEAN = 1 # extract the mean value from the snippet array SNIPPET_MAX = 2 # extract the maximum value from the snippet array SNIPPET_DETECT = 3 # extract the bottom detect snippet value from the snippet array SNIPPET_MEAN5DB = 4 # extract the mean of all snippets within 5dB of the mean # the various frequencies we support in the R2Sonic multispectral files ARCIdx = {100000: 0, 200000: 1, 400000: 2} # the rejection flags used by this software REJECT_CLIP = -1 REJECT_RANGE= -2 REJECT_INTENSITY= -4 
###############################################################################
def main():
	'''Command-line entry point: parse -i (file name or wildcard) and run testreader() on each .gsf file.'''
	parser = argparse.ArgumentParser(description='Read GSF file and create a reflectivity image.')
	parser.add_argument('-i', dest='inputFile', action='store', help='Input ALL filename to image. It can also be a wildcard, e.g. *.gsf')

	if len(sys.argv)==1:
		parser.print_help()
		sys.exit(1)

	args = parser.parse_args()
	print ("processing with settings: ", args)
	for filename in glob(args.inputFile):
		if not filename.endswith('.gsf'):
			print ("File %s is not a .all file, skipping..." % (filename))
			continue
		if not os.path.isfile(filename):
			print ("file not found:", filename)
			exit()
		# testR2SonicAdjustment()
		testreader(filename)
		# conditioner()

###############################################################################
def testreader(filename):
	'''
	sample read script so we can see how to use the code
	'''
	start_time = time.time() # time the process so we can keep it quick
	# filename = "C:/projects/multispectral/PatriciaBasin/20161130-1907 - 0001-2026_1.gsf"
	# filename = "C:/development/python/sample_subset.gsf"
	# filename = "F:/Projects/multispectral/_BedfordBasin2016/20160331 - 125110 - 0001-2026_1.gsf"
	# filename = "F:/Projects/multispectral/_Newbex/20170524-134208 - 0001-2026_1.gsf"
	# filename = "F:/Projects/multispectral/_BedfordBasin2017/20170502 - 131750 - 0001-2026_1.gsf"
	# filename = "C:/projects/multispectral/_BedfordBasin2017/20170502 - 150058 - 0001-2026_1.gsf"
	print (filename)
	pingcount = 0
	# create a GSFREADER class and pass the filename
	r = GSFREADER(filename)
	# r.loadnavigation()
	# f1 = plt.figure()
	# # f2 = plt.figure()
	# # f3 = plt.figure()
	# ax1 = f1.add_subplot(111)
	# # ax2 = f2.add_subplot(111)
	# # ax3 = f3.add_subplot(111)
	print ("pingcount, pingnumber, 100kHz, 200kHz, 400kHz")
	while r.moreData():
		# read a datagram. If we support it, return the datagram type and aclass for that datagram
		# The user then needs to call the read() method for the class to undertake a fileread and binary decode. This keeps the read super quick.
		numberofbytes, recordidentifier, datagram = r.readDatagram()
		# print(datagram)
		if recordidentifier == SWATH_BATHYMETRY:
			print(recordidentifier, end=',')
			datagram.read()
			datagram.snippettype = SNIPPET_NONE
			# print ("%s Lat:%.3f Lon:%.3f Ping:%d Freq:%d Serial %s" % (datagram.currentRecordDateTime(), datagram.latitude, datagram.longitude, datagram.pingnumber, datagram.frequency, datagram.serialnumber))
			# for cross profile plotting
			# bs = []
			# for s in datagram.MEAN_REL_AMPLITUDE_ARRAY:
			#     if s != 0:
			#         bs.append(20 * math.log10(s) - 100)
			#     else:
			#         bs.append(0)
			# bs = [20 * math.log10(s) - 100 for s in datagram.MEAN_REL_AMPLITUDE_ARRAY]
			samplearray = datagram.R2Soniccorrection()
			# keep the latest per-frequency mean so multispectral pings can be compared
			if datagram.frequency == 100000:
				freq100 = mean(samplearray)
			if datagram.frequency == 200000:
				freq200 = mean(samplearray)
			if datagram.frequency == 400000:
				freq400 = mean(samplearray)
			# print ("%d,%d,%.3f,%.3f,%.3f" %(pingcount, datagram.pingnumber, freq100, freq200, freq400))
			# print ("%d" %(pingcount))
			pingcount += 1
			# if len(bs) > 0:
			#     plt.plot(datagram.BEAM_ANGLE_ARRAY, bs, linewidth=0.25, color='blue')
			#     plt.ylim([-60,-5])
			#     plt.xlim([-60,60])
			#     # ax3.plot(datagram.BEAM_ANGLE_ARRAY, datagram.ALONG_TRACK_ARRAY)
			#     plt.pause(0.001)
			# datagram.clippolar(-60, 60)
	# print("Duration %.3fs" % (time.time() - start_time )) # time the process
	# print ("PingCount:", pingcount)
	return

###############################################################################
class UNKNOWN_RECORD:
	'''used as a convenience tool for datagrams we have no bespoke classes. Better to make a bespoke class'''
	def __init__(self, fileptr, numbytes, recordidentifier, hdrlen):
		self.recordidentifier = recordidentifier
		self.offset = fileptr.tell()
		self.hdrlen = hdrlen
		self.numbytes = numbytes
		self.fileptr = fileptr
		self.fileptr.seek(numbytes, 1) # set the file ptr to the end of the record
		self.data = ""
		self.name = "unknown"

	def read(self):
		# NOTE(review): self.numberofbytes is never assigned (the attribute set in
		# __init__ is self.numbytes), so calling read() would raise AttributeError — confirm.
		self.data = self.fileptr.read(self.numberofbytes)

	def __str__(self):
		'''pretty print this class'''
		return pprint.pformat(vars(self))

###############################################################################
class SCALEFACTOR:
	'''scale/offset pair applied when decoding one beam-array subrecord.'''
	def __init__(self):
		self.subrecordID = 0
		self.compressionFlag = 0 #/* Specifies bytes of storage in high order nibble and type of compression in low order nibble */
		self.multiplier = 0.0
		self.offset = 0
		self.name = "scaleFactor"

	def __str__(self):
		'''pretty print this class'''
		return pprint.pformat(vars(self))

###############################################################################
class SWATH_BATHYMETRY_PING :
	'''decoder for a GSF record type 2 (swath bathymetry ping) and its subrecords.'''
	def __init__(self, fileptr, numbytes, recordidentifier, hdrlen):
		self.recordidentifier = recordidentifier # assign the GSF code for this datagram type
		self.offset = fileptr.tell() # remember where this packet resides in the file so we can return if needed
		self.hdrlen = hdrlen # remember the header length. it should be 8 bytes, bout if checksum then it is 12
		self.numbytes = numbytes # remember how many bytes this packet contains
		self.fileptr = fileptr # remember the file pointer so we do not need to pass from the host process
		self.fileptr.seek(numbytes, 1) # move the file pointer to the end of the record so we can skip as the default actions
		self.scalefactors = []
		self.DEPTH_ARRAY = []
		self.ACROSS_TRACK_ARRAY = []
		self.ALONG_TRACK_ARRAY = []
		self.TRAVEL_TIME_ARRAY = []
		self.BEAM_ANGLE_ARRAY = []
		self.MEAN_CAL_AMPLITUDE_ARRAY = []
		self.MEAN_REL_AMPLITUDE_ARRAY = []
		self.QUALITY_FACTOR_ARRAY = []
		self.BEAM_FLAGS_ARRAY = []
		self.BEAM_ANGLE_FORWARD_ARRAY = []
		self.VERTICAL_ERROR_ARRAY = []
		self.HORIZONTAL_ERROR_ARRAY = []
		self.SECTOR_NUMBER_ARRAY = []
		# self.INTENSITY_SERIES_ARRAY = []
		self.SNIPPET_SERIES_ARRAY = []
		self.perbeam = True
		self.snippettype = SNIPPET_MAX
		self.numbeams = 0
		self.time = 0
		self.pingnanotime = 0
		self.name = "swath bathy ping"

	###############################################################################
	def __str__(self):
		'''pretty print this class'''
		return pprint.pformat(vars(self))

	###############################################################################
	def clippolar(self, leftclipdegrees, rightclipdegrees):
		'''sets the processing flags to rejected if the beam angle is beyond the clip parameters'''
		if self.numbeams == 0:
			return
		if len(self.QUALITY_FACTOR_ARRAY) != len(self.TRAVEL_TIME_ARRAY):
			return
		for i, s in enumerate(self.BEAM_ANGLE_ARRAY):
			if (s <= leftclipdegrees) or (s >= rightclipdegrees):
				self.QUALITY_FACTOR_ARRAY[i] += REJECT_CLIP
				# self.MEAN_REL_AMPLITUDE_ARRAY[i] = 0
				# self.ACROSS_TRACK_ARRAY[i] = 0
		return

	###############################################################################
	def cliptwtt(self, minimumtraveltime=0.0):
		'''sets the processing flags to rejected if the two way travel time is less than the clip parameters'''
		if self.numbeams == 0:
			return
		if len(self.QUALITY_FACTOR_ARRAY) != len(self.TRAVEL_TIME_ARRAY):
			return
		for i, s in enumerate(self.TRAVEL_TIME_ARRAY):
			if (s <= minimumtraveltime):
				self.QUALITY_FACTOR_ARRAY[i] += REJECT_RANGE
		return

	###############################################################################
	def clipintensity(self, minimumintenisty=0.0):
		'''sets the processing flags to rejected if the two way travel time is less than the clip parameters'''
		if self.numbeams == 0:
			return
		if len(self.QUALITY_FACTOR_ARRAY) != len(self.TRAVEL_TIME_ARRAY):
			return
		for i, s in enumerate(self.MEAN_REL_AMPLITUDE_ARRAY):
			if (s <= minimumintenisty):
				self.QUALITY_FACTOR_ARRAY[i] += REJECT_INTENSITY
		return

	###############################################################################
	def read(self, headeronly=False):
		'''decode the ping header and, unless headeronly, every subrecord in the packet.'''
		self.fileptr.seek(self.offset + self.hdrlen, 0) # move the file pointer to the start of the record so we can read from disc
		# read ping header
		hdrfmt = '>llll5hlH3h2Hlllh'
		hdrlen = struct.calcsize(hdrfmt)
		rec_unpack = struct.Struct(hdrfmt).unpack
		self.fileptr.seek(self.offset + self.hdrlen , 0) # move the file pointer to the start of the record so we can read from disc
		data = self.fileptr.read(hdrlen)
		s = rec_unpack(data)

		self.time = s[0]
		self.longitude = s[2] / 10000000
		self.latitude = s[3] / 10000000
		self.numbeams = s[4]
		self.centrebeam = s[5]
		self.pingflags = s[6]
		self.reserved = s[7]
		self.tidecorrector = s[8] / 100
		self.depthcorrector = s[9] / 100
		self.heading = s[10] / 100
		self.pitch = s[11] / 100
		self.roll = s[12] / 100
		self.heave = s[13] / 100
		self.course = s[14] / 100
		self.speed = s[15] / 100
		self.height = s[16] / 100
		self.separation = s[17] / 100
		self.gpstidecorrector = s[18] / 100
		self.spare = s[19]

		while (self.fileptr.tell() < self.offset + self.numbytes): #dont read past the end of the packet length. This should never happen!
			# each subrecord starts with a 4-byte word: id in the top byte, size in the lower 3 bytes
			fmt = '>l'
			fmtlen = struct.calcsize(fmt)
			rec_unpack = struct.Struct(fmt).unpack
			data = self.fileptr.read(fmtlen) # read the record from disc
			s = rec_unpack(data)

			subrecord_id = (s[0] & 0xFF000000) >> 24
			subrecord_size = s[0] & 0x00FFFFFF

			# skip the record for performance reasons. Very handy in some circumstances
			if headeronly:
				if subrecord_id == 21:
					self.fileptr.seek(self.offset + self.numbytes, 0) #move forwards to the end of the record as we cannot trust the record length from the 2024
				else:
					self.fileptr.seek(subrecord_size, 1) #move forwards to the end of teh record
				continue

			# now decode the subrecord
			# curr = self.fileptr.tell()
			scale, offset, compressionFlag, datatype = self.getscalefactor(subrecord_id, subrecord_size / int(self.numbeams))
			if subrecord_id == 100:
				self.readscalefactors()
			elif subrecord_id == 1:
				self.readarray(self.DEPTH_ARRAY, scale, offset, datatype)
			elif subrecord_id == 2:
				self.readarray(self.ACROSS_TRACK_ARRAY, scale, offset, datatype)
			elif subrecord_id == 3:
				self.readarray(self.ALONG_TRACK_ARRAY, scale, offset, datatype)
			elif subrecord_id == 4:
				self.readarray(self.TRAVEL_TIME_ARRAY, scale, offset, datatype)
			elif subrecord_id == 5:
				self.readarray(self.BEAM_ANGLE_ARRAY, scale, offset, datatype)
			elif subrecord_id == 6:
				self.readarray(self.MEAN_CAL_AMPLITUDE_ARRAY, scale, offset, datatype)
			elif subrecord_id == 7:
				self.readarray(self.MEAN_REL_AMPLITUDE_ARRAY, scale, offset, datatype)
			elif subrecord_id == 9:
				self.readarray(self.QUALITY_FACTOR_ARRAY, scale, offset, datatype)
			elif subrecord_id == 16:
				self.readarray(self.BEAM_FLAGS_ARRAY, scale, offset, datatype)
			elif subrecord_id == 18:
				self.readarray(self.BEAM_ANGLE_FORWARD_ARRAY, scale, offset, datatype)
			elif subrecord_id == 19:
				self.readarray(self.VERTICAL_ERROR_ARRAY, scale, offset, datatype)
			elif subrecord_id == 20:
				# NOTE(review): subrecord 20 is decoded into VERTICAL_ERROR_ARRAY, the same
				# target as subrecord 19, while HORIZONTAL_ERROR_ARRAY is never filled —
				# looks like a copy/paste slip; confirm against the GSF specification.
				self.readarray(self.VERTICAL_ERROR_ARRAY, scale, offset, datatype)
			elif subrecord_id == 21:
				before = self.fileptr.tell()
				self.readintensityarray(self.SNIPPET_SERIES_ARRAY, scale, offset, datatype, self.snippettype)
				if subrecord_size % 4 > 0:
					self.fileptr.seek(4 - (subrecord_size % 4), 1) #pkpk we should not need this!!!
			elif subrecord_id == 22:
				self.readarray(self.SECTOR_NUMBER_ARRAY, scale, offset, datatype)
			else:
				# read to the end of the record to keep in alignment. This permits us to not have all the decodes in place
				self.fileptr.seek(subrecord_size, 1) #move forwards to the end of teh record
		return

	###############################################################################
	def getscalefactor(self, ID, bytes_per_value):
		'''look up the scale/offset for a subrecord ID and pick the struct datatype from the per-value byte width; defaults to (1, 0, 0, 'h') when no scale factor is loaded.'''
		for s in self.scalefactors:
			if s.subrecordID == ID: # DEPTH_ARRAY array
				if bytes_per_value == 1:
					datatype = 'B' #unsigned values
				elif bytes_per_value == 2:
					datatype = 'H' #unsigned values
					if ID == 2: #ACROSS_TRACK_ARRAY array
						datatype = 'h' #unsigned values
					if ID == 3: #ACROSS_TRACK_ARRAY array
						datatype = 'h' #unsigned values
					if ID == 5: #beam angle array
						datatype = 'h' #unsigned values
				elif bytes_per_value == 4:
					datatype = 'L' #unsigned values
					if ID == 2: #ACROSS_TRACK_ARRAY array
						datatype = 'l' #unsigned values
					if ID == 5: #beam angle array
						datatype = 'l' #unsigned values
				else:
					datatype = 'L' #unsigned values not sure about this one. needs test data
				return s.multiplier, s.offset, s.compressionFlag, datatype
		return 1,0,0, 'h'

	###############################################################################
	def readscalefactors(self):
		'''decode subrecord 100: a count followed by (id/compression, multiplier, offset) triples.'''
		# /* First four byte integer contains the number of scale factors */
		# now read all scale factors
		scalefmt = '>l'
		scalelen = struct.calcsize(scalefmt)
		rec_unpack = struct.Struct(scalefmt).unpack
		data = self.fileptr.read(scalelen)
		s = rec_unpack(data)
		self.numscalefactors = s[0]

		scalefmt = '>lll'
		scalelen = struct.calcsize(scalefmt)
		rec_unpack = struct.Struct(scalefmt).unpack
		for i in range(self.numscalefactors):
			data = self.fileptr.read(scalelen)
			s = rec_unpack(data)
			sf = SCALEFACTOR()
			sf.subrecordID = (s[0] & 0xFF000000) >> 24;
			sf.compressionFlag = (s[0] & 0x00FF0000) >> 16;
			sf.multiplier = s[1]
			sf.offset = s[2]
			self.scalefactors.append(sf)
		# print (self.scalefactors)
		return

	###############################################################################
	def readintensityarray(self, snippets, scale, offset, datatype, snippettype):
		''' read the time series intensity array type 21 subrecord '''
		hdrfmt = '>bl16s'
		hdrlen = struct.calcsize(hdrfmt)
		rec_unpack = struct.Struct(hdrfmt).unpack
		hdr = self.fileptr.read(hdrlen)
		s = rec_unpack(hdr)
		bitspersample = s[0]
		appliedcorrections = s[1]

		# before we decode the intentisty data, read the sensor specific header
		#for now just read the r2sonic as that is what we need. For other sensors we need to implement decodes
		self.decodeR2SonicImagerySpecific()

		for b in range(self.numbeams):
			hdrfmt = '>hh8s'
			hdrlen = struct.calcsize(hdrfmt)
			rec_unpack = struct.Struct(hdrfmt).unpack
			hdr = self.fileptr.read(hdrlen)
			s = rec_unpack(hdr)
			numsamples = s[0]
			bottomdetectsamplenumber = s[1]
			spare = s[2]
			fmt = '>' + str(numsamples) + 'H'
			l = struct.calcsize(fmt)
			rec_unpack = struct.Struct(fmt).unpack
			data = self.fileptr.read(l)
			raw = rec_unpack(data)
			# strip out zero values
			raw = [s for s in raw if s != 0]

			if snippettype == SNIPPET_NONE:
				snippets.append(0)
				continue
			elif snippettype == SNIPPET_MEAN5DB:
				# populate the array with the mean of all samples withing a 5dB range of the mean. As per QPS
				# NOTE(review): the local name 'mean' below shadows the statistics.mean
				# imported at module level for the remainder of this loop iteration.
				if len(raw) > 0:
					raw2 = [20.0 * math.log10(s / scale + offset) for s in raw]
					mean = (sum(raw2) / float(len(raw2) ))
					highcut = [s for s in raw2 if s < mean + 5] #high cut +5dB
					highlowcut = [s for s in highcut if s > mean - 5] #low cut -5dB
				else:
					snippets.append(0)
					continue
				if len(highlowcut) > 0:
					snippets.append((sum(highlowcut) / float(len(highlowcut) / scale) + offset))
				else:
					snippets.append((mean / scale) + offset)
			elif snippettype == SNIPPET_MEAN:
				# populate the array with the mean of all samples
				if len(raw) > 0:
					snippets.append((sum(raw) / float(len(raw) / scale) + offset))
				else:
					snippets.append(0)
			elif snippettype == SNIPPET_MAX:
				# populate the array with the MAX of all samples
				if len(raw) > 0:
					snippets.append(max(raw) / scale + offset)
				else:
					snippets.append(0)
			# NOTE(review): this branch repeats SNIPPET_MEAN so it is unreachable; judging
			# by its comment it was meant to test SNIPPET_DETECT — confirm and fix upstream.
			elif snippettype == SNIPPET_MEAN:
				# populate with a single value as identified by the bottom detect
				if bottomdetectsamplenumber > 0:
					snippets.append ((raw[bottomdetectsamplenumber] / scale) + offset)
				else:
					snippets.append (0)
		return

	###############################################################################
	def R2Soniccorrection(self):
		'''entry point for r2sonic backscatter TVG, Gain and footprint correction algorithm'''
		# NOTE(review): both branches below return immediately, so everything after this
		# if/else is dead code — presumably the correction loop was deliberately disabled;
		# confirm before removing or re-enabling.
		if self.perbeam:
			samplearray = self.MEAN_REL_AMPLITUDE_ARRAY
			return samplearray
		else:
			samplearray = self.SNIPPET_SERIES_ARRAY
			return samplearray

		# an implementation of the backscatter correction algorithm from Norm Campbell at CSIRO
		H0_TxPower = self.transmitsourcelevel
		H0_SoundSpeed = self.soundspeed
		H0_RxAbsorption = self.absorptioncoefficient
		H0_TxBeamWidthVert = self.beamwidthvertical
		H0_TxBeamWidthHoriz = self.beamwidthhorizontal
		H0_TxPulseWidth = self.pulsewidth
		H0_RxSpreading = self.receiverspreadingloss
		H0_RxGain = self.receivergain
		H0_VTX_Offset = self.vtxoffset

		for i in range(self.numbeams):
			if self.BEAM_FLAGS_ARRAY[i] < 0:
				continue
			S1_angle = self.BEAM_ANGLE_ARRAY[i] #angle in degrees
			S1_twtt = self.TRAVEL_TIME_ARRAY[i]
			S1_range = math.sqrt((self.ACROSS_TRACK_ARRAY[i] ** 2) + (self.ALONG_TRACK_ARRAY[i] ** 2))
			if samplearray[i] != 0:
				S1_uPa = samplearray[i]
				# adjusted = 0
				# a test on request from Norm....
				# adjusted = 20 * math.log10(S1_uPa) - 100
				# the formal adjustment from Norm Campbell...
				# if i == 127:
				adjusted = self.backscatteradjustment( S1_angle, S1_twtt, S1_range, S1_uPa, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset)
				samplearray[i] = adjusted
		return samplearray

	###############################################################################
	def backscatteradjustment(self, S1_angle, S1_twtt, S1_range, S1_Magnitude, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset):
		'''R2Sonic backscatter correction algorithm from Norm Camblell at CSIRO. This is a port from F77 fortran code, and has been tested and confirmed to provide identical results'''
		# the following code uses the names for the various packets as listed in the R2Sonic SONIC 2024 Operation Manual v6.0
		# so names beginning with
		# H0_ denote parameters from the BATHY (BTH) and Snippet (SNI) packets from section H0
		# R0_ denote parameters from the BATHY (BTH) packets from section R0
		# S1_ denote parameters from the Snippet (SNI) packets from section S1
		# names beginning with
		# z_ denote values derived from the packet parameters

		# the range, z_range_m, can be found from the two-way travel time (and scaling factor), and the sound speed, as follows:
		one_rad = 57.29577951308232
		S1_angle_rad = S1_angle / one_rad
		z_one_way_travel_secs = S1_twtt / 2.0
		z_range_m = z_one_way_travel_secs * H0_SoundSpeed

		# there is a range of zero, so this is an invalid beam, so quit
		if z_range_m == 0:
			return 0

		###### TRANSMISSION LOSS CORRECTION ##########################################
		# according to Lurton, Augustin and Le Bouffant (Femme 2011), the basic Sonar equation is
		# received_level = source_level - 2 * transmission_loss + target_strength + receiver_gain
		# note that this last term does not always appear explicitly in the sonar equation
		# more specifically:
		# transmission_loss = H0_RxAbsorption * range_m + 40 log10 ( range_m )
		# target_strength = backscatter_dB_m + 10 log10 ( z_area_of_insonification )
		# receiver_gain = TVG + H0_RxGain
		# the components of the Sonar equation can be calculated as follows:
		# u16 S1_Magnitude[S1_Samples]; // [micropascals] = S1_Magnitude[n]
		z_received_level = 20.0 * math.log10 ( S1_Magnitude )
		z_source_level = H0_TxPower # [dB re 1 uPa at 1 meter]
		z_transmission_loss_t1 = 2.0 * H0_RxAbsorption * z_range_m / 1000.0 # [dB per kilometer]
		z_transmission_loss_t2 = 40.0 * math.log10(z_range_m)
		z_transmission_loss = z_transmission_loss_t1 + z_transmission_loss_t2

		###### INSONIFICATION AREA CORRECTION Checked 19 August 2017 p.kennedy@fugr.com ##########################################
		# for oblique angles
		# area_of_insonification = along_track_beam_width * range * sound_speed * pulse_width / 2 sin ( incidence_angle)
		# for normal incidence
		# area_of_insonification = along_track_beam_width * across_track_beam_width * range ** 2
		sin_S1_angle = math.sin ( abs ( S1_angle_rad ) )
		# from Hammerstad 00 EM Technical Note Backscattering and Seabed Image Reflectivity.pdf
		# A = ψTψr*R^2 around normal incidence
		z_area_of_insonification_nml = H0_TxBeamWidthVert * H0_TxBeamWidthHoriz * z_range_m **2
		# A = ½cτ ψTR/sinφ elsewhere
		if ( abs ( S1_angle ) >= 0.001 ):
			z_area_of_insonification_obl = 0.5 * H0_SoundSpeed * H0_TxPulseWidth * H0_TxBeamWidthVert * z_range_m / sin_S1_angle
		# NOTE(review): the result of this first if/else is always overwritten by the
		# second chain below, so it is a dead assignment — confirm intent.
		if ( abs ( S1_angle ) < 25. ):
			z_area_of_insonification = z_area_of_insonification_nml
		else:
			z_area_of_insonification = z_area_of_insonification_obl

		if ( abs ( S1_angle ) < 0.001 ):
			z_area_of_insonification = z_area_of_insonification_nml
		elif ( z_area_of_insonification_nml < z_area_of_insonification_obl ):
			z_area_of_insonification = z_area_of_insonification_nml
		else:
			z_area_of_insonification = z_area_of_insonification_obl

		###### TIME VARIED GAIN CORRECTION 19 August 2017 p.kennedy@fugr.com ##########################################
		# note that the first equation refers to the along-track beam width
		# the R2Sonic Operation Manual refers on p21 to the Beamwidth - Along Track -- moreover, for the 2024, the Beamwidth Along Track is twice
		# the Beamwidth Across Track
		# according to the R2Sonic Operation Manual in Section 5.6.3 on p88, the TVG equation is:
		# TVG = 2*R* α/1000 + Sp*log(R) + G
		# where:
		# α = Absorption Loss db/km (H0_RxAbsorption)
		# R = Range in metres (range_m)
		# Sp = Spreading loss coefficient (H0_RxSpreading)
		# G = Gain from Sonar Control setting (H0_RxGain)
		TVG_1 = 2.0 * z_range_m * H0_RxAbsorption / 1000.
		TVG_2 = H0_RxSpreading * math.log10 ( z_range_m )
		TVG = TVG_1 + TVG_2 + H0_RxGain
		# as per email from Beaudoin, clip the TVG between 4 and 83 dB
		TVG = min(max(4, TVG ), 83)

		###### NOW COMPUTE THE CORRECTED BACKSCATTER ##########################################
		backscatter_dB_m = z_received_level - z_source_level + z_transmission_loss - (10.0 * math.log10 ( z_area_of_insonification )) - TVG - H0_VTX_Offset + 100.0
		return backscatter_dB_m

	###############################################################################
	def decodeR2SonicImagerySpecific(self):
		''' read the imagery information for the r2sonic 2024 '''
		fmt = '>12s12slll lllll llllhh lllll lllhh lllll l32s'
		l = struct.calcsize(fmt)
		rec_unpack = struct.Struct(fmt).unpack
		data = self.fileptr.read(l)
		raw = rec_unpack(data)
		self.modelnumber = raw[0]
		self.serialnumber = raw[1].decode('utf-8').rstrip('\x00')
		self.pingtime = raw[2]
		self.pingnanotime = raw[3]
		self.pingnumber = raw[4]
		self.pingperiod = raw[5] / 1.0e6
		self.soundspeed = raw[6] / 1.0e2
		self.frequency = raw[7] / 1.0e3
		self.transmitsourcelevel = raw[8] / 1.0e2
		self.pulsewidth = raw[9] / 1.0e7
		self.beamwidthvertical = math.radians(raw[10] / 1.0e6)
		self.beamwidthhorizontal = math.radians(raw[11] / 1.0e6)
		#apply scaling as per email from Beaudoin https://jira.qps.nl/browse/SFM-2857
		self.beamwidthvertical = math.radians(raw[10] / 1.0e6 * (400000 / self.frequency))
		self.beamwidthhorizontal = math.radians(raw[11] / 1.0e6 * (400000 / self.frequency))
		transmitsteeringvertical = raw[12] / 1.0e6
		transmitsteeringhorizontal = raw[13] / 1.0e6
		transmitinfo = raw[14]
		self.vtxoffset = raw[15] / 100
		receiverbandwidth = raw[16] / 1.0e4
		receiversamplerate = raw[17] / 1.0e3
		receiverrange = raw[18] / 1.0e5
		# The GSF file preserves R2Sonic's native scaling of their gain parameter at 0.5 dB resolution, so you need to take the gain and multiply by 2.
		self.receivergain = raw[19] / 1.0e2 * 2.0
		self.receiverspreadingloss = raw[20] / 1.0e3
		self.absorptioncoefficient = raw[21]/ 1.0e3 #dB/kilometre
		mounttiltangle = raw[22] / 1.0e6
		# print ("ping %d Date %s freq %d absorption %.3f" % (self.pingnumber, self.currentRecordDateTime(), self.frequency, self.absorptioncoefficient))
		receiverinfo = raw[23]
		reserved = raw[24]
		numbeams = raw[25]
		moreinfo1 = raw[26] / 1.0e6
		moreinfo2 = raw[27] / 1.0e6
		moreinfo3 = raw[28] / 1.0e6
		moreinfo4 = raw[29] / 1.0e6
		moreinfo5 = raw[30] / 1.0e6
		moreinfo6 = raw[31] / 1.0e6
		spare = raw[32]
		return

	###############################################################################
	def readarray(self, values, scale, offset, datatype):
		''' read the ping array data '''
		fmt = '>' + str(self.numbeams) + datatype
		l = struct.calcsize(fmt)
		rec_unpack = struct.Struct(fmt).unpack
		data = self.fileptr.read(l)
		raw = rec_unpack(data)
		for d in raw:
			values.append((d / scale) + offset)
		return values

	###############################################################################
	def currentRecordDateTime(self):
		'''return the ping time as a datetime (from the unix timestamp in self.time).'''
		return self.from_timestamp(self.time)

	def to_timestamp(self, recordDate):
		'''convert a datetime to unix seconds (epoch 1970-01-01).'''
		return (recordDate - datetime(1970, 1, 1)).total_seconds()

	def from_timestamp(self, unixtime):
		'''convert unix seconds to a datetime (epoch 1970-01-01).'''
		return datetime(1970, 1 ,1) + timedelta(seconds=unixtime)

###############################################################################
class GSFHEADER:
	'''decoder for the GSF record type 1 (file header / version string).'''
	def __init__(self, fileptr, numbytes, recordidentifier, hdrlen):
		self.recordidentifier = recordidentifier # assign the GSF code for this datagram type
		self.offset = fileptr.tell() # remember where this packet resides in the file so we can return if needed
		self.hdrlen = hdrlen # remember where this packet resides in the file so we can return if needed
		self.numbytes = numbytes # remember how many bytes this packet contains
		self.fileptr = fileptr # remember the file pointer so we do not need to pass from the host process
		self.fileptr.seek(numbytes, 1) # move the file pointer to the end of the record so we can skip as the default actions
		self.name = "GSFHeader"

	def __str__(self):
		'''pretty print this class'''
		return pprint.pformat(vars(self))

	def read(self):
		rec_fmt = '=12s'
		rec_len = struct.calcsize(rec_fmt)
		rec_unpack = struct.Struct(rec_fmt).unpack
		self.fileptr.seek(self.offset + self.hdrlen, 0) # move the file pointer to the start of the record so we can read from disc
		data = self.fileptr.read(rec_len)
		bytesRead = rec_len
		s = rec_unpack(data)
		self.version = s[0].decode('utf-8').rstrip('\x00')
		return

###############################################################################
class GSFREADER:
	def __init__(self, filename, loadscalefactors=False):
		''' class to read generic sensor format files. '''
		if not os.path.isfile(filename):
			print ("file not found:", filename)
		self.fileName = filename
		self.fileSize = os.path.getsize(filename)
		f = open(filename, 'r+b')
		# memory-map the whole file; all subsequent reads/seeks go through the mmap
		self.fileptr = mmap.mmap(f.fileno(), 0)
		self.hdrfmt = ">LL"
		self.hdrlen = struct.calcsize(self.hdrfmt)
		self.scalefactors = []
		if loadscalefactors:
			self.scalefactors = self.loadscalefactors()

	def moreData(self):
		'''return the number of bytes remaining (truthy while there is more to read).'''
		bytesRemaining = self.fileSize - self.fileptr.tell()
		# print ("current file ptr position: %d size %d" % ( self.fileptr.tell(), self.fileSize))
		return bytesRemaining

	def currentPtr(self):
		return self.fileptr.tell()

	def close(self):
		''' close the file '''
		self.fileptr.close()

	def rewind(self):
		''' go back to start of file '''
		self.fileptr.seek(0, 0)

	def __str__(self):
		'''pretty print this class'''
		return pprint.pformat(vars(self))

	def readDatagramBytes(self, offset, byteCount):
		'''read the entire raw bytes for the datagram without changing the file pointer. this is used for file conditioning'''
		curr = self.fileptr.tell()
		self.fileptr.seek(offset, 0) # move the file pointer to the start of the record so we can read from disc
		data = self.fileptr.read(byteCount)
		self.fileptr.seek(curr, 0)
		return data

	def loadscalefactors(self):
		''' rewind, load the scale factors array and rewind to the original position. We can then use these scalefactors for every ping '''
		curr = self.fileptr.tell()
		self.rewind()
		while self.moreData():
			numberofbytes, recordidentifier, datagram = self.readDatagram()
			if recordidentifier == SWATH_BATHYMETRY:
				datagram.read()
				self.fileptr.seek(curr, 0)
				return datagram.scalefactors
		self.fileptr.seek(curr, 0)
		return None

	def loadnavigation(self):
		''' rewind, load the navigation from the bathy records and rewind '''
		navigation = []
		curr = self.fileptr.tell()
		self.rewind()
		while self.moreData():
			numberofbytes, recordidentifier, datagram = self.readDatagram()
			if recordidentifier == SWATH_BATHYMETRY:
				datagram.read(True)
				navigation.append([datagram.time + datagram.pingnanotime/1000000000.0, datagram.longitude, datagram.latitude])
		self.fileptr.seek(curr, 0)
		print ("Navigation records loaded:", len(navigation))
		return navigation

	def getrecordcount(self):
		''' rewind, count the number of ping records as fast as possible. useful for progress bars '''
		numpings = 0
		curr = self.fileptr.tell()
		self.rewind()
		while self.moreData():
			numberofbytes, recordidentifier, datagram = self.readDatagram()
			if recordidentifier == SWATH_BATHYMETRY:
				numpings += 1
		self.fileptr.seek(curr, 0)
		return numpings

	def readDatagram(self):
		'''sniff the next record header and return (numbytes, recordidentifier, datagram-object) without decoding the body.'''
		# read the datagram header. This permits us to skip datagrams we do not support
		numberofbytes, recordidentifier, haschecksumnumberofbytes, hdrlen = self.sniffDatagramHeader()
		if recordidentifier == HEADER:
			# create a class for this datagram, but only decode if the resulting class if called by the user. This makes it much faster
			dg = GSFHEADER(self.fileptr, numberofbytes, recordidentifier, hdrlen)
			return numberofbytes, recordidentifier, dg
		elif recordidentifier == SWATH_BATHYMETRY:
			dg = SWATH_BATHYMETRY_PING(self.fileptr, numberofbytes, recordidentifier, hdrlen)
			dg.scalefactors = self.scalefactors
			return numberofbytes, recordidentifier, dg
		# elif recordidentifier == 3: # SOUND_VELOCITY_PROFILE
		#	 dg = SOUND_VELOCITY_PROFILE(self.fileptr, numberofbytes)
		#	 return dg.recordidentifier, dg
		else:
			dg = UNKNOWN_RECORD(self.fileptr, numberofbytes, recordidentifier, hdrlen)
			# self.fileptr.seek(numberofbytes, 1) # set the file ptr to the end of the record
			return numberofbytes, recordidentifier, dg

	def sniffDatagramHeader(self):
		''' read the las file header from disc '''
		curr = self.fileptr.tell()
		if (self.fileSize - curr) < self.hdrlen:
			# we have reached the end of the fle, so quit
			self.fileptr.seek(self.fileSize,0)
			return (0, 0, False, 0)
		# version header format
		data = self.fileptr.read(self.hdrlen)
		s = struct.unpack(self.hdrfmt, data)
		sizeofdata = s[0]
		recordidentifier = s[1]
		haschecksum = recordidentifier & 0x80000000
		temp = recordidentifier & 0x7FC00000
		reserved = (temp >> 22)
		recordidentifier = (recordidentifier & 0x003FFFFF)
		if haschecksum:
			# read the checksum of 4 bytes if required
			# NOTE(review): this early return yields a 3-tuple, but readDatagram()
			# unpacks FOUR values, and the file pointer is not reset to 'curr' here
			# (unlike the paths below) — the duplicate 4-tuple checksum return after
			# the seek is unreachable. Looks like a defect; confirm with a checksummed file.
			chksum = self.fileptr.read(4)
			return (sizeofdata + self.hdrlen + 4, recordidentifier, haschecksum)
		# now reset file pointer to the start of the record
		self.fileptr.seek(curr, 0)
		if haschecksum:
			return (sizeofdata + self.hdrlen + 4, recordidentifier, haschecksum, self.hdrlen + 4)
		else:
			return (sizeofdata + self.hdrlen, recordidentifier, haschecksum, self.hdrlen )

###############################################################################
def isBitSet(int_type, offset):
	'''testBit() returns a nonzero result, 2**offset, if the bit at 'offset' is one.'''
	# NOTE(review): 'mask' is computed but unused; the return recomputes the shift.
	mask = 1 << offset
	return (int_type & (1 << offset)) != 0

###############################################################################
def createOutputFileName(path):
	'''Create a valid output filename. if the name of the file already exists the file name is auto-incremented.'''
	path = os.path.expanduser(path)

	if not os.path.exists(os.path.dirname(path)):
		os.makedirs(os.path.dirname(path))

	if not os.path.exists(path):
		return path

	root, ext = os.path.splitext(os.path.expanduser(path))
	dir = os.path.dirname(root)
	fname = os.path.basename(root)
	candidate = fname+ext
	index = 1
	ls = set(os.listdir(dir))
	while candidate in ls:
		candidate = "{}_{}{}".format(fname,index,ext)
		index += 1
	return os.path.join(dir, candidate)

###############################################################################
class cBeam:
	'''per-beam accumulator used when computing backscatter angular response curves (ARC).'''
	def __init__(self, beamDetail, angle):
		self.sortingDirection = beamDetail[0]
		self.detectionInfo = beamDetail[1]
		self.numberOfSamplesPerBeam = beamDetail[2]
		self.centreSampleNumber = beamDetail[3]
		self.sector = 0
		self.takeOffAngle = angle # used for ARC computation
		self.sampleSum = 0 # used for backscatter ARC computation process
		self.sampleMin = 999
		self.sampleMax = -999
		self.samples = []
		self.name = "beam"

	def __str__(self):
		'''pretty print this class'''
		return pprint.pformat(vars(self))

###############################################################################
if __name__ == "__main__":
	main()

# def testR2SonicAdjustment():
#	 '''
#	 This test code confirms the results are in alignment with those from Norm Campbell at CSIRO who kindly provided the code in F77
#	 '''
#	 # adjusted backscatter -38.6
#	 # adjusted backscatter -47.6
#	 # adjusted backscatter -27.5
#	 # adjusted backscatter -36.6
#	 # adjusted backscatter -35.5
#	 S1_angle = -58.0
#	 S1_twtt = 0.20588
#	 S1_range = 164.8
#	 H0_TxPower = 197.0
#	 H0_SoundSpeed = 1468.59
#	 H0_RxAbsorption = 80.0
#	 H0_TxBeamWidthVert = 0.0174533
#	 H0_TxBeamWidthHoriz = 0.0087266
#	 H0_TxPulseWidth = 0.000275
#	 H0_RxSpreading = 35.0
#	 H0_RxGain = 8.0
#	 H0_VTX_Offset = -21.0 / 100.
#	 n_snpt_val = 470
#	 S1_uPa = n_snpt_val
#	 z_snpt_BS_dB = 20.
* math.log10(S1_uPa) # adjusted = backscatteradjustment( S1_angle, S1_twtt, S1_range, S1_uPa, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset, z_snpt_BS_dB) # print (adjusted) # S1_angle = -58.0 # S1_twtt = 0.20588 # S1_range = 164.8 # H0_TxPower = 206.0 # H0_SoundSpeed = 1468.59 # H0_RxAbsorption = 80.0 # H0_TxBeamWidthVert = 0.0174533 # H0_TxBeamWidthHoriz = 0.0087266 # H0_TxPulseWidth = 0.000275 # H0_RxSpreading = 35.0 # H0_RxGain = 8.0 # H0_VTX_Offset = -21.0 / 100. # n_snpt_val = 470 # S1_uPa = n_snpt_val # z_snpt_BS_dB = 20. * math.log10 ( S1_uPa ) # adjusted = backscatteradjustment( S1_angle, S1_twtt, S1_range, S1_uPa, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset, z_snpt_BS_dB) # print (adjusted) # S1_angle = - 58.0 # S1_twtt = 0.20588 # S1_range = 164.8 # H0_TxPower = 197.0 # H0_SoundSpeed = 1468.59 # H0_RxAbsorption = 80.0 # H0_TxBeamWidthVert = 0.0174533 # H0_TxBeamWidthHoriz = 0.0087266 # H0_TxPulseWidth = 0.000275 # H0_RxSpreading = 30.0 # H0_RxGain = 8.0 # H0_VTX_Offset = -21.0 / 100. # n_snpt_val = 470 # S1_uPa = n_snpt_val # z_snpt_BS_dB = 20. * math.log10 ( S1_uPa ) # adjusted = backscatteradjustment( S1_angle, S1_twtt, S1_range, S1_uPa, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset, z_snpt_BS_dB) # print (adjusted) # S1_angle = - 58.0 # S1_twtt = 0.20588 # S1_range = 164.8 # H0_TxPower = 197.0 # H0_SoundSpeed = 1468.59 # H0_RxAbsorption = 80.0 # H0_TxBeamWidthVert = 0.0174533 # H0_TxBeamWidthHoriz = 0.0087266 # H0_TxPulseWidth = 0.000275 # H0_RxSpreading = 35.0 # H0_RxGain = 6.0 # H0_VTX_Offset = -21.0 / 100. # n_snpt_val = 470 # S1_uPa = n_snpt_val # z_snpt_BS_dB = 20. 
* math.log10 ( S1_uPa ) # adjusted = backscatteradjustment( S1_angle, S1_twtt, S1_range, S1_uPa, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset, z_snpt_BS_dB) # print (adjusted) # S1_angle = - 58.0 # S1_twtt = 0.20588 # S1_range = 164.8 # H0_TxPower = 207.0 # H0_SoundSpeed = 1468.59 # H0_RxAbsorption = 80.0 # H0_TxBeamWidthVert = 0.0174533 # H0_TxBeamWidthHoriz = 0.0087266 # H0_TxPulseWidth = 0.000275 # H0_RxSpreading = 30.0 # H0_RxGain = 6.0 # H0_VTX_Offset = -21.0 / 100. # n_snpt_val = 470 # S1_uPa = n_snpt_val # z_snpt_BS_dB = 20. * math.log10 ( S1_uPa ) # adjusted = backscatteradjustment( S1_angle, S1_twtt, S1_range, S1_uPa, H0_TxPower, H0_SoundSpeed, H0_RxAbsorption, H0_TxBeamWidthVert, H0_TxBeamWidthHoriz, H0_TxPulseWidth, H0_RxSpreading, H0_RxGain, H0_VTX_Offset, z_snpt_BS_dB) # print (adjusted) # return ###############################################################################
pktrigg/pygsf
pygsf.py
Python
apache-2.0
38,850
""" Logger di varie info per ogni host """ from novaclient import client as novaclient from ceilometerclient import client as ceiloclient import os from os import environ as env import time def start(hosts, sleep_sec, base_dir): print 'You must be admin to use this script' # start logger time_dir = get_cur_formatted_time() root_path = os.path.join(base_dir, time_dir) keystone = {} keystone['username'] = env['OS_USERNAME'] keystone['password'] = env['OS_PASSWORD'] keystone['auth_url'] = env['OS_AUTH_URL'] keystone['tenant_name'] = env['OS_TENANT_NAME'] nova = (novaclient.Client(3, keystone['username'], keystone['password'], keystone['tenant_name'], keystone['auth_url'], service_type='compute')) ceilo = (ceiloclient.get_client(2, username=keystone['username'], password=keystone['password'], tenant_name=keystone['tenant_name'], auth_url=keystone['auth_url'])) flavor_list = nova.flavors.list() flavor_dict = dict((flavor.id, flavor.name) for flavor in flavor_list) while True: for host in hosts: host_id = '_'.join([host, host]) #host_node: computeX_computeX log_info(nova, ceilo, host, host_id, root_path, flavor_dict) time.sleep(sleep_sec) def log_info(nova, ceilo, host, host_id, root_path, flavor_dict): # log info every interval path = os.path.join(root_path, host) if not os.path.exists(path): os.makedirs(path) print path log_meter_host_cpu_util(ceilo, host_id, path) log_meter_host_mem_util(ceilo, host_id, path) log_meter_host_cpu_mem(ceilo, host_id, path) log_vms_host(nova, host, path, flavor_dict) log_alarm_host_cpu_mem(ceilo, host_id, path) def log_meter_host_cpu_util(ceilo, host_id, path): # sample of cpu util in percentage host_cpu_util = ceilo.samples.list(meter_name='host.cpu.util', limit=1, q=[{'field':'resource_id', 'op':'eq', 'value':host_id}]) host_cpu_util = (host_cpu_util[0].counter_volume)/100 content = get_string_to_write(str(host_cpu_util)) path_file = get_path_to_file(path, "meter_host_cpu_util") write_file(path_file, content) def 
log_meter_host_mem_util(ceilo, host_id, path): # sample of ram usage in percentage host_mem_usage = ceilo.samples.list(meter_name='host.memory.usage', limit=1, q=[{'field':'resource_id', 'op':'eq', 'value':host_id}]) host_mem_usage = (host_mem_usage[0].counter_volume)/100 content = get_string_to_write(str(host_mem_usage)) path_file = get_path_to_file(path, "meter_host_mem_util") write_file(path_file, content) def log_meter_host_cpu_mem(ceilo, host_id, path): # sample of cpu-ram combined meter host_cpu_mem_combo = ceilo.samples.list(meter_name='host.cpu.util.memory.usage', limit=1, q=[{'field':'resource_id', 'op':'eq', 'value':host_id}]) content = get_string_to_write(str(host_cpu_mem_combo[0].counter_volume)) path_file = get_path_to_file(path, "meter_host_cpu_mem") write_file(path_file, content) def log_alarm_host_cpu_mem(ceilo, host_id, path): # overload and underload alarms alarms = ceilo.alarms.list(q=[{'field':'meter', 'op':'eq', 'value':'host.cpu.util.memory.usage'}]) hostname = [x.strip() for x in host_id.split('_')][0] for alarm in alarms: name = alarm.name state = alarm.state #print hostname #print name if hostname in name: name_state = '' if state == 'ok': name_state = name + ': ' + '0' elif state == 'alarm': name_state = name + ': ' + '1' else: name_state = name + ': ' + '2' content = get_string_to_write(name_state) if 'overload' in name: path_file = get_path_to_file(path, "alarm_host_cpu_mem_overload") write_file(path_file, content) if 'underload' in name: path_file = get_path_to_file(path, "alarm_host_cpu_mem_underload") write_file(path_file, content) path_file = get_path_to_file(path, "alarm_host_cpu_mem") write_file(path_file, content) content = get_string_to_write("**********") path_file = get_path_to_file(path, "alarm_host_cpu_mem") write_file(path_file, content) def log_vms_host(nova, host, path, flavor_dict): # vms in host search_opts = {'host': host, 'all_tenants': True} vms = nova.servers.list(search_opts=search_opts) path_file = 
get_path_to_file(path, "vms") id_flavor = [(vm.id, flavor_dict[vm.flavor['id']]) for vm in vms] num_vms = len(vms) content = get_string_to_write(str(num_vms) + ' , ' + str(id_flavor)) write_file(path_file, content) def write_file(path_file, content): out_file = open(path_file,"a") out_file.write(str(content) + os.linesep) out_file.close() def get_path_to_file(path, filename): return os.path.join(path, filename) def get_string_to_write(content): return ", ".join([get_cur_formatted_time(), content]) def get_cur_formatted_time(): cur_time = time.time() formatted_time = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(cur_time)) return formatted_time compute_hosts = ['compute02', 'compute03', 'compute04'] sleep_sec = 150 base_dir = "log" start(compute_hosts, sleep_sec, base_dir)
MisterPup/OpenStack-Neat-Ceilometer
alarm_test/info_logger.py
Python
apache-2.0
6,034
#! /usr/bin/python __author__="kebo" __date__ ="$2009-11-5 11:15:55$" import pcap import sys import string import time import socket import struct import getopt protocols={socket.IPPROTO_TCP:'tcp', socket.IPPROTO_UDP:'udp', socket.IPPROTO_ICMP:'icmp'} node = None mb = None decoder = None def send(payload): sz = len(payload) header= struct.pack("!h", sz) return sys.stdout.write( header + payload ) def print_packet(pktlen, data, timestamp): global mb if not data: return #send(data) #print data #print timestamp print '\n%s.%f' % (time.strftime('%H:%M',time.localtime(timestamp)),timestamp % 60) if __name__=='__main__': p = pcap.pcapObject() #dev = pcap.lookupdev() dev = "eth0" net, mask = pcap.lookupnet(dev) # note: to_ms does nothing on linux p.open_live(dev, 1600, 0, 100) #p.dump_open('dumpfile') p.setfilter(string.join(["tcp","port 22"],' '), 0, 0) # try-except block to catch keyboard interrupt. Failure to shut # down cleanly can result in the interface not being taken out of promisc. # mode #p.setnonblock(1) try: while 1: p.dispatch(1, print_packet) except KeyboardInterrupt: print '%s' % sys.exc_type print 'shutting down' print '%d packets received, %d packets dropped, %d packets dropped by interface' % p.stats()
neeraj9/gprsmonitor
src/sniff.py
Python
apache-2.0
1,409
import sublime import os import subprocess import platform from .bootstrapper import get_config from .notifier import log_info, log_error, log_fail from .thread_progress import run_progress_indicator from .command_thread import CommandThread _appbuilder_path = [] def run_command(command, on_data=None, on_done=None, show_progress=True, in_progress_message="Loading", success_message="", failure_message = "", show_status=True, filter_empty_args=True, no_save=False, **kwargs): appbuilder_path = _get_appbuilder_path() if appbuilder_path == None: on_done(False) return None command = appbuilder_path + command command += ["--analyticsClient", "Sublime"] if filter_empty_args: command = [arg for arg in command if arg] thread = CommandThread(command, on_data, on_done, **kwargs) thread.start() if show_progress: run_progress_indicator(thread, in_progress_message, success_message, failure_message) if show_status: message = kwargs.get("status_message", False) or " ".join(command) sublime.status_message(message) return thread def show_quick_panel(window, items, on_done): window.show_quick_panel(items, on_done) def check_output(*popenargs, **kwargs): r"""Run command with arguments and return its output as a byte string. Backported from Python 2.7 as it's implemented as pure python on stdlib. 
>>> check_output(['/usr/bin/python', '--version']) Python 2.6.2 """ process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] error = subprocess.CalledProcessError(retcode, cmd) error.output = output raise error return output def _get_appbuilder_path(): global _appbuilder_path if not _appbuilder_path: if platform.system() == "Windows": _appbuilder_path.append(_find_win_node_path()) _appbuilder_path.append(_find_win_appbuilder_path()) elif platform.system() == "Darwin": osx_node_path = get_config("osx_node_path") osx_appbuilder_path = get_config("osx_appbuilder_path") if os.path.isfile(osx_node_path) and os.path.isfile(osx_appbuilder_path): _appbuilder_path.append(osx_node_path) _appbuilder_path.append(osx_appbuilder_path) else: return None elif platform.system() == "Linux": linux_node_path = get_config("linux_node_path") linux_appbuilder_path = get_config("linux_appbuilder_path") if linux_node_path == "": linux_node_path_raw = check_output(['/bin/bash', '-i', '-c', "which node"]) # returns byte string linux_node_path = str(linux_node_path_raw.decode("utf-8")).strip() if linux_appbuilder_path == "": linux_appbuilder_path_raw = check_output(['/bin/bash', '-i', '-c', "which appbuilder"]) # returns byte string linux_appbuilder_path = str(linux_appbuilder_path_raw.decode("utf-8")).strip() if os.path.isfile(linux_node_path) and os.path.isfile(linux_appbuilder_path): _appbuilder_path.append(linux_node_path) _appbuilder_path.append(linux_appbuilder_path) else: return None return _appbuilder_path def _find_win_node_path(): paths = _get_paths() for path in paths: try: node_path = os.path.join(path, get_config("win_node_name")) proc = subprocess.Popen([node_path]) proc.terminate() return node_path except WindowsError: pass return get_config("win_node_name") def _find_win_appbuilder_path(): paths = _get_paths() for path in 
paths: try: appbuilder_path = os.path.join(path, get_config("win_appbuilder_name")) proc = subprocess.Popen([appbuilder_path + ".cmd"]) proc.terminate() if "npm" in path: return os.path.join(path, "node_modules", "appbuilder", "bin", get_config("win_appbuilder_name") + ".js") else: return os.path.join(path, get_config("win_appbuilder_name") + ".js") except WindowsError: pass return get_config("win_appbuilder_name") def _get_paths(): return os.environ["PATH"].split(os.pathsep)
dimitardanailov/appbuilder-sublime-package
app_builder/command_executor.py
Python
apache-2.0
4,567
from setuptools import setup, find_packages import sys, os version = '1.0.0' setup(name='brica1', version=version, description="", long_description="""\ """, classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers keywords='', author='ktnyt', author_email='kotone [at] sfc.keio.ac.jp', url='', license='Apache v2', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), include_package_data=True, zip_safe=False, install_requires=[ 'numpy' ], entry_points=""" # -*- Entry points: -*- """, )
wbap/V1
python/setup.py
Python
apache-2.0
657
from __future__ import absolute_import, division, print_function, unicode_literals from amaascore.core.amaas_model import AMaaSModel class Reference(AMaaSModel): def __init__(self, reference_value, reference_primary=False, *args, **kwargs): self.reference_value = reference_value self.reference_primary = reference_primary super(Reference, self).__init__(*args, **kwargs) @property def reference_primary(self): if hasattr(self, '_reference_primary'): return self._reference_primary @reference_primary.setter def reference_primary(self, value): """ Always convert to bool if the service/database returns 0 or 1 """ if value is not None: self._reference_primary = True if value else False
amaas-fintech/amaas-core-sdk-python
amaascore/core/reference.py
Python
apache-2.0
800
from tensorflow.keras.applications.vgg16 import VGG16 import tensorflowjs as tfjs model = VGG16(weights='imagenet') tfjs.converters.save_keras_model(model, 'vgg16_tfjs')
tensorflow/tfjs-examples
visualize-convnet/get_vgg16.py
Python
apache-2.0
172
# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" import collections import datetime from oslo.utils import timeutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_db.sqlalchemy import session as db_session from oslo_db.sqlalchemy import utils as db_utils from sqlalchemy.orm.exc import NoResultFound from ironic.common import exception from ironic.common.i18n import _ from ironic.common.i18n import _LW from ironic.common import states from ironic.common import utils from ironic.db import api from ironic.db.sqlalchemy import models from ironic.openstack.common import log CONF = cfg.CONF CONF.import_opt('heartbeat_timeout', 'ironic.conductor.manager', group='conductor') LOG = log.getLogger(__name__) _FACADE = None def _create_facade_lazily(): global _FACADE if _FACADE is None: _FACADE = db_session.EngineFacade.from_config(CONF) return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) def get_backend(): """The backend is this module itself.""" return Connection() def model_query(model, *args, **kwargs): """Query helper for simpler session usage. 
:param session: if present, the session to use """ session = kwargs.get('session') or get_session() query = session.query(model, *args) return query def add_identity_filter(query, value): """Adds an identity filter to a query. Filters results by ID, if supplied value is a valid integer. Otherwise attempts to filter results by UUID. :param query: Initial query to add filter to. :param value: Value for filtering results by. :return: Modified query. """ if utils.is_int_like(value): return query.filter_by(id=value) elif utils.is_uuid_like(value): return query.filter_by(uuid=value) else: raise exception.InvalidIdentity(identity=value) def add_port_filter(query, value): """Adds a port-specific filter to a query. Filters results by address, if supplied value is a valid MAC address. Otherwise attempts to filter results by identity. :param query: Initial query to add filter to. :param value: Value for filtering results by. :return: Modified query. """ if utils.is_valid_mac(value): return query.filter_by(address=value) else: return add_identity_filter(query, value) def add_port_filter_by_node(query, value): if utils.is_int_like(value): return query.filter_by(node_id=value) else: query = query.join(models.Node, models.Port.node_id == models.Node.id) return query.filter(models.Node.uuid == value) def add_node_filter_by_chassis(query, value): if utils.is_int_like(value): return query.filter_by(chassis_id=value) else: query = query.join(models.Chassis, models.Node.chassis_id == models.Chassis.id) return query.filter(models.Chassis.uuid == value) def _check_port_change_forbidden(port, session): node_id = port['node_id'] if node_id is not None: query = model_query(models.Node, session=session) query = query.filter_by(id=node_id) node_ref = query.one() if node_ref['reservation'] is not None: raise exception.NodeLocked(node=node_ref['uuid'], host=node_ref['reservation']) def _paginate_query(model, limit=None, marker=None, sort_key=None, sort_dir=None, query=None): if not query: 
query = model_query(model) sort_keys = ['id'] if sort_key and sort_key not in sort_keys: sort_keys.insert(0, sort_key) query = db_utils.paginate_query(query, model, limit, sort_keys, marker=marker, sort_dir=sort_dir) return query.all() class Connection(api.Connection): """SqlAlchemy connection.""" def __init__(self): pass def _add_nodes_filters(self, query, filters): if filters is None: filters = [] if 'chassis_uuid' in filters: # get_chassis_by_uuid() to raise an exception if the chassis # is not found chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid']) query = query.filter_by(chassis_id=chassis_obj.id) if 'associated' in filters: if filters['associated']: query = query.filter(models.Node.instance_uuid != None) else: query = query.filter(models.Node.instance_uuid == None) if 'reserved' in filters: if filters['reserved']: query = query.filter(models.Node.reservation != None) else: query = query.filter(models.Node.reservation == None) if 'maintenance' in filters: query = query.filter_by(maintenance=filters['maintenance']) if 'driver' in filters: query = query.filter_by(driver=filters['driver']) if 'provision_state' in filters: query = query.filter_by(provision_state=filters['provision_state']) if 'provisioned_before' in filters: limit = timeutils.utcnow() - datetime.timedelta( seconds=filters['provisioned_before']) query = query.filter(models.Node.provision_updated_at < limit) return query def get_nodeinfo_list(self, columns=None, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): # list-ify columns default values because it is bad form # to include a mutable list in function definitions. 
if columns is None: columns = [models.Node.id] else: columns = [getattr(models.Node, c) for c in columns] query = model_query(*columns, base_model=models.Node) query = self._add_nodes_filters(query, filters) return _paginate_query(models.Node, limit, marker, sort_key, sort_dir, query) def get_node_list(self, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): query = model_query(models.Node) query = self._add_nodes_filters(query, filters) return _paginate_query(models.Node, limit, marker, sort_key, sort_dir, query) def reserve_node(self, tag, node_id): session = get_session() with session.begin(): query = model_query(models.Node, session=session) query = add_identity_filter(query, node_id) # be optimistic and assume we usually create a reservation count = query.filter_by(reservation=None).update( {'reservation': tag}, synchronize_session=False) try: node = query.one() if count != 1: # Nothing updated and node exists. Must already be # locked. raise exception.NodeLocked(node=node_id, host=node['reservation']) return node except NoResultFound: raise exception.NodeNotFound(node_id) def release_node(self, tag, node_id): session = get_session() with session.begin(): query = model_query(models.Node, session=session) query = add_identity_filter(query, node_id) # be optimistic and assume we usually release a reservation count = query.filter_by(reservation=tag).update( {'reservation': None}, synchronize_session=False) try: if count != 1: node = query.one() if node['reservation'] is None: raise exception.NodeNotLocked(node=node_id) else: raise exception.NodeLocked(node=node_id, host=node['reservation']) except NoResultFound: raise exception.NodeNotFound(node_id) def create_node(self, values): # ensure defaults are present for new nodes if 'uuid' not in values: values['uuid'] = utils.generate_uuid() if 'power_state' not in values: values['power_state'] = states.NOSTATE if 'provision_state' not in values: # TODO(deva): change this to ENROLL 
values['provision_state'] = states.AVAILABLE node = models.Node() node.update(values) try: node.save() except db_exc.DBDuplicateEntry as exc: if 'name' in exc.columns: raise exception.DuplicateName(name=values['name']) elif 'instance_uuid' in exc.columns: raise exception.InstanceAssociated( instance_uuid=values['instance_uuid'], node=values['uuid']) raise exception.NodeAlreadyExists(uuid=values['uuid']) return node def get_node_by_id(self, node_id): query = model_query(models.Node).filter_by(id=node_id) try: return query.one() except NoResultFound: raise exception.NodeNotFound(node=node_id) def get_node_by_uuid(self, node_uuid): query = model_query(models.Node).filter_by(uuid=node_uuid) try: return query.one() except NoResultFound: raise exception.NodeNotFound(node=node_uuid) def get_node_by_name(self, node_name): query = model_query(models.Node).filter_by(name=node_name) try: return query.one() except NoResultFound: raise exception.NodeNotFound(node=node_name) def get_node_by_instance(self, instance): if not utils.is_uuid_like(instance): raise exception.InvalidUUID(uuid=instance) query = (model_query(models.Node) .filter_by(instance_uuid=instance)) try: result = query.one() except NoResultFound: raise exception.InstanceNotFound(instance=instance) return result def destroy_node(self, node_id): session = get_session() with session.begin(): query = model_query(models.Node, session=session) query = add_identity_filter(query, node_id) try: node_ref = query.one() except NoResultFound: raise exception.NodeNotFound(node=node_id) # Get node ID, if an UUID was supplied. The ID is # required for deleting all ports, attached to the node. 
if utils.is_uuid_like(node_id): node_id = node_ref['id'] port_query = model_query(models.Port, session=session) port_query = add_port_filter_by_node(port_query, node_id) port_query.delete() query.delete() def update_node(self, node_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Node.") raise exception.InvalidParameterValue(err=msg) try: return self._do_update_node(node_id, values) except db_exc.DBDuplicateEntry as e: if 'name' in e.columns: raise exception.DuplicateName(name=values['name']) elif 'uuid' in e.columns: raise exception.NodeAlreadyExists(uuid=values['uuid']) elif 'instance_uuid' in e.columns: raise exception.InstanceAssociated( instance_uuid=values['instance_uuid'], node=node_id) else: raise e def _do_update_node(self, node_id, values): session = get_session() with session.begin(): query = model_query(models.Node, session=session) query = add_identity_filter(query, node_id) try: ref = query.with_lockmode('update').one() except NoResultFound: raise exception.NodeNotFound(node=node_id) # Prevent instance_uuid overwriting if values.get("instance_uuid") and ref.instance_uuid: raise exception.NodeAssociated(node=node_id, instance=ref.instance_uuid) if 'provision_state' in values: values['provision_updated_at'] = timeutils.utcnow() ref.update(values) return ref def get_port_by_id(self, port_id): query = model_query(models.Port).filter_by(id=port_id) try: return query.one() except NoResultFound: raise exception.PortNotFound(port=port_id) def get_port_by_uuid(self, port_uuid): query = model_query(models.Port).filter_by(uuid=port_uuid) try: return query.one() except NoResultFound: raise exception.PortNotFound(port=port_uuid) def get_port_by_address(self, address): query = model_query(models.Port).filter_by(address=address) try: return query.one() except NoResultFound: raise exception.PortNotFound(port=address) def get_port_list(self, limit=None, marker=None, sort_key=None, 
sort_dir=None): return _paginate_query(models.Port, limit, marker, sort_key, sort_dir) def get_ports_by_node_id(self, node_id, limit=None, marker=None, sort_key=None, sort_dir=None): query = model_query(models.Port) query = query.filter_by(node_id=node_id) return _paginate_query(models.Port, limit, marker, sort_key, sort_dir, query) def create_port(self, values): if not values.get('uuid'): values['uuid'] = utils.generate_uuid() port = models.Port() port.update(values) try: port.save() except db_exc.DBDuplicateEntry as exc: if 'address' in exc.columns: raise exception.MACAlreadyExists(mac=values['address']) raise exception.PortAlreadyExists(uuid=values['uuid']) return port def update_port(self, port_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Port.") raise exception.InvalidParameterValue(err=msg) session = get_session() try: with session.begin(): query = model_query(models.Port, session=session) query = add_port_filter(query, port_id) ref = query.one() ref.update(values) except NoResultFound: raise exception.PortNotFound(port=port_id) except db_exc.DBDuplicateEntry: raise exception.MACAlreadyExists(mac=values['address']) return ref def destroy_port(self, port_id): session = get_session() with session.begin(): query = model_query(models.Port, session=session) query = add_port_filter(query, port_id) try: ref = query.one() except NoResultFound: raise exception.PortNotFound(port=port_id) _check_port_change_forbidden(ref, session) query.delete() def get_chassis_by_id(self, chassis_id): query = model_query(models.Chassis).filter_by(id=chassis_id) try: return query.one() except NoResultFound: raise exception.ChassisNotFound(chassis=chassis_id) def get_chassis_by_uuid(self, chassis_uuid): query = model_query(models.Chassis).filter_by(uuid=chassis_uuid) try: return query.one() except NoResultFound: raise exception.ChassisNotFound(chassis=chassis_uuid) def get_chassis_list(self, 
limit=None, marker=None, sort_key=None, sort_dir=None): return _paginate_query(models.Chassis, limit, marker, sort_key, sort_dir) def create_chassis(self, values): if not values.get('uuid'): values['uuid'] = utils.generate_uuid() chassis = models.Chassis() chassis.update(values) try: chassis.save() except db_exc.DBDuplicateEntry: raise exception.ChassisAlreadyExists(uuid=values['uuid']) return chassis def update_chassis(self, chassis_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' in values: msg = _("Cannot overwrite UUID for an existing Chassis.") raise exception.InvalidParameterValue(err=msg) session = get_session() with session.begin(): query = model_query(models.Chassis, session=session) query = add_identity_filter(query, chassis_id) count = query.update(values) if count != 1: raise exception.ChassisNotFound(chassis=chassis_id) ref = query.one() return ref def destroy_chassis(self, chassis_id): def chassis_not_empty(session): """Checks whether the chassis does not have nodes.""" query = model_query(models.Node, session=session) query = add_node_filter_by_chassis(query, chassis_id) return query.count() != 0 session = get_session() with session.begin(): if chassis_not_empty(session): raise exception.ChassisNotEmpty(chassis=chassis_id) query = model_query(models.Chassis, session=session) query = add_identity_filter(query, chassis_id) count = query.delete() if count != 1: raise exception.ChassisNotFound(chassis=chassis_id) def register_conductor(self, values, update_existing=False): session = get_session() with session.begin(): query = (model_query(models.Conductor, session=session) .filter_by(hostname=values['hostname'])) try: ref = query.one() if ref.online is True and not update_existing: raise exception.ConductorAlreadyRegistered( conductor=values['hostname']) except NoResultFound: ref = models.Conductor() ref.update(values) # always set online and updated_at fields when registering # a conductor, especially when updating an 
existing one ref.update({'updated_at': timeutils.utcnow(), 'online': True}) ref.save(session) return ref def get_conductor(self, hostname): try: return (model_query(models.Conductor) .filter_by(hostname=hostname, online=True) .one()) except NoResultFound: raise exception.ConductorNotFound(conductor=hostname) def unregister_conductor(self, hostname): session = get_session() with session.begin(): query = (model_query(models.Conductor, session=session) .filter_by(hostname=hostname, online=True)) count = query.update({'online': False}) if count == 0: raise exception.ConductorNotFound(conductor=hostname) def touch_conductor(self, hostname): session = get_session() with session.begin(): query = (model_query(models.Conductor, session=session) .filter_by(hostname=hostname)) # since we're not changing any other field, manually set updated_at # and since we're heartbeating, make sure that online=True count = query.update({'updated_at': timeutils.utcnow(), 'online': True}) if count == 0: raise exception.ConductorNotFound(conductor=hostname) def clear_node_reservations_for_conductor(self, hostname): session = get_session() nodes = [] with session.begin(): query = model_query(models.Node, session=session).filter_by( reservation=hostname) nodes = [node['uuid'] for node in query] query.update({'reservation': None}) if nodes: nodes = ', '.join(nodes) LOG.warn(_LW('Cleared reservations held by %(hostname)s: ' '%(nodes)s'), {'hostname': hostname, 'nodes': nodes}) def get_active_driver_dict(self, interval=None): if interval is None: interval = CONF.conductor.heartbeat_timeout limit = timeutils.utcnow() - datetime.timedelta(seconds=interval) result = (model_query(models.Conductor) .filter_by(online=True) .filter(models.Conductor.updated_at >= limit) .all()) # build mapping of drivers to the set of hosts which support them d2c = collections.defaultdict(set) for row in result: for driver in row['drivers']: d2c[driver].add(row['hostname']) return d2c
ramineni/myironic
ironic/db/sqlalchemy/api.py
Python
apache-2.0
22,173
"""Unit tests for PostgreSQL-dialect SQL rendering in pypika.

Covers ARRAY literals, JSON/JSONB value construction and operators
(``->``, ``->>``, ``#>``, ``#>>``, ``@>``, ``<@``, ``?``, ``?&``, ``?|``),
``DISTINCT ON``, and the ``RETURNING`` clause. Each test builds a query with
``PostgreSQLQuery`` and asserts the exact SQL string produced by ``str(q)``.
"""
import unittest
from collections import OrderedDict

from pypika import (
    Array,
    Field,
    JSON,
    QueryException,
    Table,
)
from pypika.dialects import PostgreSQLQuery


class InsertTests(unittest.TestCase):
    """INSERT statements with PostgreSQL ARRAY values."""

    table_abc = Table("abc")

    def test_array_keyword(self):
        q = PostgreSQLQuery.into(self.table_abc).insert(1, [1, "a", True])

        # Python list values render as an ARRAY[...] literal; bool -> lowercase 'true'.
        self.assertEqual("INSERT INTO \"abc\" VALUES (1,ARRAY[1,'a',true])", str(q))


class JSONObjectTests(unittest.TestCase):
    """Rendering of JSON values built from Python dicts and lists."""

    def test_alias_set_correctly(self):
        table = Table('jsonb_table')
        q = PostgreSQLQuery.from_('abc').select(table.value.get_text_value('a').as_('name'))

        self.assertEqual('''SELECT "value"->>'a' "name" FROM "abc"''', str(q))

    def test_json_value_from_dict(self):
        q = PostgreSQLQuery.select(JSON({"a": "foo"}))

        self.assertEqual('SELECT \'{"a":"foo"}\'', str(q))

    def test_json_value_from_array_num(self):
        q = PostgreSQLQuery.select(JSON([1, 2, 3]))

        self.assertEqual("SELECT '[1,2,3]'", str(q))

    def test_json_value_from_array_str(self):
        q = PostgreSQLQuery.select(JSON(["a", "b", "c"]))

        self.assertEqual('SELECT \'["a","b","c"]\'', str(q))

    def test_json_value_from_dict_recursive(self):
        q = PostgreSQLQuery.select(JSON({"a": "z", "b": {"c": "foo"}, "d": 1}))

        # gotta split this one up to avoid the indeterminate order
        # (dict iteration order of the rendered members is not pinned here,
        # so compare the outer shell and the member set separately)
        sql = str(q)
        start, end = 9, -2
        self.assertEqual("SELECT '{}'", sql[:start] + sql[end:])

        members_set = set(sql[start:end].split(","))
        self.assertSetEqual({'"a":"z"', '"b":{"c":"foo"}', '"d":1'}, members_set)


class JSONOperatorsTests(unittest.TestCase):
    """JSON extraction operators: ``->``, ``->>``, ``#>``, ``#>>``."""

    # reference https://www.postgresql.org/docs/9.5/functions-json.html
    table_abc = Table("abc")

    def test_get_json_value_by_key(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_json_value("dates"))

        self.assertEqual('SELECT * FROM "abc" WHERE "json"->\'dates\'', str(q))

    def test_get_json_value_by_index(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_json_value(1))

        # Integer argument renders unquoted (array index, not object key).
        self.assertEqual('SELECT * FROM "abc" WHERE "json"->1', str(q))

    def test_get_text_value_by_key(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_text_value("dates"))

        self.assertEqual('SELECT * FROM "abc" WHERE "json"->>\'dates\'', str(q))

    def test_get_text_value_by_index(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_text_value(1))

        self.assertEqual('SELECT * FROM "abc" WHERE "json"->>1', str(q))

    def test_get_path_json_value(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_path_json_value("{a,b}"))

        self.assertEqual('SELECT * FROM "abc" WHERE "json"#>\'{a,b}\'', str(q))

    def test_get_path_text_value(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_path_text_value("{a,b}"))

        self.assertEqual('SELECT * FROM "abc" WHERE "json"#>>\'{a,b}\'', str(q))


class JSONBOperatorsTests(unittest.TestCase):
    """JSONB containment/existence operators: ``@>``, ``<@``, ``?``, ``?&``, ``?|``."""

    # reference https://www.postgresql.org/docs/9.5/functions-json.html
    table_abc = Table("abc")

    def test_json_contains_for_json(self):
        q = PostgreSQLQuery.select(JSON({"a": 1, "b": 2}).contains({"a": 1}))

        # gotta split this one up to avoid the indeterminate order
        sql = str(q)
        start, end = 9, -13
        self.assertEqual("SELECT '{}'@>'{\"a\":1}'", sql[:start] + sql[end:])

        members_set = set(sql[start:end].split(","))
        self.assertSetEqual({'"a":1', '"b":2'}, members_set)

    def test_json_contains_for_field(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.contains({"dates": "2018-07-10 - 2018-07-17"}))
        )

        self.assertEqual(
            "SELECT * " 'FROM "abc" ' 'WHERE "json"@>\'{"dates":"2018-07-10 - 2018-07-17"}\'',
            str(q),
        )

    def test_json_contained_by_using_str_arg(self):
        # OrderedDict keeps the member order deterministic for the exact-string assert.
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(
                self.table_abc.json.contained_by(
                    OrderedDict(
                        [
                            ("dates", "2018-07-10 - 2018-07-17"),
                            ("imported", "8"),
                        ]
                    )
                )
            )
        )
        self.assertEqual(
            'SELECT * FROM "abc" ' 'WHERE "json"<@\'{"dates":"2018-07-10 - 2018-07-17","imported":"8"}\'',
            str(q),
        )

    def test_json_contained_by_using_list_arg(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.contained_by(["One", "Two", "Three"]))
        )

        self.assertEqual('SELECT * FROM "abc" WHERE "json"<@\'["One","Two","Three"]\'', str(q))

    def test_json_contained_by_with_complex_criterion(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.contained_by(["One", "Two", "Three"]) & (self.table_abc.id == 26))
        )

        self.assertEqual(
            'SELECT * FROM "abc" WHERE "json"<@\'["One","Two","Three"]\' AND "id"=26',
            str(q),
        )

    def test_json_has_key(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.has_key("dates"))

        self.assertEqual('SELECT * FROM "abc" WHERE "json"?\'dates\'', str(q))

    def test_json_has_keys(self):
        q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.has_keys(["dates", "imported"]))

        self.assertEqual("SELECT * FROM \"abc\" WHERE \"json\"?&ARRAY['dates','imported']", str(q))

    def test_json_has_any_keys(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.json.has_any_keys(["dates", "imported"]))
        )

        self.assertEqual("SELECT * FROM \"abc\" WHERE \"json\"?|ARRAY['dates','imported']", str(q))

    def test_subnet_contains_inet(self):
        # ``>>`` / ``lshift`` here exercise the network operators, not bit shifts.
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select(self.table_abc.a.lshift(2))
            .where(self.table_abc.cidr >> "1.1.1.1")
        )

        self.assertEqual("SELECT \"a\"<<2 FROM \"abc\" WHERE \"cidr\">>'1.1.1.1'", str(q))


class DistinctOnTests(unittest.TestCase):
    """PostgreSQL-specific ``SELECT DISTINCT ON (...)``."""

    table_abc = Table("abc")

    def test_distinct_on(self):
        q = PostgreSQLQuery.from_(self.table_abc).distinct_on("lname", self.table_abc.fname).select("lname", "id")

        self.assertEqual('''SELECT DISTINCT ON("lname","fname") "lname","id" FROM "abc"''', str(q))


class ArrayTests(unittest.TestCase):
    """Rendering of the ``Array`` term, including nesting and aliasing."""

    def test_array_syntax(self):
        tb = Table("tb")
        q = PostgreSQLQuery.from_(tb).select(Array(1, "a", ["b", 2, 3]))

        self.assertEqual(str(q), "SELECT ARRAY[1,'a',ARRAY['b',2,3]] FROM \"tb\"")

    def test_render_alias_in_array_sql(self):
        tb = Table("tb")

        q = PostgreSQLQuery.from_(tb).select(Array(tb.col).as_("different_name"))
        self.assertEqual(str(q), 'SELECT ARRAY["col"] "different_name" FROM "tb"')


class ReturningClauseTests(unittest.TestCase):
    """Validation and rendering of the ``RETURNING`` clause."""

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.table_abc = Table('abc')

    def test_returning_from_missing_table_raises_queryexception(self):
        field_from_diff_table = Field('xyz', table=Table('other'))

        with self.assertRaisesRegex(QueryException, "You can't return from other tables"):
            (
                PostgreSQLQuery.from_(self.table_abc)
                .where(self.table_abc.foo == self.table_abc.bar)
                .delete()
                .returning(field_from_diff_table)
            )

    def test_queryexception_if_returning_used_on_invalid_query(self):
        # RETURNING is only valid on INSERT/UPDATE/DELETE, not plain SELECT.
        with self.assertRaisesRegex(QueryException, "Returning can't be used in this query"):
            PostgreSQLQuery.from_(self.table_abc).select('abc').returning('abc')

    def test_no_queryexception_if_returning_used_on_valid_query_type(self):
        # No exceptions for insert, update and delete queries
        with self.subTest('DELETE'):
            PostgreSQLQuery.from_(self.table_abc).where(self.table_abc.foo == self.table_abc.bar).delete().returning(
                "id"
            )
        with self.subTest('UPDATE'):
            PostgreSQLQuery.update(self.table_abc).where(self.table_abc.foo == 0).set("foo", "bar").returning("id")
        with self.subTest('INSERT'):
            PostgreSQLQuery.into(self.table_abc).insert('abc').returning('abc')

    def test_return_field_from_join_table(self):
        new_table = Table('xyz')
        q = (
            PostgreSQLQuery.update(self.table_abc)
            .join(new_table)
            .on(new_table.id == self.table_abc.xyz)
            .where(self.table_abc.foo == 0)
            .set("foo", "bar")
            .returning(new_table.a)
        )

        self.assertEqual(
            'UPDATE "abc" '
            'JOIN "xyz" ON "xyz"."id"="abc"."xyz" '
            'SET "foo"=\'bar\' '
            'WHERE "abc"."foo"=0 '
            'RETURNING "xyz"."a"',
            str(q),
        )
kayak/pypika
pypika/tests/dialects/test_postgresql.py
Python
apache-2.0
9,532
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def hash_value(value, hash_length: int = 12) -> str:
    """Return a short, deterministic hash of ``value``.

    The value is converted with ``str()``, UTF-8 encoded, MD5-hashed, and
    the hex digest is truncated to ``hash_length`` characters. MD5 is used
    purely as a fingerprint here, not for security.

    Args:
        value: any object; hashed via its string representation.
        hash_length: number of hex characters to keep (max 32 for MD5).

    Returns:
        The first ``hash_length`` characters of the MD5 hex digest.
    """
    import hashlib

    encoded = str(value).encode("utf-8")
    digest = hashlib.md5(encoded).hexdigest()
    return digest[:hash_length]
polyaxon/polyaxon
core/polyaxon/utils/hashing.py
Python
apache-2.0
756
# -*- coding: utf-8 -*-
"""
    Core Application
        - manage multiplexer connected with Analog Discovery 2 | receive voltage from sensors to estimate distance
        API : WaveForms API (via ctypes bindings to the dwf shared library)
    Author: Thatchakon Jom-ud
"""
from ctypes import *
from dwfconstants import *
import sys
import time


class Core():
    # NOTE(review): these are *class* attributes shared by all Core instances;
    # fine for a single-device app, but confirm only one Core is ever created.
    hdwf = c_int()          # device handle returned by FDwfDeviceOpen
    hzSys = c_double()      # internal digital-out clock frequency (Hz)
    volt = (c_double*1)()   # one-element buffer for analog-in sample data
    dwf = object            # placeholder; replaced by the loaded DLL in set_library()
    # Cubic calibration polynomial mapping measured voltage to distance.
    # Units are not stated in this file — presumably cm; confirm against the
    # sensor's datasheet/calibration.
    distance = lambda self, volt: ((0.0325*(volt**3)) - (0.5832*(volt**2)) + (0.0768*volt) + 9.8164)

    def __init__(self, window):
        """Store the GUI window (used for status signals) and load the dwf library."""
        self.gui = window
        self.set_library()

    def set_library(self):
        """Load the platform-specific WaveForms (dwf) shared library via ctypes."""
        if sys.platform.startswith("win"):
            self.dwf = cdll.dwf
        elif sys.platform.startswith("darwin"):
            self.dwf = cdll.LoadLibrary("/Library/Frameworks/dwf.framework/dwf")
        else:
            self.dwf = cdll.LoadLibrary("libdwf.so")

    def set_device(self):
        """Open the first available device; on success enable supplies and digital outputs.

        Always returns False — presumably used by the caller to reset a
        "connecting" flag in the GUI; confirm against the caller.
        """
        self.dwf.FDwfDeviceOpen(c_int(-1), byref(self.hdwf))
        self.gui.status.emit("AD2: "+self.get_device_status())
        if self.get_device_status() != "Not Connect":
            self.enable_power_supplies(True)
            self.setup_digital_output()
        return False

    def get_device_status(self):
        """Return "Connected" if a valid device handle is held, else "Not Connect"."""
        return "Connected" if (self.hdwf.value != hdwfNone.value) else "Not Connect"

    def enable_power_supplies(self, enable):
        """Configure the +/-5 V supplies, then master-enable (or disable) analog IO."""
        # set up analog IO channel nodes
        # enable positive supply
        self.dwf.FDwfAnalogIOChannelNodeSet(self.hdwf, c_int(0), c_int(0), c_double(True))
        # set voltage to 5 V
        self.dwf.FDwfAnalogIOChannelNodeSet(self.hdwf, c_int(0), c_int(1), c_double(5))
        # enable negative supply
        self.dwf.FDwfAnalogIOChannelNodeSet(self.hdwf, c_int(1), c_int(0), c_double(True))
        # set voltage to -5 V
        self.dwf.FDwfAnalogIOChannelNodeSet(self.hdwf, c_int(1), c_int(1), c_double(-5))
        self.dwf.FDwfAnalogIOEnableSet(self.hdwf, c_int(enable))

    def setup_digital_output(self):
        """Drive digital outputs 0..2 as square waves with 1/2/4-tick periods.

        All three channels share the same 2 kHz divider; the differing
        low/high tick counts produce three different frequencies —
        presumably the multiplexer select lines. Confirm against the wiring.
        """
        self.dwf.FDwfDigitalOutInternalClockInfo(self.hdwf, byref(self.hzSys))
        self.dwf.FDwfDigitalOutEnableSet(self.hdwf, c_int(0), c_int(1))
        # prescaler to 2kHz
        self.dwf.FDwfDigitalOutDividerSet(self.hdwf, c_int(0), c_int(int(self.hzSys.value / 2e3)))
        # 1 tick low, 1 tick high
        self.dwf.FDwfDigitalOutCounterSet(self.hdwf, c_int(0), c_int(1), c_int(1))
        self.dwf.FDwfDigitalOutEnableSet(self.hdwf, c_int(1), c_int(1))
        # prescaler to 2kHz
        self.dwf.FDwfDigitalOutDividerSet(self.hdwf, c_int(1), c_int(int(self.hzSys.value / 2e3)))
        # 2 tick low, 2 tick high
        self.dwf.FDwfDigitalOutCounterSet(self.hdwf, c_int(1), c_int(2), c_int(2))
        self.dwf.FDwfDigitalOutEnableSet(self.hdwf, c_int(2), c_int(1))
        # prescaler to 2kHz
        self.dwf.FDwfDigitalOutDividerSet(self.hdwf, c_int(2), c_int(int(self.hzSys.value / 2e3)))
        # 4 tick low, 4 tick high
        self.dwf.FDwfDigitalOutCounterSet(self.hdwf, c_int(2), c_int(4), c_int(4))
        self.dwf.FDwfDigitalOutConfigure(self.hdwf, c_int(1))

    def get_volt_and_distance(self):
        """Acquire one analog-in capture and return (voltage, estimated distance).

        Configures channel 0 at 20 MHz with a 4000-sample buffer, starts a
        single acquisition, polls until the capture is done, then reads one
        sample into ``self.volt``.
        """
        sts = c_byte()
        self.dwf.FDwfAnalogInFrequencySet(self.hdwf, c_double(20000000.0))
        self.dwf.FDwfAnalogInBufferSizeSet(self.hdwf, c_int(4000))
        self.dwf.FDwfAnalogInChannelEnableSet(self.hdwf, c_int(0), c_bool(True))
        self.dwf.FDwfAnalogInConfigure(self.hdwf, c_bool(False), c_bool(True))
        while True:
            self.dwf.FDwfAnalogInStatus(self.hdwf, c_int(1), byref(sts))
            if sts.value == DwfStateDone.value:
                break
            # avoid a busy-wait while the capture completes
            time.sleep(0.1)
        self.dwf.FDwfAnalogInStatusData(self.hdwf, c_int(0), self.volt, 1)
        return self.volt[0], self.distance(self.volt[0])

    def disconnect_ad2(self):
        """Power down supplies, close all devices, clear the handle, and notify the GUI."""
        self.enable_power_supplies(False)
        self.dwf.FDwfDeviceCloseAll()
        self.hdwf = c_int()
        self.gui.status.emit("AD2: Disconnected")
        return False
dsjin/ThinPotApplication
core.py
Python
apache-2.0
3,973
""" Demo platform that has two fake binary sensors. For more details about this platform, please refer to the documentation https://home-assistant.io/components/demo/ """ import homeassistant.util.dt as dt_util from homeassistant.components.calendar import CalendarEventDevice from homeassistant.components.google import CONF_DEVICE_ID, CONF_NAME def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Demo Calendar platform.""" calendar_data_future = DemoGoogleCalendarDataFuture() calendar_data_current = DemoGoogleCalendarDataCurrent() add_devices([ DemoGoogleCalendar(hass, calendar_data_future, { CONF_NAME: 'Future Event', CONF_DEVICE_ID: 'future_event', }), DemoGoogleCalendar(hass, calendar_data_current, { CONF_NAME: 'Current Event', CONF_DEVICE_ID: 'current_event', }), ]) class DemoGoogleCalendarData(object): """Representation of a Demo Calendar element.""" # pylint: disable=no-self-use def update(self): """Return true so entity knows we have new data.""" return True class DemoGoogleCalendarDataFuture(DemoGoogleCalendarData): """Representation of a Demo Calendar for a future event.""" def __init__(self): """Set the event to a future event.""" one_hour_from_now = dt_util.now() \ + dt_util.dt.timedelta(minutes=30) self.event = { 'start': { 'dateTime': one_hour_from_now.isoformat() }, 'end': { 'dateTime': (one_hour_from_now + dt_util.dt. timedelta(minutes=60)).isoformat() }, 'summary': 'Future Event', } class DemoGoogleCalendarDataCurrent(DemoGoogleCalendarData): """Representation of a Demo Calendar for a current event.""" def __init__(self): """Set the event data.""" middle_of_event = dt_util.now() \ - dt_util.dt.timedelta(minutes=30) self.event = { 'start': { 'dateTime': middle_of_event.isoformat() }, 'end': { 'dateTime': (middle_of_event + dt_util.dt. 
timedelta(minutes=60)).isoformat() }, 'summary': 'Current Event', } class DemoGoogleCalendar(CalendarEventDevice): """Representation of a Demo Calendar element.""" def __init__(self, hass, calendar_data, data): """Initialize Google Calendar but without the API calls.""" self.data = calendar_data super().__init__(hass, data)
shaftoe/home-assistant
homeassistant/components/calendar/demo.py
Python
apache-2.0
2,739
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Module to build pipeline fragment that produces given PCollections.

For internal use only; no backwards-compatibility guarantees.
"""

from __future__ import absolute_import

import apache_beam as beam
from apache_beam.pipeline import PipelineVisitor
from apache_beam.testing.test_stream import TestStream


class PipelineFragment(object):
  """A fragment of a pipeline definition.

  A pipeline fragment is built from the original pipeline definition to
  include only PTransforms that are necessary to produce the given
  PCollections.
  """

  def __init__(self, pcolls, options=None):
    """Constructor of PipelineFragment.

    Args:
      pcolls: (List[PCollection]) a list of PCollections to build pipeline
          fragment for.
      options: (PipelineOptions) the pipeline options for the implicit
          pipeline run.
    """
    assert len(pcolls) > 0, (
        'Need at least 1 PCollection as the target data to build a pipeline '
        'fragment that produces it.')
    for pcoll in pcolls:
      assert isinstance(pcoll, beam.pvalue.PCollection), (
          '{} is not an apache_beam.pvalue.PCollection.'.format(pcoll))
    # No modification to self._user_pipeline is allowed.
    self._user_pipeline = pcolls[0].pipeline
    # These are user PCollections. Do not use them to deduce anything that
    # will be executed by any runner. Instead, use
    # `self._runner_pcolls_to_user_pcolls.keys()` to get copied PCollections.
    self._pcolls = set(pcolls)
    for pcoll in self._pcolls:
      assert pcoll.pipeline is self._user_pipeline, (
          '{} belongs to a different user pipeline than other PCollections '
          'given and cannot be used to build a pipeline fragment that produces '
          'the given PCollections.'.format(pcoll))
    self._options = options

    # A copied pipeline instance for modification without changing the user
    # pipeline instance held by the end user. This instance can be processed
    # into a pipeline fragment that later run by the underlying runner.
    self._runner_pipeline = self._build_runner_pipeline()
    _, self._context = self._runner_pipeline.to_runner_api(
        return_context=True, use_fake_coders=True)
    # Local import to avoid a circular dependency at module load time —
    # TODO confirm; pipeline_instrument also imports from this package.
    from apache_beam.runners.interactive import pipeline_instrument as instr
    self._runner_pcoll_to_id = instr.pcolls_to_pcoll_id(
        self._runner_pipeline, self._context)
    # Correlate components in the runner pipeline to components in the user
    # pipeline. The target pcolls are the pcolls given and defined in the user
    # pipeline.
    self._id_to_target_pcoll = self._calculate_target_pcoll_ids()
    self._label_to_user_transform = self._calculate_user_transform_labels()
    # Below will give us the 1:1 correlation between
    # PCollections/AppliedPTransforms from the copied runner pipeline and
    # PCollections/AppliedPTransforms from the user pipeline.
    # (Dict[PCollection, PCollection])
    (
        self._runner_pcolls_to_user_pcolls,
        # (Dict[AppliedPTransform, AppliedPTransform])
        self._runner_transforms_to_user_transforms
    ) = self._build_correlation_between_pipelines(
        self._runner_pcoll_to_id,
        self._id_to_target_pcoll,
        self._label_to_user_transform)

    # Below are operated on the runner pipeline.
    (self._necessary_transforms,
     self._necessary_pcollections) = self._mark_necessary_transforms_and_pcolls(
         self._runner_pcolls_to_user_pcolls)
    self._runner_pipeline = self._prune_runner_pipeline_to_fragment(
        self._runner_pipeline, self._necessary_transforms)

  def deduce_fragment(self):
    """Deduce the pipeline fragment as an apache_beam.Pipeline instance."""
    return beam.pipeline.Pipeline.from_runner_api(
        self._runner_pipeline.to_runner_api(use_fake_coders=True),
        self._runner_pipeline.runner,
        self._options)

  def run(self, display_pipeline_graph=False, use_cache=True, blocking=False):
    """Shorthand to run the pipeline fragment.

    Temporarily overrides the runner's private display/cache/blocking flags
    for the duration of this run and restores them afterwards.
    """
    try:
      preserved_skip_display = self._runner_pipeline.runner._skip_display
      preserved_force_compute = self._runner_pipeline.runner._force_compute
      preserved_blocking = self._runner_pipeline.runner._blocking
      self._runner_pipeline.runner._skip_display = not display_pipeline_graph
      self._runner_pipeline.runner._force_compute = not use_cache
      self._runner_pipeline.runner._blocking = blocking
      return self.deduce_fragment().run()
    finally:
      self._runner_pipeline.runner._skip_display = preserved_skip_display
      self._runner_pipeline.runner._force_compute = preserved_force_compute
      self._runner_pipeline.runner._blocking = preserved_blocking

  def _build_runner_pipeline(self):
    """Build a copy of the user pipeline via a runner-API round trip."""
    return beam.pipeline.Pipeline.from_runner_api(
        self._user_pipeline.to_runner_api(use_fake_coders=True),
        self._user_pipeline.runner,
        self._options)

  def _calculate_target_pcoll_ids(self):
    """Map each target PCollection's runner-pipeline id back to the PCollection."""
    pcoll_id_to_target_pcoll = {}
    for pcoll in self._pcolls:
      # Missing ids map under '' — such entries never match a real pcoll id.
      pcoll_id_to_target_pcoll[
          self._runner_pcoll_to_id.get(str(pcoll), '')] = pcoll
    return pcoll_id_to_target_pcoll

  def _calculate_user_transform_labels(self):
    """Collect every AppliedPTransform in the pipeline, keyed by full label."""
    label_to_user_transform = {}

    class UserTransformVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        if transform_node is not None:
          label_to_user_transform[transform_node.full_label] = transform_node

    v = UserTransformVisitor()
    # NOTE(review): this visits self._runner_pipeline even though the map is
    # named "user transform" — confirm this is intentional.
    self._runner_pipeline.visit(v)
    return label_to_user_transform

  def _build_correlation_between_pipelines(
      self, runner_pcoll_to_id, id_to_target_pcoll, label_to_user_transform):
    """Walk the runner pipeline and pair its components with user-pipeline ones.

    Returns a tuple of two dicts: runner PCollection -> user PCollection
    (only for the target pcolls) and runner AppliedPTransform -> user
    AppliedPTransform (matched by full label).
    """
    runner_pcolls_to_user_pcolls = {}
    runner_transforms_to_user_transforms = {}

    class CorrelationVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        self._process_transform(transform_node)
        for in_pcoll in transform_node.inputs:
          self._process_pcoll(in_pcoll)
        for out_pcoll in transform_node.outputs.values():
          self._process_pcoll(out_pcoll)

      def _process_pcoll(self, pcoll):
        pcoll_id = runner_pcoll_to_id.get(str(pcoll), '')
        if pcoll_id in id_to_target_pcoll:
          runner_pcolls_to_user_pcolls[pcoll] = (
              id_to_target_pcoll[pcoll_id])

      def _process_transform(self, transform_node):
        if transform_node.full_label in label_to_user_transform:
          runner_transforms_to_user_transforms[transform_node] = (
              label_to_user_transform[transform_node.full_label])

    v = CorrelationVisitor()
    self._runner_pipeline.visit(v)
    return runner_pcolls_to_user_pcolls, runner_transforms_to_user_transforms

  def _mark_necessary_transforms_and_pcolls(self,
                                            runner_pcolls_to_user_pcolls):
    """Trace producers backwards from the target pcolls.

    Returns the set of AppliedPTransforms and the set of (side) input
    PCollections needed to produce the targets, computed as a fixed point
    over the producer/parent chains.
    """
    necessary_transforms = set()
    all_inputs = set()
    updated_all_inputs = set(runner_pcolls_to_user_pcolls.keys())
    # Do this until no more new PCollection is recorded.
    while len(updated_all_inputs) != len(all_inputs):
      all_inputs = set(updated_all_inputs)
      for pcoll in all_inputs:
        producer = pcoll.producer
        while producer:
          if producer in necessary_transforms:
            break
          # Mark the AppliedPTransform as necessary.
          necessary_transforms.add(producer)
          # Record all necessary input and side input PCollections.
          updated_all_inputs.update(producer.inputs)
          # pylint: disable=map-builtin-not-iterating
          side_input_pvalues = set(
              map(lambda side_input: side_input.pvalue, producer.side_inputs))
          updated_all_inputs.update(side_input_pvalues)
          # Go to its parent AppliedPTransform.
          producer = producer.parent
    return necessary_transforms, all_inputs

  def _prune_runner_pipeline_to_fragment(
      self, runner_pipeline, necessary_transforms):
    """Detach every transform not in ``necessary_transforms`` in place.

    TestStream composites are left untouched. Mutates and returns
    ``runner_pipeline``.
    """
    class PruneVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        if isinstance(transform_node.transform, TestStream):
          return

        pruned_parts = list(transform_node.parts)
        for part in transform_node.parts:
          if part not in necessary_transforms:
            pruned_parts.remove(part)
        transform_node.parts = tuple(pruned_parts)
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        if transform_node not in necessary_transforms:
          transform_node.parent = None

    v = PruneVisitor()
    runner_pipeline.visit(v)
    return runner_pipeline
iemejia/incubator-beam
sdks/python/apache_beam/runners/interactive/pipeline_fragment.py
Python
apache-2.0
9,582
import numpy as np
import shapely.affinity
import cv2
from collections import defaultdict

from shapely.geometry import MultiPolygon, Polygon


class Utils:
    """Static helpers for converting between polygons, pixel masks, and images
    (DSTL satellite-imagery competition utilities)."""

    @staticmethod
    def multi_polygon_to_pixel_mask(polygon, image_size):
        """Rasterize a shapely (Multi)Polygon into a binary uint8 mask.

        Exterior rings are filled with 1, interior rings (holes) are
        re-filled with 0. ``image_size`` is passed to ``np.zeros`` —
        presumably (height, width); confirm against callers.
        """
        mask = np.zeros(image_size, np.uint8)
        # cv2.fillPoly needs integer pixel coordinates.
        int_coords = lambda x: np.array(x).round().astype(np.int32)
        exteriors = [int_coords(poly.exterior.coords) for poly in polygon]
        interiors = [int_coords(pi.coords) for poly in polygon for pi in poly.interiors]
        cv2.fillPoly(mask, exteriors, 1)
        cv2.fillPoly(mask, interiors, 0)
        return mask

    @staticmethod
    def pixel_mask_to_image(mask, r, g, b):
        """Turn a 2-D mask into an (H, W, 3) image by scaling it per channel."""
        return np.stack([
            np.multiply(mask, r),
            np.multiply(mask, g),
            np.multiply(mask, b)
        ]).transpose([1, 2, 0])

    @staticmethod
    def scale_multi_polygon(polygon, x_scale, y_scale):
        """Scale a (Multi)Polygon about the origin by per-axis factors."""
        return shapely.affinity.scale(polygon, xfact=x_scale, yfact=y_scale, origin=(0, 0, 0))

    @staticmethod
    def evaluate_prediction(prediction, truth):
        """Score a prediction against ground truth.

        NOTE(review): ``average_precision_score`` is not defined or imported
        anywhere in this module — calling this raises NameError. It is
        presumably ``sklearn.metrics.average_precision_score`` (whose argument
        order is (y_true, y_score), i.e. likely (truth, prediction)); restore
        the import and verify the argument order.
        """
        return average_precision_score(prediction, truth)

    @staticmethod
    def prediction_to_binary_prediction(prediction, threshold=0.3):
        """Binarize a probability map with an elementwise threshold."""
        return prediction >= threshold

    @staticmethod
    def prediction_mask_to_polygons(mask, epsilon=10., min_area=10.):
        """Convert a binary prediction mask into a simplified MultiPolygon.

        Contours are extracted with OpenCV, simplified with approxPolyDP
        (``epsilon``), nested contours become holes, and regions smaller than
        ``min_area`` (in pixels) are dropped.

        NOTE(review): the 3-value ``cv2.findContours`` return matches the
        OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy) —
        confirm the pinned OpenCV version.
        """
        # first, find contours with cv2: it's much faster than shapely
        image, contours, hierarchy = cv2.findContours(
            ((mask == 1) * 255).astype(np.uint8),
            cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
        # create approximate contours to have reasonable submission size
        approx_contours = [cv2.approxPolyDP(cnt, epsilon, True) for cnt in contours]
        if not contours:
            return MultiPolygon()
        # now messy stuff to associate parent and child contours
        cnt_children = defaultdict(list)
        child_contours = set()
        # RETR_CCOMP yields a two-level hierarchy wrapped in one extra axis.
        assert hierarchy.shape[0] == 1
        # http://docs.opencv.org/3.1.0/d9/d8b/tutorial_py_contours_hierarchy.html
        for idx, (_, _, _, parent_idx) in enumerate(hierarchy[0]):
            if parent_idx != -1:
                child_contours.add(idx)
                cnt_children[parent_idx].append(approx_contours[idx])
        # create actual polygons filtering by area (removes artifacts)
        all_polygons = []
        for idx, cnt in enumerate(approx_contours):
            if idx not in child_contours and cv2.contourArea(cnt) >= min_area:
                assert cnt.shape[1] == 1
                poly = Polygon(
                    shell=cnt[:, 0, :],
                    holes=[c[:, 0, :] for c in cnt_children.get(idx, [])
                           if cv2.contourArea(c) >= min_area])
                all_polygons.append(poly)
        # approximating polygons might have created invalid ones, fix them
        all_polygons = MultiPolygon(all_polygons)
        if not all_polygons.is_valid:
            all_polygons = all_polygons.buffer(0)
            # Sometimes buffer() converts a simple Multipolygon to just a Polygon,
            # need to keep it a Multi throughout
            if all_polygons.type == 'Polygon':
                all_polygons = MultiPolygon([all_polygons])
        return all_polygons
furgerf/kaggle-projects
dstl/utils.py
Python
apache-2.0
2,976
#!/usr/bin/python from gevent import monkey monkey.patch_all() import logging import gevent from gevent.coros import BoundedSemaphore from kafka import KafkaClient, KeyedProducer, SimpleConsumer, common from uveserver import UVEServer import os import json import copy import traceback import uuid import struct import socket import discoveryclient.client as client from sandesh_common.vns.constants import ALARM_PARTITION_SERVICE_NAME from pysandesh.util import UTCTimestampUsec import select import redis from collections import namedtuple PartInfo = namedtuple("PartInfo",["ip_address","instance_id","acq_time","port"]) def sse_pack(d): """Pack data in SSE format""" buffer = '' for k in ['event','data']: if k in d.keys(): buffer += '%s: %s\n' % (k, d[k]) return buffer + '\n' class UveStreamPart(gevent.Greenlet): def __init__(self, partno, logger, q, pi, rpass): gevent.Greenlet.__init__(self) self._logger = logger self._q = q self._pi = pi self._partno = partno self._rpass = rpass def syncpart(self, redish): inst = self._pi.instance_id part = self._partno keys = list(redish.smembers("AGPARTKEYS:%s:%d" % (inst, part))) ppe = redish.pipeline() for key in keys: ppe.hgetall("AGPARTVALUES:%s:%d:%s" % (inst, part, key)) pperes = ppe.execute() idx=0 for res in pperes: for tk,tv in res.iteritems(): msg = {'event': 'sync', 'data':\ json.dumps({'partition':self._partno, 'key':keys[idx], 'type':tk, 'value':tv})} self._q.put(sse_pack(msg)) idx += 1 def _run(self): lredis = None pb = None while True: try: lredis = redis.StrictRedis( host=self._pi.ip_address, port=self._pi.port, password=self._rpass, db=2) pb = lredis.pubsub() inst = self._pi.instance_id part = self._partno pb.subscribe('AGPARTPUB:%s:%d' % (inst, part)) self.syncpart(lredis) for message in pb.listen(): if message["type"] != "message": continue dataline = message["data"] try: elems = json.loads(dataline) except: self._logger.error("AggUVE Parsing failed: %s" % str(message)) continue else: self._logger.error("AggUVE 
loading: %s" % str(elems)) ppe = lredis.pipeline() for elem in elems: # This UVE was deleted if elem["type"] is None: ppe.exists("AGPARTVALUES:%s:%d:%s" % \ (inst, part, elem["key"])) else: ppe.hget("AGPARTVALUES:%s:%d:%s" % \ (inst, part, elem["key"]), elem["type"]) pperes = ppe.execute() idx = 0 for elem in elems: if elem["type"] is None: msg = {'event': 'update', 'data':\ json.dumps({'partition':part, 'key':elem["key"], 'type':None})} else: vjson = pperes[idx] if vjson is None: vdata = None else: vdata = json.loads(vjson) msg = {'event': 'update', 'data':\ json.dumps({'partition':part, 'key':elem["key"], 'type':elem["type"], 'value':vdata})} self._q.put(sse_pack(msg)) idx += 1 except gevent.GreenletExit: break except Exception as ex: template = "Exception {0} in uve stream proc. Arguments:\n{1!r}" messag = template.format(type(ex).__name__, ex.args) self._logger.error("%s : traceback %s" % \ (messag, traceback.format_exc())) lredis = None if pb is not None: pb.close() pb = None gevent.sleep(2) return None class UveStreamer(gevent.Greenlet): def __init__(self, logger, q, rfile, agp_cb, partitions, rpass): gevent.Greenlet.__init__(self) self._logger = logger self._q = q self._rfile = rfile self._agp_cb = agp_cb self._agp = {} self._parts = {} self._partitions = partitions self._rpass = rpass def _run(self): inputs = [ self._rfile ] outputs = [ ] msg = {'event': 'init', 'data':\ json.dumps({'partitions':self._partitions})} self._q.put(sse_pack(msg)) while True: readable, writable, exceptional = select.select(inputs, outputs, inputs, 1) if (readable or writable or exceptional): break newagp = self._agp_cb() set_new, set_old = set(newagp.keys()), set(self._agp.keys()) intersect = set_new.intersection(set_old) # deleted parts for elem in set_old - intersect: self.partition_stop(elem) # new parts for elem in set_new - intersect: self.partition_start(elem, newagp[elem]) # changed parts for elem in intersect: if self._agp[elem] != newagp[elem]: self.partition_stop(elem) 
self.partition_start(elem, newagp[elem]) self._agp = newagp for part, pi in self._agp.iteritems(): self.partition_stop(part) def partition_start(self, partno, pi): self._logger.error("Starting agguve part %d using %s" %( partno, pi)) msg = {'event': 'clear', 'data':\ json.dumps({'partition':partno, 'acq_time':pi.acq_time})} self._q.put(sse_pack(msg)) self._parts[partno] = UveStreamPart(partno, self._logger, self._q, pi, self._rpass) self._parts[partno].start() def partition_stop(self, partno): self._logger.error("Stopping agguve part %d" % partno) self._parts[partno].kill() self._parts[partno].get() del self._parts[partno] class PartitionHandler(gevent.Greenlet): def __init__(self, brokers, group, topic, logger, limit): gevent.Greenlet.__init__(self) self._brokers = brokers self._group = group self._topic = topic self._logger = logger self._limit = limit self._uvedb = {} self._partoffset = 0 self._kfk = None def msg_handler(self, mlist): self._logger.info("%s Reading %s" % (self._topic, str(mlist))) return True def _run(self): pcount = 0 while True: try: self._logger.error("New KafkaClient %s" % self._topic) self._kfk = KafkaClient(self._brokers , "kc-" + self._topic) try: consumer = SimpleConsumer(self._kfk, self._group, self._topic, buffer_size = 4096*4, max_buffer_size=4096*32) #except: except Exception as ex: template = "Consumer Failure {0} occured. 
Arguments:\n{1!r}" messag = template.format(type(ex).__name__, ex.args) self._logger.info("%s" % messag) raise RuntimeError(messag) self._logger.error("Starting %s" % self._topic) # Find the offset of the last message that has been queued consumer.seek(-1,2) try: mi = consumer.get_message(timeout=0.1) consumer.commit() except common.OffsetOutOfRangeError: mi = None #import pdb; pdb.set_trace() self._logger.info("Last Queued for %s is %s" % \ (self._topic,str(mi))) # start reading from last previously processed message if mi != None: consumer.seek(0,1) else: consumer.seek(0,0) if self._limit: raise gevent.GreenletExit while True: try: mlist = consumer.get_messages(10,timeout=0.5) if not self.msg_handler(mlist): raise gevent.GreenletExit consumer.commit() pcount += len(mlist) except TypeError as ex: self._logger.error("Type Error: %s trace %s" % \ (str(ex.args), traceback.format_exc())) gevent.sleep(0.1) except common.FailedPayloadsError as ex: self._logger.error("Payload Error: %s" % str(ex.args)) gevent.sleep(0.1) except gevent.GreenletExit: break except AssertionError as ex: self._partoffset = ex break except Exception as ex: template = "An exception of type {0} occured. 
Arguments:\n{1!r}" messag = template.format(type(ex).__name__, ex.args) self._logger.error("%s : traceback %s" % \ (messag, traceback.format_exc())) self.stop_partition() gevent.sleep(2) self._logger.error("Stopping %s pcount %d" % (self._topic, pcount)) partdb = self.stop_partition() return self._partoffset, partdb class UveStreamProc(PartitionHandler): # Arguments: # # brokers : broker list for kafka bootstrap # partition : partition number # uve_topic : Topic to consume # logger : logging object to use # callback : Callback function for reporting the set of the UVEs # that may have changed for a given notification # rsc : Callback function to check on collector status # and get sync contents for new collectors # aginst : instance_id of alarmgen # rport : redis server port # disc : discovery client to publish to def __init__(self, brokers, partition, uve_topic, logger, callback, host_ip, rsc, aginst, rport, disc = None): super(UveStreamProc, self).__init__(brokers, "workers", uve_topic, logger, False) self._uvedb = {} self._uvein = {} self._uveout = {} self._callback = callback self._partno = partition self._host_ip = host_ip self._ip_code, = struct.unpack('>I', socket.inet_pton( socket.AF_INET, host_ip)) self.disc_rset = set() self._resource_cb = rsc self._aginst = aginst self._disc = disc self._acq_time = UTCTimestampUsec() self._rport = rport def acq_time(self): return self._acq_time def resource_check(self, msgs): ''' This function compares the known collectors with the list from discovery, and syncs UVE keys accordingly ''' newset , coll_delete, chg_res = self._resource_cb(self._partno, self.disc_rset, msgs) for coll in coll_delete: self._logger.error("Part %d lost collector %s" % (self._partno, coll)) self.stop_partition(coll) if len(chg_res): self.start_partition(chg_res) self.disc_rset = newset if self._disc: data = { 'instance-id' : self._aginst, 'partition' : str(self._partno), 'ip-address': self._host_ip, 'acq-time': str(self._acq_time), 
'port':str(self._rport)} self._disc.publish(ALARM_PARTITION_SERVICE_NAME, data) def stop_partition(self, kcoll=None): clist = [] if not kcoll: clist = self._uvedb.keys() # If all collectors are being cleared, clear resoures too self.disc_rset = set() if self._disc: # TODO: Unpublish instead of setting acq-time to 0 data = { 'instance-id' : self._aginst, 'partition' : str(self._partno), 'ip-address': self._host_ip, 'acq-time': "0", 'port':str(self._rport)} self._disc.publish(ALARM_PARTITION_SERVICE_NAME, data) else: clist = [kcoll] self._logger.error("Stopping part %d collectors %s" % \ (self._partno,clist)) partdb = {} chg = {} for coll in clist: partdb[coll] = {} for gen in self._uvedb[coll].keys(): partdb[coll][gen] = {} for tab in self._uvedb[coll][gen].keys(): for rkey in self._uvedb[coll][gen][tab].keys(): uk = tab + ":" + rkey chg[uk] = None partdb[coll][gen][uk] = \ set(self._uvedb[coll][gen][tab][rkey].keys()) del self._uvedb[coll] self._logger.error("Stopping part %d UVEs %s" % \ (self._partno,str(chg.keys()))) self._callback(self._partno, chg) return partdb def start_partition(self, cbdb): ''' This function loads the initial UVE database. 
for the partition ''' self._logger.error("Starting part %d collectors %s" % \ (self._partno, str(cbdb.keys()))) uves = {} for kcoll,coll in cbdb.iteritems(): self._uvedb[kcoll] = {} for kgen,gen in coll.iteritems(): self._uvedb[kcoll][kgen] = {} for kk in gen.keys(): tabl = kk.split(":",1) tab = tabl[0] rkey = tabl[1] if not tab in self._uvedb[kcoll][kgen]: self._uvedb[kcoll][kgen][tab] = {} self._uvedb[kcoll][kgen][tab][rkey] = {} uves[kk] = {} for typ, contents in gen[kk].iteritems(): self._uvedb[kcoll][kgen][tab][rkey][typ] = {} self._uvedb[kcoll][kgen][tab][rkey][typ]["c"] = 0 self._uvedb[kcoll][kgen][tab][rkey][typ]["u"] = \ uuid.uuid1(self._ip_code) uves[kk][typ] = contents self._logger.error("Starting part %d UVEs %s" % \ (self._partno, str(uves.keys()))) self._callback(self._partno, uves) def contents(self): return self._uvedb def stats(self): ''' Return the UVEKey-Count stats collected over the last time period for this partition, and the incoming UVE Notifs as well. Also, the stats should be cleared to prepare for the next period of collection. ''' ret_out = copy.deepcopy(self._uveout) ret_in = copy.deepcopy(self._uvein) self._uveout = {} self._uvein = {} return ret_in, ret_out def msg_handler(self, mlist): self.resource_check(mlist) for mm in mlist: if mm is None: continue self._logger.debug("%s Reading offset %d" % \ (self._topic, mm.offset)) if not self.msg_handler_single(mm): self._logger.info("%s could not handle %s" % \ (self._topic, str(mm))) return False return True def msg_handler_single(self, om): self._partoffset = om.offset chg = {} try: uv = json.loads(om.message.value) coll = uv["coll"] gen = uv["gen"] if not self._uvedb.has_key(coll): # This partition is not synced yet. 
# Ignore this message self._logger.debug("%s Ignoring UVE %s" % (self._topic, str(om))) return True if not self._uvedb[coll].has_key(gen): self._uvedb[coll][gen] = {} if (uv["message"] == "UVEUpdate"): tabl = uv["key"].split(":",1) tab = tabl[0] rkey = tabl[1] if tab not in self._uvedb[coll][gen]: self._uvedb[coll][gen][tab] = {} if not rkey in self._uvedb[coll][gen][tab]: self._uvedb[coll][gen][tab][rkey] = {} removed = False # uv["type"] and uv["value"] can be decoded as follows: # uv["type"] can be one of the following: # - None # All Types under this UVE are deleted # uv["value"] will not be present # (this option is only for agg UVE updates) # - "<Struct>" # uv["value"] refers to this struct # uv["value"] can be one of the following: # - None # This Type has been deleted. # - {} # The Type has a value, which is # not available in this message. # (this option is only for raw UVE updates) # - {<Value>} # The Value of the Type # (this option is only for agg UVE updates) if uv["type"] is None: # TODO: Handling of delete UVE case return False if uv["value"] is None: if uv["type"] in self._uvedb[coll][gen][tab][rkey]: del self._uvedb[coll][gen][tab][rkey][uv["type"]] if not len(self._uvedb[coll][gen][tab][rkey]): del self._uvedb[coll][gen][tab][rkey] removed = True if not removed: if uv["type"] in self._uvedb[coll][gen][tab][rkey]: self._uvedb[coll][gen][tab][rkey][uv["type"]]["c"] +=1 else: self._uvedb[coll][gen][tab][rkey][uv["type"]] = {} self._uvedb[coll][gen][tab][rkey][uv["type"]]["c"] = 1 self._uvedb[coll][gen][tab][rkey][uv["type"]]["u"] = \ uuid.uuid1(self._ip_code) chg[uv["key"]] = { uv["type"] : uv["value"] } # Record stats on UVE Keys being processed if not self._uveout.has_key(tab): self._uveout[tab] = {} if self._uveout[tab].has_key(uv["key"]): self._uveout[tab][uv["key"]] += 1 else: self._uveout[tab][uv["key"]] = 1 # Record stats on the input UVE Notifications if not self._uvein.has_key(tab): self._uvein[tab] = {} if not 
self._uvein[tab].has_key(coll): self._uvein[tab][coll] = {} if not self._uvein[tab][coll].has_key(gen): self._uvein[tab][coll][gen] = {} if not self._uvein[tab][coll][gen].has_key(uv["type"]): self._uvein[tab][coll][gen][uv["type"]] = 1 else: self._uvein[tab][coll][gen][uv["type"]] += 1 else: # Record stats on UVE Keys being processed for tab in self._uvedb[coll][gen].keys(): for rkey in self._uvedb[coll][gen][tab].keys(): uk = tab + ":" + rkey if not self._uveout.has_key(tab): self._uveout[tab] = {} if self._uveout[tab].has_key(uk): self._uveout[tab][uk] += 1 else: self._uveout[tab][uk] = 1 # when a generator is delelted, we need to # notify for *ALL* its UVEs chg[uk] = None del self._uvedb[coll][gen] except Exception as ex: template = "An exception of type {0} in uve proc . Arguments:\n{1!r}" messag = template.format(type(ex).__name__, ex.args) self._logger.info("%s" % messag) return False else: self._callback(self._partno, chg) return True if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') workers = {} brokers = "localhost:9092,localhost:9093,localhost:9094" group = "workers" kafka = KafkaClient(brokers,str(os.getpid())) cons = SimpleConsumer(kafka, group, "ctrl") cons.provide_partition_info() print "Starting control" end_ready = False while end_ready == False: try: while True: part, mmm = cons.get_message(timeout=None) mm = mmm.message print "Consumed ctrl " + str(mm) if mm.value == "start": if workers.has_key(mm.key): print "Dup partition %s" % mm.key raise ValueError else: ph = UveStreamProc(brokers, int(mm.key), "uve-" + mm.key, "alarm-x" + mm.key, logging) ph.start() workers[int(mm.key)] = ph elif mm.value == "stop": #import pdb; pdb.set_trace() if workers.has_key(int(mm.key)): ph = workers[int(mm.key)] gevent.kill(ph) res,db = ph.get() print "Returned " + str(res) print "State :" for k,v in db.iteritems(): print "%s -> %s" % (k,str(v)) del workers[int(mm.key)] else: end_ready = True 
cons.commit() gevent.sleep(2) break except TypeError: gevent.sleep(0.1) except common.FailedPayloadsError as ex: print "Payload Error: " + str(ex.args) gevent.sleep(0.1) lw=[] for key, value in workers.iteritems(): gevent.kill(value) lw.append(value) gevent.joinall(lw) print "Ending Consumers"
facetothefate/contrail-controller
src/opserver/partition_handler.py
Python
apache-2.0
23,447
#!/usr/bin/env python3 from functools import partial import math from threading import Lock import rospy from geometry_msgs.msg import Twist from lg_common.helpers import run_with_influx_exception_handler NODE_NAME = 'mux_twists' DEFAULT_TICK_RATE = 65.0 DEFAULT_AXIS_LIMIT = math.sqrt(2) / 2 DEFAULT_AGE_LIMIT = 1.0 def clamp(val, lo, hi): return min(max(val, lo), hi) def clamp_twist(twist, lo, hi): twist.linear.x = clamp(twist.linear.x, lo, hi) twist.linear.y = clamp(twist.linear.y, lo, hi) twist.linear.z = clamp(twist.linear.z, lo, hi) twist.angular.x = clamp(twist.angular.x, lo, hi) twist.angular.y = clamp(twist.angular.y, lo, hi) twist.angular.z = clamp(twist.angular.z, lo, hi) class TwistMuxer: def __init__(self, twist_pub, axis_limit, age_limit): self._lock = Lock() self.twist_pub = twist_pub self.axis_limit = axis_limit self.age_limit = rospy.Duration(age_limit) self.samples = {} self.sample_stamps = {} def handle_twist(self, topic, twist): with self._lock: self._handle_twist(topic, twist) def _handle_twist(self, topic, twist): self.samples[topic] = twist self.sample_stamps[topic] = rospy.Time.now() def tick(self, tev): with self._lock: self._tick(tev) def _tick(self, tev): t = rospy.Time.now() result = Twist() for topic in list(self.samples.keys()): stamp = self.sample_stamps[topic] if t - stamp > self.age_limit: continue twist = self.samples[topic] result.linear.x += twist.linear.x result.linear.y += twist.linear.y result.linear.z += twist.linear.z result.angular.x += twist.angular.x result.angular.y += twist.angular.y result.angular.z += twist.angular.z clamp_twist(result, -self.axis_limit, self.axis_limit) self.twist_pub.publish(result) def main(): rospy.init_node(NODE_NAME) tick_rate = float(rospy.get_param('~tick_rate', DEFAULT_TICK_RATE)) sources = [ s.strip() for s in rospy.get_param('~sources').split(',') ] axis_limit = float(rospy.get_param('~axis_limit', DEFAULT_AXIS_LIMIT)) age_limit = float(rospy.get_param('~age_limit', DEFAULT_AGE_LIMIT)) 
twist_pub = rospy.Publisher('/lg_twister/twist', Twist, queue_size=10) muxer = TwistMuxer(twist_pub, axis_limit, age_limit) for source in sources: handler = partial(muxer.handle_twist, source) rospy.Subscriber(source, Twist, handler) rospy.Timer(rospy.Duration(1.0 / tick_rate), muxer.tick) rospy.spin() if __name__ == '__main__': run_with_influx_exception_handler(main, NODE_NAME)
EndPointCorp/lg_ros_nodes
lg_twister/scripts/mux_twists.py
Python
apache-2.0
2,793
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import logging import select import sys import traceback from contextlib import contextmanager from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer from pants.option.options_bootstrapper import OptionsBootstrapper from pants.pantsd.pailgun_server import PailgunServer from pants.pantsd.service.pants_service import PantsService class PailgunService(PantsService): """A service that runs the Pailgun server.""" def __init__(self, bind_addr, exiter_class, runner_class, target_roots_calculator, scheduler_service): """ :param tuple bind_addr: The (hostname, port) tuple to bind the Pailgun server to. :param class exiter_class: The `Exiter` class to be used for Pailgun runs. :param class runner_class: The `PantsRunner` class to be used for Pailgun runs. :param class target_roots_calculator: The `TargetRootsCalculator` class to be used for target root parsing. :param SchedulerService scheduler_service: The SchedulerService instance for access to the resident scheduler. """ super(PailgunService, self).__init__() self._bind_addr = bind_addr self._exiter_class = exiter_class self._runner_class = runner_class self._target_roots_calculator = target_roots_calculator self._scheduler_service = scheduler_service self._logger = logging.getLogger(__name__) self._pailgun = None @property def pailgun(self): if not self._pailgun: self._pailgun = self._setup_pailgun() return self._pailgun @property def pailgun_port(self): return self.pailgun.server_port def _setup_pailgun(self): """Sets up a PailgunServer instance.""" # Constructs and returns a runnable PantsRunner. 
def runner_factory(sock, arguments, environment): exiter = self._exiter_class(sock) graph_helper = None deferred_exc = None self._logger.debug('execution commandline: %s', arguments) options_bootstrapper = OptionsBootstrapper(args=arguments) build_config = BuildConfigInitializer.get(options_bootstrapper) options = OptionsInitializer.create(options_bootstrapper, build_config) graph_helper, target_roots = None, None try: self._logger.debug('warming the product graph via %s', self._scheduler_service) # N.B. This call is made in the pre-fork daemon context for reach and reuse of the # resident scheduler. graph_helper, target_roots = self._scheduler_service.warm_product_graph( options, self._target_roots_calculator ) except Exception: deferred_exc = sys.exc_info() self._logger.warning( 'encountered exception during SchedulerService.warm_product_graph(), deferring:\n%s', ''.join(traceback.format_exception(*deferred_exc)) ) return self._runner_class( sock, exiter, arguments, environment, target_roots, graph_helper, self.fork_lock, deferred_exc ) # Plumb the daemon's lifecycle lock to the `PailgunServer` to safeguard teardown. @contextmanager def lifecycle_lock(): with self.lifecycle_lock: yield return PailgunServer(self._bind_addr, runner_factory, lifecycle_lock) def run(self): """Main service entrypoint. Called via Thread.start() via PantsDaemon.run().""" self._logger.info('starting pailgun server on port {}'.format(self.pailgun_port)) try: # Manually call handle_request() in a loop vs serve_forever() for interruptability. while not self.is_killed: self.pailgun.handle_request() except select.error: # SocketServer can throw `error: (9, 'Bad file descriptor')` on teardown. Ignore it. self._logger.warning('pailgun service shutting down') def terminate(self): """Override of PantsService.terminate() that cleans up when the Pailgun server is terminated.""" # Tear down the Pailgun TCPServer. if self.pailgun: self.pailgun.server_close() super(PailgunService, self).terminate()
baroquebobcat/pants
src/python/pants/pantsd/service/pailgun_service.py
Python
apache-2.0
4,415
#!/usr/bin/python # # Copyright 2016 Pinterest, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket hostname = socket.gethostname() def _escape_path_for_stats_name(path): # Do some formatting the file path. if path is None: return None if path.startswith("/"): path = path[1:] return path.replace("/", "_") class DummyStatsdClient: def __init__(self, *args, **kwargs): pass def increment(self, stats, sample_rate=1, tags={}): pass def gauge(self, stats, value, sample_rate=1, tags={}): pass dummy_statsd = DummyStatsdClient()
pinterest/kingpin
kingpin/kazoo_utils/utils.py
Python
apache-2.0
1,118
from django.test import TestCase from journal.tests.factories import StudentFactory class StudentTestCase(TestCase): """Tests for the Student models""" def test_student(self): """Test to ensure that Students can be created properly""" student = StudentFactory.build() self.assertEqual(student.personal_code, '123456')
WildCAS/CASCategorization
journal/tests/test_persons.py
Python
apache-2.0
353
"""Support for GTFS (Google/General Transport Format Schema).""" from __future__ import annotations import datetime import logging import os import threading from typing import Any, Callable import pygtfs from sqlalchemy.sql import text import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_NAME, CONF_OFFSET, DEVICE_CLASS_TIMESTAMP, STATE_UNKNOWN, ) from homeassistant.core import HomeAssistant import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util import slugify import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) ATTR_ARRIVAL = "arrival" ATTR_BICYCLE = "trip_bikes_allowed_state" ATTR_DAY = "day" ATTR_FIRST = "first" ATTR_DROP_OFF_DESTINATION = "destination_stop_drop_off_type_state" ATTR_DROP_OFF_ORIGIN = "origin_stop_drop_off_type_state" ATTR_INFO = "info" ATTR_OFFSET = CONF_OFFSET ATTR_LAST = "last" ATTR_LOCATION_DESTINATION = "destination_station_location_type_name" ATTR_LOCATION_ORIGIN = "origin_station_location_type_name" ATTR_PICKUP_DESTINATION = "destination_stop_pickup_type_state" ATTR_PICKUP_ORIGIN = "origin_stop_pickup_type_state" ATTR_ROUTE_TYPE = "route_type_name" ATTR_TIMEPOINT_DESTINATION = "destination_stop_timepoint_exact" ATTR_TIMEPOINT_ORIGIN = "origin_stop_timepoint_exact" ATTR_WHEELCHAIR = "trip_wheelchair_access_available" ATTR_WHEELCHAIR_DESTINATION = "destination_station_wheelchair_boarding_available" ATTR_WHEELCHAIR_ORIGIN = "origin_station_wheelchair_boarding_available" CONF_DATA = "data" CONF_DESTINATION = "destination" CONF_ORIGIN = "origin" CONF_TOMORROW = "include_tomorrow" DEFAULT_NAME = "GTFS Sensor" DEFAULT_PATH = "gtfs" BICYCLE_ALLOWED_DEFAULT = STATE_UNKNOWN BICYCLE_ALLOWED_OPTIONS = {1: True, 2: False} DROP_OFF_TYPE_DEFAULT = STATE_UNKNOWN DROP_OFF_TYPE_OPTIONS = { 0: "Regular", 1: "Not Available", 2: "Call Agency", 
3: "Contact Driver", } ICON = "mdi:train" ICONS = { 0: "mdi:tram", 1: "mdi:subway", 2: "mdi:train", 3: "mdi:bus", 4: "mdi:ferry", 5: "mdi:train-variant", 6: "mdi:gondola", 7: "mdi:stairs", 100: "mdi:train", 101: "mdi:train", 102: "mdi:train", 103: "mdi:train", 104: "mdi:train-car", 105: "mdi:train", 106: "mdi:train", 107: "mdi:train", 108: "mdi:train", 109: "mdi:train", 110: "mdi:train-variant", 111: "mdi:train-variant", 112: "mdi:train-variant", 113: "mdi:train-variant", 114: "mdi:train-variant", 115: "mdi:train-variant", 116: "mdi:train-variant", 117: "mdi:train-variant", 200: "mdi:bus", 201: "mdi:bus", 202: "mdi:bus", 203: "mdi:bus", 204: "mdi:bus", 205: "mdi:bus", 206: "mdi:bus", 207: "mdi:bus", 208: "mdi:bus", 209: "mdi:bus", 400: "mdi:subway-variant", 401: "mdi:subway-variant", 402: "mdi:subway", 403: "mdi:subway-variant", 404: "mdi:subway-variant", 405: "mdi:subway-variant", 700: "mdi:bus", 701: "mdi:bus", 702: "mdi:bus", 703: "mdi:bus", 704: "mdi:bus", 705: "mdi:bus", 706: "mdi:bus", 707: "mdi:bus", 708: "mdi:bus", 709: "mdi:bus", 710: "mdi:bus", 711: "mdi:bus", 712: "mdi:bus-school", 713: "mdi:bus-school", 714: "mdi:bus", 715: "mdi:bus", 716: "mdi:bus", 800: "mdi:bus", 900: "mdi:tram", 901: "mdi:tram", 902: "mdi:tram", 903: "mdi:tram", 904: "mdi:tram", 905: "mdi:tram", 906: "mdi:tram", 1000: "mdi:ferry", 1100: "mdi:airplane", 1200: "mdi:ferry", 1300: "mdi:airplane", 1400: "mdi:gondola", 1500: "mdi:taxi", 1501: "mdi:taxi", 1502: "mdi:ferry", 1503: "mdi:train-variant", 1504: "mdi:bicycle-basket", 1505: "mdi:taxi", 1506: "mdi:car-multiple", 1507: "mdi:taxi", 1700: "mdi:train-car", 1702: "mdi:horse-variant", } LOCATION_TYPE_DEFAULT = "Stop" LOCATION_TYPE_OPTIONS = { 0: "Station", 1: "Stop", 2: "Station Entrance/Exit", 3: "Other", } PICKUP_TYPE_DEFAULT = STATE_UNKNOWN PICKUP_TYPE_OPTIONS = { 0: "Regular", 1: "None Available", 2: "Call Agency", 3: "Contact Driver", } ROUTE_TYPE_OPTIONS = { 0: "Tram", 1: "Subway", 2: "Rail", 3: "Bus", 4: "Ferry", 5: "Cable Tram", 
6: "Aerial Lift", 7: "Funicular", 100: "Railway Service", 101: "High Speed Rail Service", 102: "Long Distance Trains", 103: "Inter Regional Rail Service", 104: "Car Transport Rail Service", 105: "Sleeper Rail Service", 106: "Regional Rail Service", 107: "Tourist Railway Service", 108: "Rail Shuttle (Within Complex)", 109: "Suburban Railway", 110: "Replacement Rail Service", 111: "Special Rail Service", 112: "Lorry Transport Rail Service", 113: "All Rail Services", 114: "Cross-Country Rail Service", 115: "Vehicle Transport Rail Service", 116: "Rack and Pinion Railway", 117: "Additional Rail Service", 200: "Coach Service", 201: "International Coach Service", 202: "National Coach Service", 203: "Shuttle Coach Service", 204: "Regional Coach Service", 205: "Special Coach Service", 206: "Sightseeing Coach Service", 207: "Tourist Coach Service", 208: "Commuter Coach Service", 209: "All Coach Services", 400: "Urban Railway Service", 401: "Metro Service", 402: "Underground Service", 403: "Urban Railway Service", 404: "All Urban Railway Services", 405: "Monorail", 700: "Bus Service", 701: "Regional Bus Service", 702: "Express Bus Service", 703: "Stopping Bus Service", 704: "Local Bus Service", 705: "Night Bus Service", 706: "Post Bus Service", 707: "Special Needs Bus", 708: "Mobility Bus Service", 709: "Mobility Bus for Registered Disabled", 710: "Sightseeing Bus", 711: "Shuttle Bus", 712: "School Bus", 713: "School and Public Service Bus", 714: "Rail Replacement Bus Service", 715: "Demand and Response Bus Service", 716: "All Bus Services", 800: "Trolleybus Service", 900: "Tram Service", 901: "City Tram Service", 902: "Local Tram Service", 903: "Regional Tram Service", 904: "Sightseeing Tram Service", 905: "Shuttle Tram Service", 906: "All Tram Services", 1000: "Water Transport Service", 1100: "Air Service", 1200: "Ferry Service", 1300: "Aerial Lift Service", 1400: "Funicular Service", 1500: "Taxi Service", 1501: "Communal Taxi Service", 1502: "Water Taxi Service", 1503: 
"Rail Taxi Service", 1504: "Bike Taxi Service", 1505: "Licensed Taxi Service", 1506: "Private Hire Service Vehicle", 1507: "All Taxi Services", 1700: "Miscellaneous Service", 1702: "Horse-drawn Carriage", } TIMEPOINT_DEFAULT = True TIMEPOINT_OPTIONS = {0: False, 1: True} WHEELCHAIR_ACCESS_DEFAULT = STATE_UNKNOWN WHEELCHAIR_ACCESS_OPTIONS = {1: True, 2: False} WHEELCHAIR_BOARDING_DEFAULT = STATE_UNKNOWN WHEELCHAIR_BOARDING_OPTIONS = {1: True, 2: False} PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { # type: ignore vol.Required(CONF_ORIGIN): cv.string, vol.Required(CONF_DESTINATION): cv.string, vol.Required(CONF_DATA): cv.string, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_OFFSET, default=0): cv.time_period, vol.Optional(CONF_TOMORROW, default=False): cv.boolean, } ) def get_next_departure( schedule: Any, start_station_id: Any, end_station_id: Any, offset: cv.time_period, include_tomorrow: bool = False, ) -> dict: """Get the next departure for the given schedule.""" now = dt_util.now().replace(tzinfo=None) + offset now_date = now.strftime(dt_util.DATE_STR_FORMAT) yesterday = now - datetime.timedelta(days=1) yesterday_date = yesterday.strftime(dt_util.DATE_STR_FORMAT) tomorrow = now + datetime.timedelta(days=1) tomorrow_date = tomorrow.strftime(dt_util.DATE_STR_FORMAT) # Fetch all departures for yesterday, today and optionally tomorrow, # up to an overkill maximum in case of a departure every minute for those # days. 
limit = 24 * 60 * 60 * 2 tomorrow_select = tomorrow_where = tomorrow_order = "" if include_tomorrow: limit = int(limit / 2 * 3) tomorrow_name = tomorrow.strftime("%A").lower() tomorrow_select = f"calendar.{tomorrow_name} AS tomorrow," tomorrow_where = f"OR calendar.{tomorrow_name} = 1" tomorrow_order = f"calendar.{tomorrow_name} DESC," sql_query = f""" SELECT trip.trip_id, trip.route_id, time(origin_stop_time.arrival_time) AS origin_arrival_time, time(origin_stop_time.departure_time) AS origin_depart_time, date(origin_stop_time.departure_time) AS origin_depart_date, origin_stop_time.drop_off_type AS origin_drop_off_type, origin_stop_time.pickup_type AS origin_pickup_type, origin_stop_time.shape_dist_traveled AS origin_dist_traveled, origin_stop_time.stop_headsign AS origin_stop_headsign, origin_stop_time.stop_sequence AS origin_stop_sequence, origin_stop_time.timepoint AS origin_stop_timepoint, time(destination_stop_time.arrival_time) AS dest_arrival_time, time(destination_stop_time.departure_time) AS dest_depart_time, destination_stop_time.drop_off_type AS dest_drop_off_type, destination_stop_time.pickup_type AS dest_pickup_type, destination_stop_time.shape_dist_traveled AS dest_dist_traveled, destination_stop_time.stop_headsign AS dest_stop_headsign, destination_stop_time.stop_sequence AS dest_stop_sequence, destination_stop_time.timepoint AS dest_stop_timepoint, calendar.{yesterday.strftime("%A").lower()} AS yesterday, calendar.{now.strftime("%A").lower()} AS today, {tomorrow_select} calendar.start_date AS start_date, calendar.end_date AS end_date FROM trips trip INNER JOIN calendar calendar ON trip.service_id = calendar.service_id INNER JOIN stop_times origin_stop_time ON trip.trip_id = origin_stop_time.trip_id INNER JOIN stops start_station ON origin_stop_time.stop_id = start_station.stop_id INNER JOIN stop_times destination_stop_time ON trip.trip_id = destination_stop_time.trip_id INNER JOIN stops end_station ON destination_stop_time.stop_id = 
end_station.stop_id WHERE (calendar.{yesterday.strftime("%A").lower()} = 1 OR calendar.{now.strftime("%A").lower()} = 1 {tomorrow_where} ) AND start_station.stop_id = :origin_station_id AND end_station.stop_id = :end_station_id AND origin_stop_sequence < dest_stop_sequence AND calendar.start_date <= :today AND calendar.end_date >= :today ORDER BY calendar.{yesterday.strftime("%A").lower()} DESC, calendar.{now.strftime("%A").lower()} DESC, {tomorrow_order} origin_stop_time.departure_time LIMIT :limit """ result = schedule.engine.execute( text(sql_query), origin_station_id=start_station_id, end_station_id=end_station_id, today=now_date, limit=limit, ) # Create lookup timetable for today and possibly tomorrow, taking into # account any departures from yesterday scheduled after midnight, # as long as all departures are within the calendar date range. timetable = {} yesterday_start = today_start = tomorrow_start = None yesterday_last = today_last = "" for row in result: if row["yesterday"] == 1 and yesterday_date >= row["start_date"]: extras = {"day": "yesterday", "first": None, "last": False} if yesterday_start is None: yesterday_start = row["origin_depart_date"] if yesterday_start != row["origin_depart_date"]: idx = f"{now_date} {row['origin_depart_time']}" timetable[idx] = {**row, **extras} yesterday_last = idx if row["today"] == 1: extras = {"day": "today", "first": False, "last": False} if today_start is None: today_start = row["origin_depart_date"] extras["first"] = True if today_start == row["origin_depart_date"]: idx_prefix = now_date else: idx_prefix = tomorrow_date idx = f"{idx_prefix} {row['origin_depart_time']}" timetable[idx] = {**row, **extras} today_last = idx if ( "tomorrow" in row and row["tomorrow"] == 1 and tomorrow_date <= row["end_date"] ): extras = {"day": "tomorrow", "first": False, "last": None} if tomorrow_start is None: tomorrow_start = row["origin_depart_date"] extras["first"] = True if tomorrow_start == row["origin_depart_date"]: idx = 
f"{tomorrow_date} {row['origin_depart_time']}" timetable[idx] = {**row, **extras} # Flag last departures. for idx in filter(None, [yesterday_last, today_last]): timetable[idx]["last"] = True _LOGGER.debug("Timetable: %s", sorted(timetable.keys())) item = {} for key in sorted(timetable.keys()): if dt_util.parse_datetime(key) > now: item = timetable[key] _LOGGER.debug( "Departure found for station %s @ %s -> %s", start_station_id, key, item ) break if item == {}: return {} # Format arrival and departure dates and times, accounting for the # possibility of times crossing over midnight. origin_arrival = now if item["origin_arrival_time"] > item["origin_depart_time"]: origin_arrival -= datetime.timedelta(days=1) origin_arrival_time = ( f"{origin_arrival.strftime(dt_util.DATE_STR_FORMAT)} " f"{item['origin_arrival_time']}" ) origin_depart_time = f"{now_date} {item['origin_depart_time']}" dest_arrival = now if item["dest_arrival_time"] < item["origin_depart_time"]: dest_arrival += datetime.timedelta(days=1) dest_arrival_time = ( f"{dest_arrival.strftime(dt_util.DATE_STR_FORMAT)} " f"{item['dest_arrival_time']}" ) dest_depart = dest_arrival if item["dest_depart_time"] < item["dest_arrival_time"]: dest_depart += datetime.timedelta(days=1) dest_depart_time = ( f"{dest_depart.strftime(dt_util.DATE_STR_FORMAT)} " f"{item['dest_depart_time']}" ) depart_time = dt_util.parse_datetime(origin_depart_time) arrival_time = dt_util.parse_datetime(dest_arrival_time) origin_stop_time = { "Arrival Time": origin_arrival_time, "Departure Time": origin_depart_time, "Drop Off Type": item["origin_drop_off_type"], "Pickup Type": item["origin_pickup_type"], "Shape Dist Traveled": item["origin_dist_traveled"], "Headsign": item["origin_stop_headsign"], "Sequence": item["origin_stop_sequence"], "Timepoint": item["origin_stop_timepoint"], } destination_stop_time = { "Arrival Time": dest_arrival_time, "Departure Time": dest_depart_time, "Drop Off Type": item["dest_drop_off_type"], "Pickup Type": 
item["dest_pickup_type"], "Shape Dist Traveled": item["dest_dist_traveled"], "Headsign": item["dest_stop_headsign"], "Sequence": item["dest_stop_sequence"], "Timepoint": item["dest_stop_timepoint"], } return { "trip_id": item["trip_id"], "route_id": item["route_id"], "day": item["day"], "first": item["first"], "last": item["last"], "departure_time": depart_time, "arrival_time": arrival_time, "origin_stop_time": origin_stop_time, "destination_stop_time": destination_stop_time, } def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: Callable[[list], None], discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the GTFS sensor.""" gtfs_dir = hass.config.path(DEFAULT_PATH) data = config[CONF_DATA] origin = config.get(CONF_ORIGIN) destination = config.get(CONF_DESTINATION) name = config.get(CONF_NAME) offset = config.get(CONF_OFFSET) include_tomorrow = config[CONF_TOMORROW] if not os.path.exists(gtfs_dir): os.makedirs(gtfs_dir) if not os.path.exists(os.path.join(gtfs_dir, data)): _LOGGER.error("The given GTFS data file/folder was not found") return (gtfs_root, _) = os.path.splitext(data) sqlite_file = f"{gtfs_root}.sqlite?check_same_thread=False" joined_path = os.path.join(gtfs_dir, sqlite_file) gtfs = pygtfs.Schedule(joined_path) # pylint: disable=no-member if not gtfs.feeds: pygtfs.append_feed(gtfs, os.path.join(gtfs_dir, data)) add_entities( [GTFSDepartureSensor(gtfs, name, origin, destination, offset, include_tomorrow)] ) class GTFSDepartureSensor(SensorEntity): """Implementation of a GTFS departure sensor.""" _attr_device_class = DEVICE_CLASS_TIMESTAMP def __init__( self, gtfs: Any, name: Any | None, origin: Any, destination: Any, offset: datetime.timedelta, include_tomorrow: bool, ) -> None: """Initialize the sensor.""" self._pygtfs = gtfs self.origin = origin self.destination = destination self._include_tomorrow = include_tomorrow self._offset = offset self._custom_name = name self._available = False self._icon = ICON 
self._name = "" self._state: str | None = None self._attributes = {} self._agency = None self._departure = {} self._destination = None self._origin = None self._route = None self._trip = None self.lock = threading.Lock() self.update() @property def name(self) -> str: """Return the name of the sensor.""" return self._name @property def state(self) -> str | None: # type: ignore """Return the state of the sensor.""" return self._state @property def available(self) -> bool: """Return True if entity is available.""" return self._available @property def extra_state_attributes(self) -> dict: """Return the state attributes.""" return self._attributes @property def icon(self) -> str: """Icon to use in the frontend, if any.""" return self._icon def update(self) -> None: """Get the latest data from GTFS and update the states.""" with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( "Destination stop ID %s not found", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = dt_util.as_utc( self._departure["departure_time"] ).isoformat() # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure["trip_id"] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug("Fetching trip details for %s", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure["route_id"] if not 
self._route or self._route.route_id != route_id: _LOGGER.debug("Fetching route details for %s", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug("Fetching agency details for %s", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( "Agency ID '%s' was not found in agency table, " "you may want to update the routes database table " "to fix this missing reference", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} " f"{self.origin} to {self.destination} next departure" ) if not self._departure: name = f"{DEFAULT_NAME}" self._name = self._custom_name or name def update_attributes(self) -> None: """Update state attributes.""" # Add departure information if self._departure: self._attributes[ATTR_ARRIVAL] = dt_util.as_utc( self._departure["arrival_time"] ).isoformat() self._attributes[ATTR_DAY] = self._departure["day"] if self._departure[ATTR_FIRST] is not None: self._attributes[ATTR_FIRST] = self._departure["first"] elif ATTR_FIRST in self._attributes: del self._attributes[ATTR_FIRST] if self._departure[ATTR_LAST] is not None: self._attributes[ATTR_LAST] = self._departure["last"] elif ATTR_LAST in self._attributes: del self._attributes[ATTR_LAST] else: if ATTR_ARRIVAL in self._attributes: del self._attributes[ATTR_ARRIVAL] if ATTR_DAY in self._attributes: del self._attributes[ATTR_DAY] if ATTR_FIRST in self._attributes: del self._attributes[ATTR_FIRST] if ATTR_LAST in self._attributes: del self._attributes[ATTR_LAST] # Add contextual information self._attributes[ATTR_OFFSET] = self._offset.total_seconds() / 60 if self._state is None: self._attributes[ATTR_INFO] = ( "No more 
departures" if self._include_tomorrow else "No more departures today" ) elif ATTR_INFO in self._attributes: del self._attributes[ATTR_INFO] if self._agency: self._attributes[ATTR_ATTRIBUTION] = self._agency.agency_name elif ATTR_ATTRIBUTION in self._attributes: del self._attributes[ATTR_ATTRIBUTION] # Add extra metadata key = "agency_id" if self._agency and key not in self._attributes: self.append_keys(self.dict_for_table(self._agency), "Agency") key = "origin_station_stop_id" if self._origin and key not in self._attributes: self.append_keys(self.dict_for_table(self._origin), "Origin Station") self._attributes[ATTR_LOCATION_ORIGIN] = LOCATION_TYPE_OPTIONS.get( self._origin.location_type, LOCATION_TYPE_DEFAULT ) self._attributes[ATTR_WHEELCHAIR_ORIGIN] = WHEELCHAIR_BOARDING_OPTIONS.get( self._origin.wheelchair_boarding, WHEELCHAIR_BOARDING_DEFAULT ) key = "destination_station_stop_id" if self._destination and key not in self._attributes: self.append_keys( self.dict_for_table(self._destination), "Destination Station" ) self._attributes[ATTR_LOCATION_DESTINATION] = LOCATION_TYPE_OPTIONS.get( self._destination.location_type, LOCATION_TYPE_DEFAULT ) self._attributes[ ATTR_WHEELCHAIR_DESTINATION ] = WHEELCHAIR_BOARDING_OPTIONS.get( self._destination.wheelchair_boarding, WHEELCHAIR_BOARDING_DEFAULT ) # Manage Route metadata key = "route_id" if not self._route and key in self._attributes: self.remove_keys("Route") elif self._route and ( key not in self._attributes or self._attributes[key] != self._route.route_id ): self.append_keys(self.dict_for_table(self._route), "Route") self._attributes[ATTR_ROUTE_TYPE] = ROUTE_TYPE_OPTIONS[ self._route.route_type ] # Manage Trip metadata key = "trip_id" if not self._trip and key in self._attributes: self.remove_keys("Trip") elif self._trip and ( key not in self._attributes or self._attributes[key] != self._trip.trip_id ): self.append_keys(self.dict_for_table(self._trip), "Trip") self._attributes[ATTR_BICYCLE] = 
BICYCLE_ALLOWED_OPTIONS.get( self._trip.bikes_allowed, BICYCLE_ALLOWED_DEFAULT ) self._attributes[ATTR_WHEELCHAIR] = WHEELCHAIR_ACCESS_OPTIONS.get( self._trip.wheelchair_accessible, WHEELCHAIR_ACCESS_DEFAULT ) # Manage Stop Times metadata prefix = "origin_stop" if self._departure: self.append_keys(self._departure["origin_stop_time"], prefix) self._attributes[ATTR_DROP_OFF_ORIGIN] = DROP_OFF_TYPE_OPTIONS.get( self._departure["origin_stop_time"]["Drop Off Type"], DROP_OFF_TYPE_DEFAULT, ) self._attributes[ATTR_PICKUP_ORIGIN] = PICKUP_TYPE_OPTIONS.get( self._departure["origin_stop_time"]["Pickup Type"], PICKUP_TYPE_DEFAULT ) self._attributes[ATTR_TIMEPOINT_ORIGIN] = TIMEPOINT_OPTIONS.get( self._departure["origin_stop_time"]["Timepoint"], TIMEPOINT_DEFAULT ) else: self.remove_keys(prefix) prefix = "destination_stop" if self._departure: self.append_keys(self._departure["destination_stop_time"], prefix) self._attributes[ATTR_DROP_OFF_DESTINATION] = DROP_OFF_TYPE_OPTIONS.get( self._departure["destination_stop_time"]["Drop Off Type"], DROP_OFF_TYPE_DEFAULT, ) self._attributes[ATTR_PICKUP_DESTINATION] = PICKUP_TYPE_OPTIONS.get( self._departure["destination_stop_time"]["Pickup Type"], PICKUP_TYPE_DEFAULT, ) self._attributes[ATTR_TIMEPOINT_DESTINATION] = TIMEPOINT_OPTIONS.get( self._departure["destination_stop_time"]["Timepoint"], TIMEPOINT_DEFAULT ) else: self.remove_keys(prefix) @staticmethod def dict_for_table(resource: Any) -> dict: """Return a dictionary for the SQLAlchemy resource given.""" return { col: getattr(resource, col) for col in resource.__table__.columns.keys() } def append_keys(self, resource: dict, prefix: str | None = None) -> None: """Properly format key val pairs to append to attributes.""" for attr, val in resource.items(): if val == "" or val is None or attr == "feed_id": continue key = attr if prefix and not key.startswith(prefix): key = f"{prefix} {key}" key = slugify(key) self._attributes[key] = val def remove_keys(self, prefix: str) -> None: 
"""Remove attributes whose key starts with prefix.""" self._attributes = { k: v for k, v in self._attributes.items() if not k.startswith(prefix) }
kennedyshead/home-assistant
homeassistant/components/gtfs/sensor.py
Python
apache-2.0
29,393
# -*- coding: utf-8 -*-
"""
Applied Mathematics for Computer Science.
Homework4 -- L-M Algorithm.

Fit the model  fun(t) = a * exp(-b * t)  to the observed samples
(x_1, y_1), ..., (x_10, y_10) with the Levenberg-Marquardt algorithm,
minimising the objective

    f(a, b) = 1/2 * sum_i (fun(x_i) - y_i)**2

@author: LiBin 11531041
@date: 2016 - 5 - 23.
"""
#%% Residual / Jacobian / objective helpers.


def F_x(x, y, a, b):
    """Residual column vector F, where F_i(a, b) = a*exp(-b*x_i) - y_i."""
    result = (a * np.exp(-b * x) - y).T
    return result


def J_x(x, a, b):
    """Jacobian of F w.r.t. (a, b): column 0 is dF/da, column 1 is dF/db."""
    result = np.matrix(np.zeros((10, 2)))
    result[:, 0] = np.exp(-b * x).T
    result[:, 1] = np.multiply(-(a * x), np.exp(-b * x)).T
    return result


def f_x(x, y, a, b):
    """Objective f = 1/2 * sum(F_i**2)."""
    temp = a * np.exp(-b * x) - y
    result = np.sum(np.power(temp, 2)) / 2
    return result


#%%
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
import numpy as np
import matplotlib.pyplot as plt

# Observed samples.
x = np.matrix([0.25, 0.5, 1, 1.5, 2, 3, 4, 6, 8, 10])
y = np.matrix([19.21, 18.15, 15.36, 14.10, 12.89, 9.32, 7.45, 5.24, 3.01, 1.85])

mu = 0.01        # damping factor of the L-M iteration
epsilon = 1e-6   # gradient-norm stopping tolerance
max_iter = 50
a = 10           # initial guess for parameter a
b = 0.5          # initial guess for parameter b

#%% L-M iterations.
a_trend = []
b_trend = []
f_trend = []

for loop in range(max_iter):
    J = J_x(x, a, b)
    F = F_x(x, y, a, b)

    ## step - 2: gradient and Gauss-Newton approximation of the Hessian.
    g = J.T * F
    G = J.T * J

    ## step - 3: stop once the gradient is numerically zero.
    norm_g = np.sqrt(np.sum(np.power(g, 2)))
    if norm_g < epsilon:
        break

    ## step - 4: inflate mu until G + mu*I is positive definite.
    while not np.all(np.linalg.eigvals(G + mu * np.eye(2)) > 0):
        mu = 4 * mu
    G_mu = G + mu * np.eye(2)

    ## step - 5: solve the damped normal equations for the step s.
    s = np.linalg.solve(G_mu, -g)

    ## step - 6: actual vs. predicted reduction ratio r.
    a_new = a + s[0, 0]
    b_new = b + s[1, 0]
    diff_f = f_x(x, y, a_new, b_new) - f_x(x, y, a, b)
    diff_q = (J.T * F).T * s + (s.T * (J.T * J) * s) / 2
    r = diff_f / diff_q

    ## step - 7: adapt the damping factor from the ratio.
    if r < 0.25:
        mu = mu * 4
    elif r > 0.75:
        mu = mu / 2

    ## step - 8: accept the step only if the objective decreased.
    if r > 0:
        a = a_new
        b = b_new

    a_trend.append(a)
    b_trend.append(b)
    f_trend.append(np.log(f_x(x, y, a, b)))

#%% Visualise log f(a, b) over a grid, plus the optimisation path.
num_grid = 15
a_index, b_index = np.mgrid[5:25:num_grid * 1j, 0:0.5:num_grid * 1j]
z = np.zeros((num_grid, num_grid))
# BUGFIX: the original used Python-2-only ``xrange`` here, which raises
# NameError under Python 3; ``range`` behaves identically in this loop.
for i in range(num_grid):
    for j in range(num_grid):
        z[i, j] = np.log(f_x(x, y, a_index[i, j], b_index[i, j]))

ax = plt.subplot(111, projection='3d')
ax.plot_surface(a_index, b_index, z, rstride=2, cstride=1,
                cmap=plt.cm.coolwarm, alpha=0.8)
ax.set_xlabel('a')
ax.set_ylabel('b')
ax.set_zlabel('log f(x)')
mpl.rcParams['legend.fontsize'] = 10
ax.plot(a_trend, b_trend, f_trend, color='blue', linestyle='solid',
        linewidth=3, marker='o', markerfacecolor='red', markersize=9,
        label='optimization curve')
ax.legend(loc=3)
plt.title('L-M algorithm to evaluate the optimial value')
plt.show()
tracer9/Applied_Math
Homework_4.py
Python
apache-2.0
3,155
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.6.1

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


from pprint import pformat

import re


class V1NamespaceSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, finalizers=None):
        """
        V1NamespaceSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'finalizers': 'list[str]'
        }

        self.attribute_map = {
            'finalizers': 'finalizers'
        }

        self._finalizers = finalizers

    @property
    def finalizers(self):
        """
        Gets the finalizers of this V1NamespaceSpec.
        Finalizers is an opaque list of values that must be empty to
        permanently remove object from storage. More info:
        http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers

        :return: The finalizers of this V1NamespaceSpec.
        :rtype: list[str]
        """
        return self._finalizers

    @finalizers.setter
    def finalizers(self, finalizers):
        """
        Sets the finalizers of this V1NamespaceSpec.
        Finalizers is an opaque list of values that must be empty to
        permanently remove object from storage. More info:
        http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers

        :param finalizers: The finalizers of this V1NamespaceSpec.
        :type: list[str]
        """
        self._finalizers = finalizers

    def to_dict(self):
        """
        Returns the model properties as a dict.
        """
        result = {}

        # dict.items() works on both Python 2 and 3, so the six.iteritems
        # dependency is no longer needed here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        FIX: comparing against an unrelated type (e.g. a str) used to raise
        AttributeError on ``other.__dict__``; now it simply reports inequality.
        """
        if not isinstance(other, V1NamespaceSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
skuda/client-python
kubernetes/client/models/v1_namespace_spec.py
Python
apache-2.0
3,316
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from weakref import WeakMethod
except ImportError:
    from opencensus.common.backports import WeakMethod

import calendar
import datetime
import weakref

UTF8 = 'utf-8'

# Max length is 128 bytes for a truncatable string.
MAX_LENGTH = 128

ISO_DATETIME_REGEX = '%Y-%m-%dT%H:%M:%S.%fZ'


def get_truncatable_str(str_to_convert):
    """Wrap a string in the TruncatableString dict shape.

    Truncates to MAX_LENGTH bytes and records how many bytes were dropped.
    """
    value, dropped = check_str_length(str_to_convert, MAX_LENGTH)
    return {
        'value': value,
        'truncated_byte_count': dropped,
    }


def check_str_length(str_to_check, limit=MAX_LENGTH):
    """Truncate a string to at most `limit` bytes of its UTF-8 encoding.

    :type str_to_check: str
    :param str_to_check: String to check.

    :type limit: int
    :param limit: The upper limit of the length, in bytes.

    :rtype: tuple
    :returns: ``(string, truncated_byte_count)`` -- the (possibly truncated)
              string and the number of bytes removed.
    """
    encoded = str_to_check.encode(UTF8)
    truncated_byte_count = max(len(encoded) - limit, 0)
    if truncated_byte_count:
        encoded = encoded[:limit]
    # errors='ignore' drops a multi-byte character cut in half by the slice.
    return (str(encoded.decode(UTF8, errors='ignore')), truncated_byte_count)


def to_iso_str(ts=None):
    """Get an ISO 8601 string for a UTC datetime (defaults to "now")."""
    when = datetime.datetime.utcnow() if ts is None else ts
    return when.strftime("%Y-%m-%dT%H:%M:%S.%fZ")


def timestamp_to_microseconds(timestamp):
    """Convert an ISO-8601 timestamp string to microseconds since the epoch.

    :param timestamp: timestamp string in ISO_DATETIME_REGEX format
    :return: time in microseconds (float)
    """
    parsed = datetime.datetime.strptime(timestamp, ISO_DATETIME_REGEX)
    seconds_since_epoch = calendar.timegm(parsed.timetuple())
    return seconds_since_epoch * 1e6 + parsed.microsecond


def iuniq(ible):
    """Get an iterator over unique items of `ible`, preserving order."""
    seen = set()
    for item in ible:
        if item in seen:
            continue
        seen.add(item)
        yield item


def uniq(ible):
    """Get a list of unique items of `ible`, preserving order."""
    return list(iuniq(ible))


def window(ible, length):
    """Split `ible` into multiple lists of length `length`.

    >>> list(window(range(5), 2))
    [[0, 1], [2, 3], [4]]
    """
    if length <= 0:  # pragma: NO COVER
        raise ValueError
    it = iter(ible)
    while True:
        # zip with a range caps the chunk at `length` items without
        # consuming anything beyond it.
        chunk = [item for _, item in zip(range(length), it)]
        if not chunk:
            return
        yield chunk


def get_weakref(func):
    """Get a weak reference to bound or unbound `func`.

    If `func` is unbound (i.e. has no __self__ attr) get a weakref.ref,
    otherwise get a wrapper that simulates weakref.ref.
    """
    if func is None:
        raise ValueError
    if hasattr(func, '__self__'):
        return WeakMethod(func)
    return weakref.ref(func)
census-instrumentation/opencensus-python
opencensus/common/utils/__init__.py
Python
apache-2.0
3,607
# -*- coding: utf-8 -*-
'''
Created on 2015/04/17

@author: 2015 AIST
'''
import time
from django.conf import settings
from zbxsend import Metric, send_to_zabbix
import logging
import json

from vt_manager_kvm.communication.geni.v3.configurators.handlerconfigurator import HandlerConfigurator


class ZabbixHelper():
    """Static helpers that push availability/discovery metrics to Zabbix."""

    logger = logging.getLogger("ZabbixHelper")

    @staticmethod
    def sendAgentStatus(server, available):
        """Report a server's agent availability (1 = UP, 2 = DOWN)."""
        status = 1 if available == True else 2
        timestamp = int(time.time())
        driver = HandlerConfigurator.get_vt_am_driver()
        server_urn = driver.generate_component_id(server)
        itemname = settings.ZBX_ITEM_HOSTSTATUS + '[' + str(server_urn) + ']'
        ZabbixHelper.sendZabbix(
            Metric(server.name, str(itemname), status, timestamp))
        return

    @staticmethod
    def sendVMDiscovery(server, vms):
        """Send a Zabbix low-level-discovery payload listing VM names."""
        timestamp = int(time.time())
        payload = {"data": [{"{#USERVM.NAME}": vm.name} for vm in vms]}
        discoveryStr = json.dumps(payload)
        ZabbixHelper.sendZabbix(
            Metric(server.name, settings.ZBX_ITEM_DISCOVERY_USERVM,
                   str(discoveryStr), timestamp))
        return

    @staticmethod
    def sendVMStatusDiscovery(vms):
        """Send one discovery payload per VM, keyed by the VM's sliver URN."""
        timestamp = int(time.time())
        driver = HandlerConfigurator.get_vt_am_driver()
        for vm in vms:
            vm_urn = driver.generate_sliver_urn(vm)
            payload = {"data": [{"{#USERVM.URN}": vm_urn}]}
            discoveryStr = json.dumps(payload)
            ZabbixHelper.sendZabbix(
                Metric(vm.name, settings.ZBX_ITEM_DISCOVERY_USERVMSTATUS,
                       str(discoveryStr), timestamp))
        return

    @staticmethod
    def sendVMStatus(vm, isUp):
        """Report a single VM's availability (1 = UP, 2 = DOWN)."""
        status = 1 if isUp == True else 2
        driver = HandlerConfigurator.get_vt_am_driver()
        vm_urn = driver.generate_sliver_urn(vm)
        timestamp = int(time.time())
        itemname = settings.ZBX_ITEM_USERVMSTATUS + '[' + str(vm_urn) + ']'
        ZabbixHelper.sendZabbix(Metric(vm.name, str(itemname), status, timestamp))
        return

    @staticmethod
    def sendZabbix(metric):
        """Ship a single metric to the configured Zabbix server (best effort)."""
        ZabbixHelper.logger.debug("send Zabbix " + str(metric))
        result = send_to_zabbix([metric], settings.ZBX_SERVER_IP,
                                settings.ZBX_SERVER_PORT)
        # Delivery failure is deliberately non-fatal.
        if result == False:
            ZabbixHelper.logger.warn(
                "cannot send VM status to Zabbix, continue anyway")
        return
ict-felix/stack
vt_manager_kvm/src/python/vt_manager_kvm/communication/utils/ZabbixHelper.py
Python
apache-2.0
2,427
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Tests For miscellaneous util methods used with volume."""

from cinder import context
from cinder import db
from cinder import flags
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common.notifier import api as notifier_api
from cinder.openstack.common.notifier import test_notifier
from cinder import test
from cinder.volume import utils as volume_utils


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS


class UsageInfoTestCase(test.TestCase):
    """Exercise volume usage notifications and queue-name parsing helpers."""

    QUEUE_NAME = 'cinder-volume'
    HOSTNAME = 'my-host.com'
    HOSTIP = '10.0.0.1'
    BACKEND = 'test_backend'
    MULTI_AT_BACKEND = 'test_b@ckend'

    def setUp(self):
        super(UsageInfoTestCase, self).setUp()
        self.flags(connection_type='fake',
                   host='fake',
                   notification_driver=[test_notifier.__name__])
        self.volume = importutils.import_object(FLAGS.volume_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.snapshot_id = 'fake'
        self.volume_size = 0
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []

    def tearDown(self):
        notifier_api._reset_drivers()
        super(UsageInfoTestCase, self).tearDown()

    def _create_volume(self, params=None):
        """Create a test volume and return its id.

        FIX: ``params`` used a mutable ``{}`` default, which is shared
        across calls; ``None`` is the conventional sentinel instead.
        """
        vol = {}
        vol['snapshot_id'] = self.snapshot_id
        vol['user_id'] = self.user_id
        vol['project_id'] = self.project_id
        vol['host'] = FLAGS.host
        vol['availability_zone'] = FLAGS.storage_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        vol['size'] = self.volume_size
        if params:
            vol.update(params)
        return db.volume_create(self.context, vol)['id']

    def test_notify_usage_exists(self):
        """Ensure 'exists' notification generates appropriate usage data."""
        volume_id = self._create_volume()
        volume = db.volume_get(self.context, volume_id)
        volume_utils.notify_usage_exists(self.context, volume)
        LOG.info("%r" % test_notifier.NOTIFICATIONS)
        # assertEqual/assertIn replace the deprecated assertEquals and
        # assertTrue(x in y) idioms.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg['priority'], 'INFO')
        self.assertEqual(msg['event_type'], 'volume.exists')
        payload = msg['payload']
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['snapshot_id'], self.snapshot_id)
        self.assertEqual(payload['volume_id'], volume.id)
        self.assertEqual(payload['size'], self.volume_size)
        for attr in ('display_name', 'created_at', 'launched_at',
                     'status', 'audit_period_beginning',
                     'audit_period_ending'):
            self.assertIn(attr, payload, msg="Key %s not in payload" % attr)
        db.volume_destroy(context.get_admin_context(), volume['id'])

    def test_get_host_from_queue_simple(self):
        fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTNAME, self.BACKEND)
        self.assertEqual(volume_utils.get_host_from_queue(fullname),
                         self.HOSTNAME)

    def test_get_host_from_queue_ip(self):
        fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTIP, self.BACKEND)
        self.assertEqual(volume_utils.get_host_from_queue(fullname),
                         self.HOSTIP)

    def test_get_host_from_queue_multi_at_symbol(self):
        fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTNAME,
                                 self.MULTI_AT_BACKEND)
        self.assertEqual(volume_utils.get_host_from_queue(fullname),
                         self.HOSTNAME)

    def test_get_host_from_queue_ip_multi_at_symbol(self):
        fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTIP,
                                 self.MULTI_AT_BACKEND)
        self.assertEqual(volume_utils.get_host_from_queue(fullname),
                         self.HOSTIP)
tomasdubec/openstack-cinder
cinder/tests/test_volume_utils.py
Python
apache-2.0
4,807
""" config of ops service, Control by OPS devices @author: opsdev """ import httplib import json from OpsServiceConfig import OPS_SERVICE_CONFIG class OpsDeviceManager(): def __init__(self,config): self.server = config["server"] self.port = config["port"] self.addAction = "POST" self.getAction = "GET" self.modifyAction = "PUT" self.deleteAction = "DELETE" def addDevice(self,data): return self.restcall("/devices", data, self.addAction) def delDevice(self,deviceid): path = "/devices/%s" % (deviceid) return self.restcall(path, {}, self.deleteAction) def modifyDevice(self,data): return self.restcall("/devices", data, self.modifyAction) def getDevice(self,deviceid): path = "/devices/%s" % (deviceid) return self.restcall(path, "", self.getAction) def getDevices(self): return self.restcall("/devices", "", self.getAction) def restcall(self, opspath, data, action): conn = httplib.HTTPConnection(self.server, self.port) body = json.dumps(data) if action == self.addAction: conn.request(action, opspath, body) elif action == self.getAction: conn.request(action, opspath) elif action == self.modifyAction: conn.request(action, opspath, body) elif action == self.deleteAction: conn.request(action, opspath, body) response = conn.getresponse() ret = (response.status, response.reason, response.read()) conn.close() return ret
HuaweiSNC/OpsDev
src/plugins/com.huawei.networkos.ops.python/templet/python/OpsDeviceManager.py
Python
apache-2.0
1,654
import unittest

from loadsbroker.db import Project, Plan, Step, Database


class DatabaseTest(unittest.TestCase):
    """Smoke-test the ORM mapping against an in-memory SQLite database."""

    def setUp(self):
        self.db = Database('sqlite:///:memory:')

    def test_project(self):
        session = self.db.session()

        # A project is defined by a name, a repo and strategies.
        project = Project(name='simplepush',
                          home_page='https://services.mozilla.com')
        session.add(project)

        plan = Plan(name='s1', enabled=True)
        project.plans.append(plan)

        # Attach a container set to the strategy.
        load_step = Step(
            name="Awesome load-tester",
            instance_type="t2.micro",
            instance_count=5,
            container_name="bbangert/simpletest:latest",
            additional_command_args="--target=svc.dev.mozilla.com",
        )
        plan.steps.append(load_step)

        session.commit()
loads/loads-broker
loadsbroker/tests/test_db.py
Python
apache-2.0
908
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 import base64 from datetime import datetime, timedelta import functools import json import os import time import yaml import jinja2 import jmespath from dateutil import parser from dateutil.tz import gettz, tzutc try: from botocore.exceptions import ClientError except ImportError: # pragma: no cover pass # Azure provider class Providers: AWS = 0 Azure = 1 def get_jinja_env(template_folders): env = jinja2.Environment(trim_blocks=True, autoescape=False) # nosec nosemgrep env.filters['yaml_safe'] = functools.partial(yaml.safe_dump, default_flow_style=False) env.filters['date_time_format'] = date_time_format env.filters['get_date_time_delta'] = get_date_time_delta env.filters['from_json'] = json.loads env.filters['get_date_age'] = get_date_age env.globals['format_resource'] = resource_format env.globals['format_struct'] = format_struct env.globals['resource_tag'] = get_resource_tag_value env.globals['get_resource_tag_value'] = get_resource_tag_value env.globals['search'] = jmespath.search env.loader = jinja2.FileSystemLoader(template_folders) return env def get_rendered_jinja( target, sqs_message, resources, logger, specified_template, default_template, template_folders): env = get_jinja_env(template_folders) mail_template = sqs_message['action'].get(specified_template, default_template) if not os.path.isabs(mail_template): mail_template = '%s.j2' % mail_template try: template = env.get_template(mail_template) except Exception as error_msg: logger.error("Invalid template reference %s\n%s" % (mail_template, error_msg)) return # recast seconds since epoch as utc iso datestring, template # authors can use date_time_format helper func to convert local # tz. if no execution start time was passed use current time. 
execution_start = datetime.utcfromtimestamp( sqs_message.get( 'execution_start', time.mktime( datetime.utcnow().timetuple()) )).isoformat() rendered_jinja = template.render( recipient=target, resources=resources, account=sqs_message.get('account', ''), account_id=sqs_message.get('account_id', ''), partition=sqs_message.get('partition', ''), event=sqs_message.get('event', None), action=sqs_message['action'], policy=sqs_message['policy'], execution_start=execution_start, region=sqs_message.get('region', '')) return rendered_jinja # eg, target_tag_keys could be resource-owners ['Owners', 'SupportTeam'] # and this function would go through the resource and look for any tag keys # that match Owners or SupportTeam, and return those values as targets def get_resource_tag_targets(resource, target_tag_keys): if 'Tags' not in resource: return [] if isinstance(resource['Tags'], dict): tags = resource['Tags'] else: tags = {tag['Key']: tag['Value'] for tag in resource['Tags']} targets = [] for target_tag_key in target_tag_keys: if target_tag_key in tags: targets.append(tags[target_tag_key]) return targets def get_message_subject(sqs_message): default_subject = 'Custodian notification - %s' % (sqs_message['policy']['name']) subject = sqs_message['action'].get('subject', default_subject) jinja_template = jinja2.Template(subject) subject = jinja_template.render( account=sqs_message.get('account', ''), account_id=sqs_message.get('account_id', ''), partition=sqs_message.get('partition', ''), event=sqs_message.get('event', None), action=sqs_message['action'], policy=sqs_message['policy'], region=sqs_message.get('region', '') ) return subject def setup_defaults(config): config.setdefault('region', 'us-east-1') config.setdefault('ses_region', config.get('region')) config.setdefault('memory', 1024) config.setdefault('runtime', 'python3.7') config.setdefault('timeout', 300) config.setdefault('subnets', None) config.setdefault('security_groups', None) config.setdefault('contact_tags', []) 
config.setdefault('ldap_uri', None) config.setdefault('ldap_bind_dn', None) config.setdefault('ldap_bind_user', None) config.setdefault('ldap_bind_password', None) config.setdefault('endpoint_url', None) config.setdefault('datadog_api_key', None) config.setdefault('slack_token', None) config.setdefault('slack_webhook', None) def date_time_format(utc_str, tz_str='US/Eastern', format='%Y %b %d %H:%M %Z'): return parser.parse(utc_str).astimezone(gettz(tz_str)).strftime(format) def get_date_time_delta(delta): return str(datetime.now().replace(tzinfo=gettz('UTC')) + timedelta(delta)) def get_date_age(date): return (datetime.now(tz=tzutc()) - parser.parse(date)).days def format_struct(evt): return json.dumps(evt, indent=2, ensure_ascii=False) def get_resource_tag_value(resource, k): for t in resource.get('Tags', []): if t['Key'] == k: return t['Value'] return '' def strip_prefix(value, prefix): if value.startswith(prefix): return value[len(prefix):] return value def resource_format(resource, resource_type): if resource_type.startswith('aws.'): resource_type = strip_prefix(resource_type, 'aws.') if resource_type == 'ec2': tag_map = {t['Key']: t['Value'] for t in resource.get('Tags', ())} return "%s %s %s %s %s %s" % ( resource['InstanceId'], resource.get('VpcId', 'NO VPC!'), resource['InstanceType'], resource.get('LaunchTime'), tag_map.get('Name', ''), resource.get('PrivateIpAddress')) elif resource_type == 'ami': return "%s %s %s" % ( resource.get('Name'), resource['ImageId'], resource['CreationDate']) elif resource_type == 'sagemaker-notebook': return "%s" % (resource['NotebookInstanceName']) elif resource_type == 's3': return "%s" % (resource['Name']) elif resource_type == 'ebs': return "%s %s %s %s" % ( resource['VolumeId'], resource['Size'], resource['State'], resource['CreateTime']) elif resource_type == 'rds': return "%s %s %s %s" % ( resource['DBInstanceIdentifier'], "%s-%s" % ( resource['Engine'], resource['EngineVersion']), resource['DBInstanceClass'], 
resource['AllocatedStorage']) elif resource_type == 'rds-cluster': return "%s %s %s" % ( resource['DBClusterIdentifier'], "%s-%s" % ( resource['Engine'], resource['EngineVersion']), resource['AllocatedStorage']) elif resource_type == 'asg': tag_map = {t['Key']: t['Value'] for t in resource.get('Tags', ())} return "%s %s %s" % ( resource['AutoScalingGroupName'], tag_map.get('Name', ''), "instances: %d" % (len(resource.get('Instances', [])))) elif resource_type == 'elb': tag_map = {t['Key']: t['Value'] for t in resource.get('Tags', ())} if 'ProhibitedPolicies' in resource: return "%s %s %s %s" % ( resource['LoadBalancerName'], "instances: %d" % len(resource['Instances']), "zones: %d" % len(resource['AvailabilityZones']), "prohibited_policies: %s" % ','.join( resource['ProhibitedPolicies'])) return "%s %s %s" % ( resource['LoadBalancerName'], "instances: %d" % len(resource['Instances']), "zones: %d" % len(resource['AvailabilityZones'])) elif resource_type == 'redshift': return "%s %s %s" % ( resource['ClusterIdentifier'], 'nodes:%d' % len(resource['ClusterNodes']), 'encrypted:%s' % resource['Encrypted']) elif resource_type == 'emr': return "%s status:%s" % ( resource['Id'], resource['Status']['State']) elif resource_type == 'cfn': return "%s" % ( resource['StackName']) elif resource_type == 'launch-config': return "%s" % ( resource['LaunchConfigurationName']) elif resource_type == 'security-group': name = resource.get('GroupName', '') for t in resource.get('Tags', ()): if t['Key'] == 'Name': name = t['Value'] return "%s %s %s inrules: %d outrules: %d" % ( name, resource['GroupId'], resource.get('VpcId', 'na'), len(resource.get('IpPermissions', ())), len(resource.get('IpPermissionsEgress', ()))) elif resource_type == 'log-group': if 'lastWrite' in resource: return "name: %s last_write: %s" % ( resource['logGroupName'], resource['lastWrite']) return "name: %s" % (resource['logGroupName']) elif resource_type == 'cache-cluster': return "name: %s created: %s status: %s" % 
( resource['CacheClusterId'], resource['CacheClusterCreateTime'], resource['CacheClusterStatus']) elif resource_type == 'cache-snapshot': cid = resource.get('CacheClusterId') if cid is None: cid = ', '.join([ ns['CacheClusterId'] for ns in resource['NodeSnapshots']]) return "name: %s cluster: %s source: %s" % ( resource['SnapshotName'], cid, resource['SnapshotSource']) elif resource_type == 'redshift-snapshot': return "name: %s db: %s" % ( resource['SnapshotIdentifier'], resource['DBName']) elif resource_type == 'ebs-snapshot': return "name: %s date: %s" % ( resource['SnapshotId'], resource['StartTime']) elif resource_type == 'subnet': return "%s %s %s %s %s %s" % ( resource['SubnetId'], resource['VpcId'], resource['AvailabilityZone'], resource['State'], resource['CidrBlock'], resource['AvailableIpAddressCount']) elif resource_type == 'account': return " %s %s" % ( resource['account_id'], resource['account_name']) elif resource_type == 'cloudtrail': return "%s" % ( resource['Name']) elif resource_type == 'vpc': return "%s " % ( resource['VpcId']) elif resource_type == 'iam-group': return " %s %s %s" % ( resource['GroupName'], resource['Arn'], resource['CreateDate']) elif resource_type == 'rds-snapshot': return " %s %s %s" % ( resource['DBSnapshotIdentifier'], resource['DBInstanceIdentifier'], resource['SnapshotCreateTime']) elif resource_type == 'iam-user': return " %s " % ( resource['UserName']) elif resource_type == 'iam-role': return " %s %s " % ( resource['RoleName'], resource['CreateDate']) elif resource_type == 'iam-policy': return " %s " % ( resource['PolicyName']) elif resource_type == 'iam-profile': return " %s " % ( resource['InstanceProfileId']) elif resource_type == 'dynamodb-table': return "name: %s created: %s status: %s" % ( resource['TableName'], resource['CreationDateTime'], resource['TableStatus']) elif resource_type == "sqs": return "QueueURL: %s QueueArn: %s " % ( resource['QueueUrl'], resource['QueueArn']) elif resource_type == "efs": return 
"name: %s id: %s state: %s" % ( resource['Name'], resource['FileSystemId'], resource['LifeCycleState'] ) elif resource_type == "network-addr": return "ip: %s id: %s scope: %s" % ( resource['PublicIp'], resource['AllocationId'], resource['Domain'] ) elif resource_type == "route-table": return "id: %s vpc: %s" % ( resource['RouteTableId'], resource['VpcId'] ) elif resource_type == "app-elb": return "arn: %s zones: %s scheme: %s" % ( resource['LoadBalancerArn'], len(resource['AvailabilityZones']), resource['Scheme']) elif resource_type == "nat-gateway": return "id: %s state: %s vpc: %s" % ( resource['NatGatewayId'], resource['State'], resource['VpcId']) elif resource_type == "internet-gateway": return "id: %s attachments: %s" % ( resource['InternetGatewayId'], len(resource['Attachments'])) elif resource_type == 'lambda': return "Name: %s RunTime: %s \n" % ( resource['FunctionName'], resource['Runtime']) else: return "%s" % format_struct(resource) def get_provider(mailer_config): if mailer_config.get('queue_url', '').startswith('asq://'): return Providers.Azure return Providers.AWS def kms_decrypt(config, logger, session, encrypted_field): if config.get(encrypted_field): try: kms = session.client('kms') return kms.decrypt( CiphertextBlob=base64.b64decode(config[encrypted_field]))[ 'Plaintext'].decode('utf8') except (TypeError, base64.binascii.Error) as e: logger.warning( "Error: %s Unable to base64 decode %s, will assume plaintext." % (e, encrypted_field)) except ClientError as e: if e.response['Error']['Code'] != 'InvalidCiphertextException': raise logger.warning( "Error: %s Unable to decrypt %s with kms, will assume plaintext." 
% (e, encrypted_field)) return config[encrypted_field] else: logger.debug("No encrypted value to decrypt.") return None def decrypt(config, logger, session, encrypted_field): if config.get(encrypted_field): provider = get_provider(config) if provider == Providers.Azure: from c7n_mailer.azure_mailer.utils import azure_decrypt return azure_decrypt(config, logger, session, encrypted_field) elif provider == Providers.AWS: return kms_decrypt(config, logger, session, encrypted_field) else: raise Exception("Unknown provider") else: logger.debug("No encrypted value to decrypt.") return None # https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html def get_aws_username_from_event(logger, event): if event is None: return None identity = event.get('detail', {}).get('userIdentity', {}) if not identity: logger.warning("Could not get recipient from event \n %s" % ( format_struct(event))) return None if identity['type'] == 'AssumedRole': logger.debug( 'In some cases there is no ldap uid is associated with AssumedRole: %s', identity['arn']) logger.debug( 'We will try to assume that identity is in the AssumedRoleSessionName') user = identity['arn'].rsplit('/', 1)[-1] if user is None or user.startswith('i-') or user.startswith('awslambda'): return None if ':' in user: user = user.split(':', 1)[-1] return user if identity['type'] == 'IAMUser' or identity['type'] == 'WebIdentityUser': return identity['userName'] if identity['type'] == 'Root': return None # this conditional is left here as a last resort, it should # be better documented with an example UserIdentity json if ':' in identity['principalId']: user_id = identity['principalId'].split(':', 1)[-1] else: user_id = identity['principalId'] return user_id
thisisshi/cloud-custodian
tools/c7n_mailer/c7n_mailer/utils.py
Python
apache-2.0
16,298
"""The tests for the manual_mqtt Alarm Control Panel component.""" from datetime import timedelta import unittest from unittest.mock import patch from homeassistant.setup import setup_component from homeassistant.const import ( STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED) from homeassistant.components import alarm_control_panel import homeassistant.util.dt as dt_util from tests.common import ( fire_time_changed, get_test_home_assistant, mock_mqtt_component, fire_mqtt_message, assert_setup_component) CODE = 'HELLO_CODE' class TestAlarmControlPanelManualMqtt(unittest.TestCase): """Test the manual_mqtt alarm module.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.mock_publish = mock_mqtt_component(self.hass) def tearDown(self): # pylint: disable=invalid-name """Stop down everything that was started.""" self.hass.stop() def test_fail_setup_without_state_topic(self): """Test for failing with no state topic.""" with assert_setup_component(0) as config: assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'mqtt_alarm', 'command_topic': 'alarm/command' } }) assert not config[alarm_control_panel.DOMAIN] def test_fail_setup_without_command_topic(self): """Test failing with no command topic.""" with assert_setup_component(0): assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'mqtt_alarm', 'state_topic': 'alarm/state' } }) def test_arm_home_no_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 
'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_home(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_HOME, self.hass.states.get(entity_id).state) def test_arm_home_with_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) state = self.hass.states.get(entity_id) assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_HOME future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_HOME, self.hass.states.get(entity_id).state) def test_arm_home_with_invalid_code(self): """Attempt to arm home without a valid code.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_home(self.hass, CODE + '2') self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_arm_away_no_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) def test_arm_home_with_template_code(self): """Attempt to arm with a template-based code.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code_template': '{{ "abc" }}', 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.hass.start() self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, 
self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_home(self.hass, 'abc') self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_ARMED_HOME, state.state) def test_arm_away_with_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) state = self.hass.states.get(entity_id) assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_AWAY future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) def test_arm_away_with_invalid_code(self): """Attempt to arm away without a valid code.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE + '2') self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_arm_night_no_pending(self): """Test arm night method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_NIGHT, self.hass.states.get(entity_id).state) def test_arm_night_with_pending(self): """Test arm night method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_night(self.hass, CODE) 
self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) state = self.hass.states.get(entity_id) assert state.attributes['post_pending_state'] == \ STATE_ALARM_ARMED_NIGHT future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_NIGHT, self.hass.states.get(entity_id).state) # Do not go to the pending state when updating to the same state alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_NIGHT, self.hass.states.get(entity_id).state) def test_arm_night_with_invalid_code(self): """Attempt to arm night without a valid code.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_night(self.hass, CODE + '2') self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_no_pending(self): """Test triggering when no pending submitted method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'trigger_time': 1, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() 
self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=60) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) def test_trigger_with_delay(self): """Test trigger method and switch from pending to triggered.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'delay_time': 1, 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_PENDING, state.state) self.assertEqual(STATE_ALARM_TRIGGERED, state.attributes['post_pending_state']) future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_TRIGGERED, state.state) def test_trigger_zero_trigger_time(self): """Test disabled trigger.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 0, 'trigger_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_zero_trigger_time_with_pending(self): """Test disabled trigger.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 2, 'trigger_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_with_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 2, 'trigger_time': 3, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() 
self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) state = self.hass.states.get(entity_id) assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED future = dt_util.utcnow() + timedelta(seconds=2) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_with_disarm_after_trigger(self): """Test disarm after trigger.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'trigger_time': 5, 'pending_time': 0, 'disarm_after_trigger': True, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_with_zero_specific_trigger_time(self): """Test trigger method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'trigger_time': 5, 'disarmed': { 'trigger_time': 0 }, 'pending_time': 0, 'disarm_after_trigger': True, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_with_unused_zero_specific_trigger_time(self): """Test disarm after trigger.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'trigger_time': 5, 'armed_home': { 'trigger_time': 0 }, 'pending_time': 0, 'disarm_after_trigger': True, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_trigger_with_specific_trigger_time(self): """Test disarm after trigger.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'disarmed': { 'trigger_time': 5 }, 'pending_time': 0, 'disarm_after_trigger': True, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_back_to_back_trigger_with_no_disarm_after_trigger(self): """Test no disarm after back to back trigger.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'trigger_time': 5, 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, 
self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) def test_disarm_while_pending_trigger(self): """Test disarming while pending state.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'trigger_time': 5, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_disarm_during_trigger_with_invalid_code(self): """Test disarming while code is invalid.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 5, 'code': CODE + '2', 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) def test_trigger_with_unused_specific_delay(self): """Test trigger method and switch from pending to triggered.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'delay_time': 5, 'pending_time': 0, 'armed_home': { 'delay_time': 10 }, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_PENDING, state.state) self.assertEqual(STATE_ALARM_TRIGGERED, state.attributes['post_pending_state']) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_TRIGGERED def test_trigger_with_specific_delay(self): """Test trigger method and switch from pending to triggered.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'delay_time': 10, 'pending_time': 0, 'armed_away': { 'delay_time': 1 }, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_PENDING, state.state) self.assertEqual(STATE_ALARM_TRIGGERED, state.attributes['post_pending_state']) future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_TRIGGERED def test_trigger_with_pending_and_delay(self): """Test trigger method and switch from pending to triggered.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'delay_time': 1, 'pending_time': 0, 'triggered': { 'pending_time': 1 }, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_PENDING assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_PENDING assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED future += timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_TRIGGERED def test_trigger_with_pending_and_specific_delay(self): """Test trigger method and switch from pending to triggered.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'delay_time': 10, 'pending_time': 0, 'armed_away': { 'delay_time': 1 }, 'triggered': { 'pending_time': 1 }, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state' }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_PENDING assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_PENDING assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED future += timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) assert state.state == STATE_ALARM_TRIGGERED def test_armed_home_with_specific_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 10, 'armed_home': { 'pending_time': 2 }, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' alarm_control_panel.alarm_arm_home(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=2) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_HOME, self.hass.states.get(entity_id).state) def test_armed_away_with_specific_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 10, 'armed_away': { 'pending_time': 2 }, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' alarm_control_panel.alarm_arm_away(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=2) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) def test_armed_night_with_specific_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 10, 'armed_night': { 'pending_time': 2 }, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' alarm_control_panel.alarm_arm_night(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=2) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_NIGHT, self.hass.states.get(entity_id).state) def test_trigger_with_specific_pending(self): """Test arm home method.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 10, 'triggered': { 'pending_time': 2 }, 'trigger_time': 3, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=2) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_TRIGGERED, self.hass.states.get(entity_id).state) future = dt_util.utcnow() + timedelta(seconds=5) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_arm_away_after_disabled_disarmed(self): """Test pending state with and without zero trigger time.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code': CODE, 'pending_time': 0, 'delay_time': 1, 'armed_away': { 'pending_time': 1, }, 'disarmed': { 'trigger_time': 0 }, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_away(self.hass, CODE) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_PENDING, state.state) self.assertEqual(STATE_ALARM_DISARMED, state.attributes['pre_pending_state']) self.assertEqual(STATE_ALARM_ARMED_AWAY, state.attributes['post_pending_state']) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_PENDING, state.state) self.assertEqual(STATE_ALARM_DISARMED, state.attributes['pre_pending_state']) self.assertEqual(STATE_ALARM_ARMED_AWAY, state.attributes['post_pending_state']) future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_ARMED_AWAY, state.state) alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_PENDING, state.state) self.assertEqual(STATE_ALARM_ARMED_AWAY, state.attributes['pre_pending_state']) self.assertEqual(STATE_ALARM_TRIGGERED, state.attributes['post_pending_state']) future += timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_TRIGGERED, state.state) def test_disarm_with_template_code(self): """Attempt to disarm with a valid or invalid template-based code.""" self.assertTrue(setup_component( self.hass, alarm_control_panel.DOMAIN, {'alarm_control_panel': { 'platform': 'manual_mqtt', 'name': 'test', 'code_template': '{{ "" if from_state == "disarmed" else "abc" }}', 'pending_time': 0, 'disarm_after_trigger': False, 'command_topic': 'alarm/command', 'state_topic': 'alarm/state', }})) entity_id = 'alarm_control_panel.test' self.hass.start() self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_arm_home(self.hass, 'def') self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_ARMED_HOME, state.state) alarm_control_panel.alarm_disarm(self.hass, 'def') self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_ARMED_HOME, state.state) alarm_control_panel.alarm_disarm(self.hass, 'abc') self.hass.block_till_done() state = self.hass.states.get(entity_id) self.assertEqual(STATE_ALARM_DISARMED, state.state) def 
test_arm_home_via_command_topic(self): """Test arming home via command topic.""" assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 1, 'state_topic': 'alarm/state', 'command_topic': 'alarm/command', 'payload_arm_home': 'ARM_HOME', } }) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) # Fire the arm command via MQTT; ensure state changes to pending fire_mqtt_message(self.hass, 'alarm/command', 'ARM_HOME') self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) # Fast-forward a little bit future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_HOME, self.hass.states.get(entity_id).state) def test_arm_away_via_command_topic(self): """Test arming away via command topic.""" assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 1, 'state_topic': 'alarm/state', 'command_topic': 'alarm/command', 'payload_arm_away': 'ARM_AWAY', } }) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) # Fire the arm command via MQTT; ensure state changes to pending fire_mqtt_message(self.hass, 'alarm/command', 'ARM_AWAY') self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) # Fast-forward a little bit future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_AWAY, self.hass.states.get(entity_id).state) def test_arm_night_via_command_topic(self): """Test arming night via command topic.""" assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 1, 'state_topic': 'alarm/state', 'command_topic': 'alarm/command', 'payload_arm_night': 'ARM_NIGHT', } }) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) # Fire the arm command via MQTT; ensure state changes to pending fire_mqtt_message(self.hass, 'alarm/command', 'ARM_NIGHT') self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) # Fast-forward a little bit future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 
'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.assertEqual(STATE_ALARM_ARMED_NIGHT, self.hass.states.get(entity_id).state) def test_disarm_pending_via_command_topic(self): """Test disarming pending alarm via command topic.""" assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 1, 'state_topic': 'alarm/state', 'command_topic': 'alarm/command', 'payload_disarm': 'DISARM', } }) entity_id = 'alarm_control_panel.test' self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) alarm_control_panel.alarm_trigger(self.hass) self.hass.block_till_done() self.assertEqual(STATE_ALARM_PENDING, self.hass.states.get(entity_id).state) # Now that we're pending, receive a command to disarm fire_mqtt_message(self.hass, 'alarm/command', 'DISARM') self.hass.block_till_done() self.assertEqual(STATE_ALARM_DISARMED, self.hass.states.get(entity_id).state) def test_state_changes_are_published_to_mqtt(self): """Test publishing of MQTT messages when state changes.""" assert setup_component(self.hass, alarm_control_panel.DOMAIN, { alarm_control_panel.DOMAIN: { 'platform': 'manual_mqtt', 'name': 'test', 'pending_time': 1, 'trigger_time': 1, 'state_topic': 'alarm/state', 'command_topic': 'alarm/command', } }) # Component should send disarmed alarm state on startup self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_DISARMED, 0, True) self.mock_publish.async_publish.reset_mock() # Arm in home mode alarm_control_panel.alarm_arm_home(self.hass) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_PENDING, 0, True) self.mock_publish.async_publish.reset_mock() # Fast-forward a little bit future = dt_util.utcnow() + timedelta(seconds=1) with 
patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_ARMED_HOME, 0, True) self.mock_publish.async_publish.reset_mock() # Arm in away mode alarm_control_panel.alarm_arm_away(self.hass) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_PENDING, 0, True) self.mock_publish.async_publish.reset_mock() # Fast-forward a little bit future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_ARMED_AWAY, 0, True) self.mock_publish.async_publish.reset_mock() # Arm in night mode alarm_control_panel.alarm_arm_night(self.hass) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_PENDING, 0, True) self.mock_publish.async_publish.reset_mock() # Fast-forward a little bit future = dt_util.utcnow() + timedelta(seconds=1) with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.' 'dt_util.utcnow'), return_value=future): fire_time_changed(self.hass, future) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_ARMED_NIGHT, 0, True) self.mock_publish.async_publish.reset_mock() # Disarm alarm_control_panel.alarm_disarm(self.hass) self.hass.block_till_done() self.mock_publish.async_publish.assert_called_once_with( 'alarm/state', STATE_ALARM_DISARMED, 0, True)
persandstrom/home-assistant
tests/components/alarm_control_panel/test_manual_mqtt.py
Python
apache-2.0
56,123
# -*- coding: utf-8 -*-

'''
    Uncoded Add-on

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

# Kodi plugin entry point / URL dispatcher (Python 2).
#
# Kodi invokes this script with the plugin:// URL's query string in
# sys.argv[2].  Every dispatch branch below imports only the module it
# needs: the lazy imports keep add-on start-up fast inside Kodi.

import urlparse
import sys
import urllib

# All recognised query-string parameters; absent ones come back as None.
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', '')))

action = params.get('action')
name = params.get('name')
title = params.get('title')
year = params.get('year')
imdb = params.get('imdb')
tvdb = params.get('tvdb')
tmdb = params.get('tmdb')
season = params.get('season')
episode = params.get('episode')
tvshowtitle = params.get('tvshowtitle')
premiered = params.get('premiered')
url = params.get('url')
image = params.get('image')
meta = params.get('meta')
select = params.get('select')
query = params.get('query')
source = params.get('source')
content = params.get('content')

# Only the literal strings "0"/"1" are accepted; anything else means 0.
windowedtrailer = params.get('windowedtrailer')
windowedtrailer = int(windowedtrailer) if windowedtrailer in ("0", "1") else 0

# --------------------------------------------------------------- navigation
if action is None:
    # No action given: first start - check the cache version, then show
    # the root menu.
    from resources.lib.indexers import navigator
    from resources.lib.modules import cache
    cache.cache_version_check()
    navigator.navigator().root()

elif action == 'movieNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().movies()

elif action == 'movieliteNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().movies(lite=True)

elif action == 'mymovieNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().mymovies()

elif action == 'mymovieliteNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().mymovies(lite=True)

elif action == 'tvNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().tvshows()

elif action == 'tvliteNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().tvshows(lite=True)

elif action == 'mytvNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().mytvshows()

elif action == 'mytvliteNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().mytvshows(lite=True)

elif action == 'downloadNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().downloads()

elif action == 'libraryNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().library()

elif action == 'toolNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().tools()

elif action == 'searchNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().search()

elif action == 'viewsNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().views()

elif action == 'clearCache':
    from resources.lib.indexers import navigator
    navigator.navigator().clearCache()

elif action == 'clearCacheSearch':
    from resources.lib.indexers import navigator
    navigator.navigator().clearCacheSearch()

elif action == 'infoCheck':
    from resources.lib.indexers import navigator
    navigator.navigator().infoCheck('')

# ------------------------------------------------------------------- movies
elif action == 'movies':
    from resources.lib.indexers import movies
    movies.movies().get(url)

elif action == 'moviePage':
    from resources.lib.indexers import movies
    movies.movies().get(url)

elif action == 'movieWidget':
    from resources.lib.indexers import movies
    movies.movies().widget()

elif action == 'movieSearch':
    from resources.lib.indexers import movies
    movies.movies().search()

elif action == 'movieSearchnew':
    from resources.lib.indexers import movies
    movies.movies().search_new()

elif action == 'movieSearchterm':
    from resources.lib.indexers import movies
    movies.movies().search_term(name)

elif action == 'moviePerson':
    from resources.lib.indexers import movies
    movies.movies().person()

elif action == 'movieGenres':
    from resources.lib.indexers import movies
    movies.movies().genres()

elif action == 'movieLanguages':
    from resources.lib.indexers import movies
    movies.movies().languages()

elif action == 'movieCertificates':
    from resources.lib.indexers import movies
    movies.movies().certifications()

elif action == 'movieYears':
    from resources.lib.indexers import movies
    movies.movies().years()

elif action == 'moviePersons':
    from resources.lib.indexers import movies
    movies.movies().persons(url)

elif action == 'movieUserlists':
    from resources.lib.indexers import movies
    movies.movies().userlists()

elif action == 'channels':
    from resources.lib.indexers import channels
    channels.channels().get()

# ----------------------------------------------------------------- tv shows
elif action == 'tvshows':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().get(url)

elif action == 'tvshowPage':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().get(url)

elif action == 'tvSearch':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().search()

elif action == 'tvSearchnew':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().search_new()

elif action == 'tvSearchterm':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().search_term(name)

elif action == 'tvPerson':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().person()

elif action == 'tvGenres':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().genres()

elif action == 'tvNetworks':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().networks()

elif action == 'tvLanguages':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().languages()

elif action == 'tvCertificates':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().certifications()

elif action == 'tvPersons':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().persons(url)

elif action == 'tvUserlists':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().userlists()

# --------------------------------------------------------- seasons/episodes
elif action == 'seasons':
    from resources.lib.indexers import episodes
    episodes.seasons().get(tvshowtitle, year, imdb, tvdb)

elif action == 'episodes':
    from resources.lib.indexers import episodes
    episodes.episodes().get(tvshowtitle, year, imdb, tvdb, season, episode)

elif action == 'calendar':
    from resources.lib.indexers import episodes
    episodes.episodes().calendar(url)

elif action == 'tvWidget':
    from resources.lib.indexers import episodes
    episodes.episodes().widget()

elif action == 'calendars':
    from resources.lib.indexers import episodes
    episodes.episodes().calendars()

elif action == 'episodeUserlists':
    from resources.lib.indexers import episodes
    episodes.episodes().userlists()

# ------------------------------------------------------------ misc controls
elif action == 'refresh':
    from resources.lib.modules import control
    control.refresh()

elif action == 'queueItem':
    from resources.lib.modules import control
    control.queueItem()

elif action == 'openSettings':
    from resources.lib.modules import control
    control.openSettings(query)

elif action == 'artwork':
    from resources.lib.modules import control
    control.artwork()

elif action == 'addView':
    # BUGFIX: this branch previously imported `control` but called
    # `views.addView`, which raised a NameError at runtime.
    from resources.lib.modules import views
    views.addView(content)

elif action == 'moviePlaycount':
    from resources.lib.modules import playcount
    playcount.movies(imdb, query)

elif action == 'episodePlaycount':
    from resources.lib.modules import playcount
    playcount.episodes(imdb, tvdb, season, episode, query)

elif action == 'tvPlaycount':
    from resources.lib.modules import playcount
    playcount.tvshows(name, imdb, tvdb, season, query)

elif action == 'trailer':
    from resources.lib.modules import trailer
    trailer.trailer().play(name, url, windowedtrailer)

elif action == 'traktManager':
    from resources.lib.modules import trakt
    trakt.manager(name, imdb, tvdb, content)

elif action == 'authTrakt':
    from resources.lib.modules import trakt
    trakt.authTrakt()

elif action == 'smuSettings':
    # Open the urlresolver settings dialog; quietly do nothing when the
    # urlresolver add-on is not installed.  (Previously only the import
    # was guarded, so a missing add-on crashed with a NameError on the
    # display_settings() call.)
    try:
        import urlresolver
    except:
        urlresolver = None
    if urlresolver is not None:
        urlresolver.display_settings()

# ----------------------------------------------------------------- playback
elif action == 'download':
    import json
    from resources.lib.modules import sources
    from resources.lib.modules import downloader
    # Best-effort download; resolver/JSON failures are silently ignored
    # (the user simply gets no download rather than an error screen).
    try:
        downloader.download(
            name, image,
            sources.sources().sourcesResolve(json.loads(source)[0], True))
    except:
        pass

elif action == 'play':
    from resources.lib.modules import sources
    sources.sources().play(title, year, imdb, tvdb, season, episode,
                           tvshowtitle, premiered, meta, select)

elif action == 'addItem':
    from resources.lib.modules import sources
    sources.sources().addItem(title)

elif action == 'playItem':
    from resources.lib.modules import sources
    sources.sources().playItem(title, source)

elif action == 'alterSources':
    from resources.lib.modules import sources
    sources.sources().alterSources(url, meta)

elif action == 'clearSources':
    from resources.lib.modules import sources
    sources.sources().clearSources()

# -------------------------------------------------------------- random play
elif action == 'random':
    # Pick a random item of the requested type and re-invoke the plugin so
    # that it plays (or recurses one level down: show -> season -> episode).
    rtype = params.get('rtype')

    if rtype == 'movie':
        from resources.lib.indexers import movies
        rlist = movies.movies().get(url, create_directory=False)
        r = sys.argv[0] + "?action=play"
    elif rtype == 'episode':
        from resources.lib.indexers import episodes
        rlist = episodes.episodes().get(tvshowtitle, year, imdb, tvdb,
                                        season, create_directory=False)
        r = sys.argv[0] + "?action=play"
    elif rtype == 'season':
        from resources.lib.indexers import episodes
        rlist = episodes.seasons().get(tvshowtitle, year, imdb, tvdb,
                                       create_directory=False)
        r = sys.argv[0] + "?action=random&rtype=episode"
    elif rtype == 'show':
        from resources.lib.indexers import tvshows
        rlist = tvshows.tvshows().get(url, create_directory=False)
        r = sys.argv[0] + "?action=random&rtype=season"

    from resources.lib.modules import control
    from random import randint
    import json
    try:
        rand = randint(1, len(rlist)) - 1
        for p in ['title', 'year', 'imdb', 'tvdb', 'season', 'episode',
                  'tvshowtitle', 'premiered', 'select']:
            if rtype == "show" and p == "tvshowtitle":
                # A show carries its name in 'title'; forward it as
                # 'tvshowtitle' for the next dispatch level.
                try:
                    r += '&' + p + '=' + urllib.quote_plus(rlist[rand]['title'])
                except:
                    pass
            else:
                try:
                    r += '&' + p + '=' + urllib.quote_plus(rlist[rand][p])
                except:
                    pass
        try:
            r += '&meta=' + urllib.quote_plus(json.dumps(rlist[rand]))
        except:
            r += '&meta=' + urllib.quote_plus("{}")

        if rtype == "movie":
            try:
                control.infoDialog(rlist[rand]['title'],
                                   control.lang(32536).encode('utf-8'),
                                   time=30000)
            except:
                pass
        elif rtype == "episode":
            try:
                control.infoDialog(
                    rlist[rand]['tvshowtitle'] + " - Season " +
                    rlist[rand]['season'] + " - " + rlist[rand]['title'],
                    control.lang(32536).encode('utf-8'),
                    time=30000)
            except:
                pass
        control.execute('RunPlugin(%s)' % r)
    except:
        control.infoDialog(control.lang(32537).encode('utf-8'), time=8000)

# ------------------------------------------------------------------ library
elif action == 'movieToLibrary':
    from resources.lib.modules import libtools
    libtools.libmovies().add(name, title, year, imdb, tmdb)

elif action == 'moviesToLibrary':
    from resources.lib.modules import libtools
    libtools.libmovies().range(url)

elif action == 'tvshowToLibrary':
    from resources.lib.modules import libtools
    libtools.libtvshows().add(tvshowtitle, year, imdb, tvdb)

elif action == 'tvshowsToLibrary':
    from resources.lib.modules import libtools
    libtools.libtvshows().range(url)

elif action == 'updateLibrary':
    from resources.lib.modules import libtools
    libtools.libepisodes().update(query)

elif action == 'service':
    from resources.lib.modules import libtools
    libtools.libepisodes().service()
TheWardoctor/Wardoctors-repo
plugin.video.uncoded/uncoded.py
Python
apache-2.0
12,280
#
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#           http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class DictUtils(object):
    '''
    Provides dict services
    '''

    @staticmethod
    def exclude(dct, keys=None):
        """
        Removes given items from the dict, in place.

        @param dct: the dict to look at (may be None or empty)
        @param keys: iterable of keys of items to pop; missing keys and a
                     None/absent *keys* argument are silently ignored

        @return: updated dict (the same object that was passed in)
        """
        # NOTE: default changed from a shared mutable `keys=[]` to None,
        # and dct.has_key(key) replaced by `key in dct` — has_key() was
        # removed in Python 3 and `in` is equivalent on Python 2.
        if dct:
            for key in (keys or []):
                if key in dct:
                    dct.pop(key)
        # The docstring always promised the updated dict; actually return it
        # so callers can chain the call (previously returned None).
        return dct
oVirt/ovirt-engine-sdk-tests
src/utils/dictutils.py
Python
apache-2.0
1,025
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ import horizon class BasePanels(horizon.PanelGroup): slug = "compute" name = _("Manage Compute") panels = ('overview', 'instances', 'volumes', 'images_and_snapshots', 'access_and_security',) class NetworkPanels(horizon.PanelGroup): slug = "network" name = _("Manage Network") panels = ('networks', 'routers', 'loadbalancers', 'network_topology',) class ObjectStorePanels(horizon.PanelGroup): slug = "object_store" name = _("Object Store") panels = ('containers',) class BackupJobPanels(horizon.PanelGroup): slug = "backupjobs" name = _("Backup Jobs") panels = ('backupjobs',) class Project(horizon.Dashboard): name = _("Project") slug = "project" panels = (BasePanels, NetworkPanels, ObjectStorePanels, BackupJobPanels) default_panel = 'overview' supports_tenants = True horizon.register(Project)
DPaaS-Raksha/horizon
openstack_dashboard/dashboards/project/dashboard.py
Python
apache-2.0
1,678
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com

import datetime
import json
import sys
import time

from flask import current_app


class DateTimeEncoder(json.JSONEncoder):
    """Custom JSON Encoder to handle datetime objects

    from:
       `http://stackoverflow.com/questions/12122007/python-json-encoder-to-support-datetime`_
    also consider:
       `http://hg.tryton.org/2.4/trytond/file/ade5432ac476/trytond/protocols/jsonrpc.py#l53`_
    """
    def default(self, obj):
        # Serialize datetime/date values as ISO-8601 strings and timedelta
        # as a time-of-day string; anything else is deferred to the base
        # encoder (which raises TypeError for unsupported types).
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        elif isinstance(obj, datetime.date):
            return obj.isoformat()
        elif isinstance(obj, datetime.timedelta):
            return (datetime.datetime.min + obj).time().isoformat()
        else:
            return super(DateTimeEncoder, self).default(obj)


class UnicodeSafeJsonWrapper(dict):
    """JSON received via POST has keys as unicode. This makes get work with
    plain `str` keys.
    """
    def __getitem__(self, key):
        # Route through .get() so str keys match unicode keys.
        # NOTE(review): a key stored with value None is indistinguishable
        # from a missing key here and raises KeyError.
        ret = self.get(key)
        if ret is None:
            raise KeyError(key)
        return ret

    def get(self, key, default=None):
        # Coerce the key to unicode before the lookup (Python 2: JSON keys
        # arrive as unicode).
        return super(UnicodeSafeJsonWrapper, self).get(unicode(key), default)  # noqa


def as_json(obj, **kwargs):
    # Serialize *obj* to JSON using DateTimeEncoder for date/time values;
    # extra keyword args are forwarded to json.dumps.
    return json.dumps(obj, cls=DateTimeEncoder, **kwargs)


def service_for(obj):
    # Look up the attribute named after obj's model type on the
    # ggrc.services module; *obj* may be a model instance or a model-name
    # string. Returns None when no such attribute is registered.
    module = sys.modules['ggrc.services']
    if type(obj) is str or type(obj) is unicode:  # noqa
        model_type = obj
    else:
        model_type = obj.__class__.__name__
    return getattr(module, model_type, None)


def url_for(obj, id=None):
    # Build the API URL for *obj* via its registered service, or for an
    # explicit *id* when given; None when no service is registered.
    service = service_for(obj)
    if service is None:
        return None
    if id is not None:
        return service.url_for(id=id)
    return service.url_for(obj)


def view_service_for(obj):
    # Same lookup as service_for(), but against the ggrc.views module.
    module = sys.modules['ggrc.views']
    if type(obj) is str or type(obj) is unicode:  # noqa
        model_type = obj
    else:
        model_type = obj.__class__.__name__
    return getattr(module, model_type, None)


def view_url_for(obj, id=None):
    # Build the HTML view URL for *obj* (or an explicit *id*); None when no
    # view service is registered for the object's type.
    service = view_service_for(obj)
    if service is None:
        return None
    if id is not None:
        return service.url_for(id=id)
    return service.url_for(obj)


def encoded_dict(in_dict):
    # Return a copy of *in_dict* with unicode values encoded to UTF-8 byte
    # strings, suitable for urllib.urlencode (which mishandles unicode).
    # http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
    out_dict = {}
    for k, v in in_dict.iteritems():
        if isinstance(v, unicode):  # noqa
            v = v.encode('utf8')
        elif isinstance(v, str):
            # Must be encoded in UTF-8 — the decode result is discarded;
            # this only validates the bytes (raises UnicodeDecodeError).
            v.decode('utf8')
        out_dict[k] = v
    return out_dict


def merge_dict(destination, source, path=None):
    """merges source into destination

    Recursively copies keys from *source* into *destination* in place.
    Nested dicts are merged; equal leaf values are kept; conflicting leaf
    values raise Exception naming the dotted path. Returns *destination*.
    """
    if path is None:
        path = []
    for key in source:
        if key in destination:
            if isinstance(destination[key], dict) and \
               isinstance(source[key], dict):
                merge_dict(destination[key], source[key], path + [str(key)])
            elif destination[key] == source[key]:
                pass  # same leaf value
            else:
                raise Exception(
                    'Conflict at %s' % '.'.join(path + [str(key)]))
        else:
            destination[key] = source[key]
    return destination


def merge_dicts(*args):
    # Merge any number of dicts into a fresh dict via merge_dict();
    # later arguments win only for non-conflicting additions (conflicts
    # raise, per merge_dict).
    result = {}
    for arg in args:
        result = merge_dict(result, arg)
    return result


class BenchmarkContextManager(object):
    # Context manager that logs the wall-clock duration of its body,
    # prefixed to *message*, at INFO level on the Flask app logger.
    def __init__(self, message):
        self.message = message

    def __enter__(self):
        self.start = time.time()

    def __exit__(self, exc_type, exc_value, exc_trace):
        end = time.time()
        current_app.logger.info(
            "{:.4f} {}".format(end - self.start, self.message))

# Short alias used as `with benchmark("..."):`.
benchmark = BenchmarkContextManager
vladan-m/ggrc-core
src/ggrc/utils.py
Python
apache-2.0
3,658
# Copyright 2009-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test the pymongo module itself.""" import unittest import os import sys sys.path[0:0] = [""] import pymongo from test import host, port class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient c = pymongo.MongoClient(host, port) self.assertEqual(c.host, host) self.assertEqual(c.port, port) if __name__ == "__main__": unittest.main()
antonnik/code-classifier
naive_bayes/resources/python/test_pymongo.py
Python
apache-2.0
1,040
# SPDX-License-Identifer: Apache-2.0 # Copyright 2021 The Meson development team from pathlib import Path import pickle from .loaderbase import LoaderBase from .model import ReferenceManual class LoaderPickle(LoaderBase): def __init__(self, in_file: Path) -> None: super().__init__() self.in_file = in_file def load_impl(self) -> ReferenceManual: res = pickle.loads(self.in_file.read_bytes()) assert isinstance(res, ReferenceManual) return res # Assume that the pickled data is OK and skip validation def load(self) -> ReferenceManual: return self.load_impl()
mesonbuild/meson
docs/refman/loaderpickle.py
Python
apache-2.0
629
# # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import zope import oz.Fedora import oz.TDL import subprocess import os import re import guestfs import string import libxml2 import traceback import ConfigParser import boto.ec2 import sys from time import * from tempfile import * from imgfac.ApplicationConfiguration import ApplicationConfiguration from imgfac.ImageFactoryException import ImageFactoryException from imgfac.ReservationManager import ReservationManager from boto.s3.connection import S3Connection from boto.s3.connection import Location from boto.exception import * from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping from imgfac.CloudDelegate import CloudDelegate # Boto is very verbose - shut it up logging.getLogger('boto').setLevel(logging.INFO) def subprocess_check_output(*popenargs, **kwargs): if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *popenargs, **kwargs) stdout, stderr = process.communicate() retcode = process.poll() if retcode: cmd = ' '.join(*popenargs) raise ImageFactoryException("'%s' failed(%d): %s" % (cmd, retcode, stderr)) return (stdout, stderr, retcode) class EC2Cloud(object): zope.interface.implements(CloudDelegate) def activity(self, activity): # Simple helper function # Activity should be a one line human-readable string indicating the task in progress # 
We log it at DEBUG and also set it as the status_detail on our active image self.log.debug(activity) self.active_image.status_detail['activity'] = activity def __init__(self): # Note that we are now missing ( template, target, config_block = None): super(EC2Cloud, self).__init__() self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) config_obj = ApplicationConfiguration() self.app_config = config_obj.configuration self.oz_config = ConfigParser.SafeConfigParser() self.oz_config.read("/etc/oz/oz.cfg") self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"]) if "ec2" in config_obj.jeos_images: self.ec2_jeos_amis = config_obj.jeos_images['ec2'] else: self.log.warning("No JEOS amis defined for ec2. Snapshot builds will not be possible.") self.ec2_jeos_amis = {} def builder_should_create_target_image(self, builder, target, image_id, template, parameters): self.log.info('builder_should_create_target_image() called on EC2Cloud plugin - returning True') return True def builder_will_create_target_image(self, builder, target, image_id, template, parameters): # Nothing really to do here pass def builder_did_create_target_image(self, builder, target, image_id, template, parameters): self.log.info('builder_did_create_target_image() called in EC2Cloud plugin') # The bulk of what is done here is EC2 specific # There are OS conditionals thrown in at the moment # For now we are putting everything into the EC2 Cloud plugin # TODO: Revisit this, and the plugin interface, to see if there are ways to # make the separation cleaner # This lets our logging helper know what image is being operated on self.builder = builder self.active_image = self.builder.target_image try: # TODO: More convenience vars - revisit self.template = template self.target = target self.tdlobj = oz.TDL.TDL(xmlstring=self.template.xml, rootpw_required=True) self._get_os_helper() # Add in target specific content self.add_target_content() # TODO: This is a convenience variable for 
refactoring - rename self.new_image_id = builder.target_image.identifier # This lets our logging helper know what image is being operated on self.activity("Initializing Oz environment") # Create a name combining the TDL name and the UUID for use when tagging EC2 AMIs self.longname = self.tdlobj.name + "-" + self.new_image_id # Oz assumes unique names - TDL built for multiple backends guarantees they are not unique # We don't really care about the name so just force uniqueness self.tdlobj.name = "factory-build-" + self.new_image_id # populate a config object to pass to OZ; this allows us to specify our # own output dir but inherit other Oz behavior self.oz_config = ConfigParser.SafeConfigParser() self.oz_config.read("/etc/oz/oz.cfg") self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"]) # make this a property to enable quick cleanup on abort self.instance = None # OK great, we now have a customized KVM image # Now we do some target specific transformation # None of these things actually require anything other than the TDL object # and the original disk image # At this point our builder has a target_image and a base_image # OS plugin has already provided the initial file for us to work with # which we can currently assume is a raw KVM compatible image self.image = builder.target_image.data self.modify_oz_filesystem() self.ec2_copy_filesystem() self.ec2_modify_filesystem() except: self.log_exc() self.status="FAILED" raise self.percent_complete=100 self.status="COMPLETED" def _get_os_helper(self): # For now we are adopting a 'mini-plugin' approach to OS specific code within the EC2 plugin # In theory, this could live in the OS plugin - however, the code in question is very tightly # related to the EC2 plugin, so it probably should stay here try: # Change RHEL-6 to RHEL6, etc. 
os_name = self.tdlobj.distro.translate(None, '-') class_name = "%s_ec2_Helper" % (os_name) module_name = "imagefactory_plugins.EC2Cloud.EC2CloudOSHelpers" __import__(module_name) os_helper_class = getattr(sys.modules[module_name], class_name) self.os_helper = os_helper_class(self) except: self.log_exc() raise ImageFactoryException("Unable to create EC2 OS helper object for distro (%s) in TDL" % (self.tdlobj.distro) ) def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters): self.log.info('push_image_to_provider() called in EC2Cloud') self.builder = builder self.active_image = self.builder.provider_image # TODO: This is a convenience variable for refactoring - rename self.new_image_id = builder.provider_image.identifier self.tdlobj = oz.TDL.TDL(xmlstring=builder.target_image.template, rootpw_required=True) self._get_os_helper() self.push_image_upload(target_image, provider, credentials) def delete_from_provider(self, builder, provider, credentials, target, parameters): self.log.debug("Deleting AMI (%s)" % (self.builder.provider_image.identifier_on_provider)) self.activity("Preparing EC2 region details") region=provider region_conf=self.ec2_region_details[region] boto_loc = region_conf['boto_loc'] if region != "ec2-us-east-1": s3_url = "http://s3-%s.amazonaws.com/" % (region_conf['host']) else: # Note to Amazon - would it be that hard to have s3-us-east-1.amazonaws.com? 
s3_url = "http://s3.amazonaws.com/" self.ec2_decode_credentials(credentials) ec2region = boto.ec2.get_region(boto_loc, aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key) conn = ec2region.connect(aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key) amis = conn.get_all_images([ self.builder.provider_image.identifier_on_provider ]) if len(amis) == 0: raise ImageFactoryException("Unable to find AMI (%s) - cannot delete it" % (self.builder.provider_image.identifier_on_provider)) if len(amis) > 1: raise ImageFactoryException("AMI lookup during delete returned more than one result - this should never happen - aborting") if ami.root_device_type == "ebs": self.log.debug("This is an EBS AMI") # Disect the block device mapping to identify the snapshots bd_map = ami.block_device_mapping self.log.debug("De-registering AMI") ami.deregister() self.log.debug("Deleting EBS snapshots associated with AMI") for bd in bd_map: self.log.debug("Deleting bd snapshot (%s) for bd (%s)" % (bd_map[bd].snapshot_id, bd)) conn.delete_snapshot(bd_map[bd].snapshot_id) else: self.log.debug("This is an S3 AMI") s3_conn = boto.s3.connection.S3Connection(aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key, host=s3_url) # Disect the location to get the bucket and key for the manifest (bucket, key) = split(ami.location, '/', 1) self.log.debug("Retrieving S3 AMI manifest from bucket (%s) at key (%s)" % (bucket, key)) bucket = s3_conn.get_bucket(bucket) key_obj = bucket.get_key(key) manifest = key_obj.get_contents_as_string() # It is possible that the key has a path-like structure" # The XML contains only filenames - not path components # so extract any "directory" type stuff here keyprefix = "" keysplit = rsplit(key,"/",1) if len(keysplit) == 2: keyprefix="%s/" % (keysplit[0]) self.log.debug("Deleting S3 image disk chunks") man_etree = ElementTree.fromstring(manifest) for part in 
man_etree.find("image").find("parts").findall("part"): filename = part.find("filename").text fullname = "%s%s" % (keyprefix, filename) part_key_obj = bucket.get_key(fullname) self.log.debug("Deleting %s" % (fullname)) part_key_obj.delete() self.log.debug("Deleting manifest object %s" % (key)) key_obj.delete() self.log.debug("de-registering the AMI itself") ami.deregister() def log_exc(self): self.log.debug("Exception caught in ImageFactory") self.log.debug(traceback.format_exc()) self.active_image.status_detail['error'] = traceback.format_exc() def modify_oz_filesystem(self): self.activity("Removing unique identifiers from image - Adding cloud information") self.log.debug("init guestfs") g = guestfs.GuestFS () self.log.debug("add input image") g.add_drive (self.image) self.log.debug("launch guestfs") g.launch () g.mount_options("", "/dev/VolGroup00/LogVol00", "/") # F16 and upwards end up with boot on sda2 due to GRUB changes if (self.tdlobj.distro == 'Fedora') and (int(self.tdlobj.update) >= 16): g.mount_options("", "/dev/sda2", "/boot") else: g.mount_options("", "/dev/sda1", "/boot") self.log.info("Creating cloud-info file indicating target (%s)" % (self.target)) tmpl = 'CLOUD_TYPE="%s"\n' % (self.target) g.write("/etc/sysconfig/cloud-info", tmpl) # In the cloud context we currently never need or want persistent net device names # This is known to break networking in RHEL/VMWare and could potentially do so elsewhere # Just delete the file to be safe if g.is_file("/etc/udev/rules.d/70-persistent-net.rules"): g.rm("/etc/udev/rules.d/70-persistent-net.rules") # Also clear out the MAC address this image was bound to. 
# Second argument is 0 - means don't save a backup - this confuses network init g.aug_init("/", 0) if g.aug_rm("/files/etc/sysconfig/network-scripts/ifcfg-eth0/HWADDR"): self.log.debug("Removed HWADDR from image's /etc/sysconfig/network-scripts/ifcfg-eth0") g.aug_save() else: self.log.debug("Failed to remove HWADDR from image's /etc/sysconfig/network-scripts/ifcfg-eth0") g.aug_close() g.sync () g.umount_all () def ec2_copy_filesystem(self): self.activity("Copying image contents to single flat partition for EC2") target_image=self.image + ".tmp" self.log.debug("init guestfs") g = guestfs.GuestFS () self.log.debug("add input image") g.add_drive (self.image) self.log.debug("creat target image") f = open (target_image, "w") # TODO: Can this be larger, smaller - should it be? f.truncate (10000 * 1024 * 1024) f.close () g.add_drive(target_image) self.log.debug("creat tmp image") # We need a small FS to mount target and dest on - make image file for it # TODO: Use Marek's create mount point trick instead of a temp file tmp_image_file = "/tmp/tmp-img-" + self.new_image_id f = open (tmp_image_file, "w") f.truncate (10 * 1024 * 1024) f.close g.add_drive(tmp_image_file) self.log.debug("launch guestfs") g.launch () # TODO: Re-enable this? 
# Do inspection here, as libguestfs prefers we do it before mounting anything #inspection = g.inspect_os() # This assumes, I think reasonably, only one OS on the disk image provided by Oz #rootdev = inspection[0] # At this point sda is original image - sdb is blank target - sdc is small helper self.log.info("Making filesystems for EC2 transform") # TODO: Make different FS types depending on the type of the original root fs g.mkfs ("ext3", "/dev/sdb") g.set_e2label ("/dev/sdb", "/") g.mkfs ("ext3", "/dev/sdc") self.log.info("Done") g.mount_options ("", "/dev/sdc", "/") g.mkdir("/in") g.mkdir("/out") # Yes, this looks odd but it is the easiest way to use cp_a from guestfs # because we cannot use wildcards directly with guestfs g.mkdir("/out/in") g.mount_ro ("/dev/VolGroup00/LogVol00", "/in") # F16 and upwards end up with boot on sda2 due to GRUB changes if (self.tdlobj.distro == 'Fedora') and (int(self.tdlobj.update) >= 16): g.mount_ro ("/dev/sda2", "/in/boot") else: g.mount_ro ("/dev/sda1", "/in/boot") g.mount_options ("", "/dev/sdb", "/out/in") self.log.info("Copying image contents to EC2 flat filesystem") g.cp_a("/in/", "/out") self.log.info("Done") g.sync () g.umount_all () os.unlink(tmp_image_file) self.log.debug("Copy complete - removing old image and replacing with new flat filesystem image") os.unlink(self.image) os.rename(target_image, self.image) def ec2_modify_filesystem(self): # Modifications # Many of these are more or less directly ported from BoxGrinder # Boxgrinder is written and maintained by Marek Goldmann and can be found at: # http://boxgrinder.org/ # TODO: This would be safer and more robust if done within the running modified # guest - in this would require tighter Oz integration self.activity("Modifying flat filesystem with EC2 specific changes") g = guestfs.GuestFS () g.add_drive(self.image) g.launch () # Do inspection here, as libguestfs prefers we do it before mounting anything # This should always be /dev/vda or /dev/sda but we do it anyway 
to be safe osroot = g.inspect_os()[0] # eg "fedora" distro = g.inspect_get_distro(osroot) arch = g.inspect_get_arch(osroot) major_version = g.inspect_get_major_version(osroot) minor_version = g.inspect_get_minor_version(osroot) self.log.debug("distro: %s - arch: %s - major: %s - minor %s" % (distro, arch, major_version, minor_version)) g.mount_options ("", osroot, "/") self.log.info("Modifying flat FS contents to be EC2 compatible") self.log.info("Disabling SELINUX") tmpl = '# Factory Disabled SELINUX - sorry\nSELINUX=permissive\nSELINUXTYPE=targeted\n' g.write("/etc/sysconfig/selinux", tmpl) # Make a /data directory for 64 bit hosts # Ephemeral devs come pre-formatted from AWS - weird if arch == "x86_64": self.log.info("Making data directory") g.mkdir("/data") # BG - Upload one of two templated fstabs # Input - root device name # TODO: Match OS default behavior and/or what is found in the existing image self.log.info("Modifying and uploading fstab") # Make arch conditional if arch == "x86_64": tmpl=self.fstab_64bit else: tmpl=self.fstab_32bit g.write("/etc/fstab", tmpl) # BG - Enable networking # Upload a known good ifcfg-eth0 and then chkconfig on networking self.log.info("Enabling networking and uploading ifcfg-eth0") g.sh("/sbin/chkconfig network on") g.write("/etc/sysconfig/network-scripts/ifcfg-eth0", self.ifcfg_eth0) # Disable first boot - this slows things down otherwise if g.is_file("/etc/init.d/firstboot"): g.sh("/sbin/chkconfig firstboot off") # Ensure a sensible runlevel on systemd systems (>=F15) # Oz/Anaconda hand us a graphical runlevel if g.is_symlink("/etc/systemd/system/default.target"): g.rm("/etc/systemd/system/default.target") g.ln_s("/lib/systemd/system/multi-user.target","/etc/systemd/system/default.target") # BG - Upload rc.local extra content # Again, this uses a static copy - this bit is where the ssh key is downloaded # TODO: Is this where we inject puppet? 
# TODO - Possibly modify the key injection from rc_local to be only non-root # and add a special user to sudoers - this is what BG has evolved to do self.log.info("Updating rc.local for key injection") g.write("/tmp/rc.local", self.rc_local) # Starting with F16, rc.local doesn't exist by default if not g.exists("/etc/rc.d/rc.local"): g.sh("echo \#\!/bin/bash > /etc/rc.d/rc.local") g.sh("chmod a+x /etc/rc.d/rc.local") g.sh("cat /tmp/rc.local >> /etc/rc.d/rc.local") g.rm("/tmp/rc.local") # Don't ever allow password logins to EC2 sshd g.aug_init("/", 0) g.aug_set("/files/etc/ssh/sshd_config/PermitRootLogin", "without-password") g.aug_save() g.aug_close() self.log.debug("Disabled root loging with password in /etc/ssh/sshd_config") # Install menu list # Derive the kernel version from the last element of ls /lib/modules and some # other magic - look at linux_helper for details # Look at /lib/modules and assume that the last kernel listed is the version we use self.log.info("Modifying and updating menu.lst") kernel_versions = g.ls("/lib/modules") kernel_version = None if (distro == "rhel") and (major_version == 5): xenre = re.compile("xen$") for kern in kernel_versions: if xenre.search(kern): kernel_version = kern elif (len(kernel_versions) > 1) and (arch == "i386") and (distro == "fedora") and (int(major_version) <=13): paere = re.compile("PAE$") for kern in kernel_versions: if paere.search(kern): kernel_version = kern else: kernel_version = kernel_versions[len(kernel_versions)-1] if not kernel_version: self.log.debug("Unable to extract correct kernel version from: %s" % (str(kernel_versions))) raise ImageFactoryException("Unable to extract kernel version") self.log.debug("Using kernel version: %s" % (kernel_version)) # We could deduce this from version but it's easy to inspect bootramfs = int(g.sh("ls -1 /boot | grep initramfs | wc -l")) ramfs_prefix = "initramfs" if bootramfs > 0 else "initrd" name="Image Factory EC2 boot - kernel: " + kernel_version if (distro == 
"rhel") and (major_version == 5): g.sh("/sbin/mkinitrd -f -v --preload xenblk --preload xennet /boot/initrd-%s.img %s" % (kernel_version)) kernel_options = "" if (distro == "fedora") and (str(major_version) == "16"): self.log.debug("Adding idle=halt option for Fedora 16 on EC2") kernel_options += "idle=halt " tmpl = self.menu_lst tmpl = string.replace(tmpl, "#KERNEL_OPTIONS#", kernel_options) tmpl = string.replace(tmpl, "#KERNEL_VERSION#", kernel_version) tmpl = string.replace(tmpl, "#KERNEL_IMAGE_NAME#", ramfs_prefix) tmpl = string.replace(tmpl, "#TITLE#", name) g.write("/boot/grub/menu.lst", tmpl) # EC2 Xen nosegneg bug # This fixes issues with Fedora >=14 on EC2: https://bugzilla.redhat.com/show_bug.cgi?id=651861#c39 if (arch == "i386") and (distro == "fedora") and (int(major_version) >= 14): self.log.info("Fixing Xen EC2 bug") g.sh("echo \"hwcap 1 nosegneg\" > /etc/ld.so.conf.d/libc6-xen.conf") g.sh("/sbin/ldconfig") self.log.info("Done with EC2 filesystem modifications") g.sync () g.umount_all () # TODO: Based on architecture associate one of two XML blocks that contain the correct # regional AKIs for pvgrub def wait_for_ec2_ssh_access(self, guestaddr): self.activity("Waiting for SSH access to EC2 instance") for i in range(300): if i % 10 == 0: self.log.debug("Waiting for EC2 ssh access: %d/300" % (i)) try: stdout, stderr, retcode = self.guest.guest_execute_command(guestaddr, "/bin/true", timeout = 10) break except: pass sleep(1) if i == 299: raise ImageFactoryException("Unable to gain ssh access after 300 seconds - aborting") def wait_for_ec2_instance_start(self, instance): self.activity("Waiting for EC2 instance to become active") for i in range(300): if i % 10 == 0: self.log.debug("Waiting for EC2 instance to start: %d/300" % (i)) try: instance.update() except EC2ResponseError, e: # We occasionally get errors when querying an instance that has just started - ignore them and hope for the best self.log.warning("EC2ResponseError encountered when querying EC2 
instance (%s) - trying to continue" % (instance.id), exc_info = True) except: self.log.error("Exception encountered when updating status of instance (%s)" % (instance.id), exc_info = True) self.status="FAILED" try: self.terminate_instance(instance) except: log.warning("WARNING: Instance (%s) failed to start and will not terminate - it may still be running" % (instance.id), exc_info = True) raise ImageFactoryException("Instance (%s) failed to fully start or terminate - it may still be running" % (instance.id)) raise ImageFactoryException("Exception encountered when waiting for instance (%s) to start" % (instance.id)) if instance.state == u'running': break sleep(1) if instance.state != u'running': self.status="FAILED" try: self.terminate_instance(instance) except: log.warning("WARNING: Instance (%s) failed to start and will not terminate - it may still be running" % (instance.id), exc_info = True) raise ImageFactoryException("Instance (%s) failed to fully start or terminate - it may still be running" % (instance.id)) raise ImageFactoryException("Instance failed to start after 300 seconds - stopping") def terminate_instance(self, instance): # boto 1.9 claims a terminate() method but does not implement it # boto 2.0 throws an exception if you attempt to stop() an S3 backed instance # introspect here and do the best we can if "terminate" in dir(instance): instance.terminate() else: instance.stop() def snapshot_image_on_provider(self, builder, provider, credentials, target, template, parameters): self.log.info('snapshot_image_on_provider() called in EC2Cloud') self.builder = builder self.active_image = self.builder.provider_image # TODO: This is a convenience variable for refactoring - rename self.new_image_id = builder.provider_image.identifier # Template must be defined for snapshots self.tdlobj = oz.TDL.TDL(xmlstring=str(template), rootpw_required=True) self._get_os_helper() self.os_helper.init_guest() def replace(item): if item in [self.ec2_access_key, 
self.ec2_secret_key]: return "REDACTED" return item self.log.debug("Being asked to push for provider %s" % (provider)) self.log.debug("distro: %s - update: %s - arch: %s" % (self.tdlobj.distro, self.tdlobj.update, self.tdlobj.arch)) self.ec2_decode_credentials(credentials) self.log.debug("acting as EC2 user: %s" % (str(self.ec2_user_id))) self.status="PUSHING" self.percent_complete=0 self.activity("Preparing EC2 region details") region=provider # These are the region details for the TARGET region for our new AMI region_conf=self.ec2_region_details[region] aki = region_conf[self.tdlobj.arch] boto_loc = region_conf['boto_loc'] if region != "ec2-us-east-1": upload_url = "http://s3-%s.amazonaws.com/" % (region_conf['host']) else: # Note to Amazon - would it be that hard to have s3-us-east-1.amazonaws.com? upload_url = "http://s3.amazonaws.com/" register_url = "http://ec2.%s.amazonaws.com/" % (region_conf['host']) ami_id = "none" build_region = provider try: ami_id = self.ec2_jeos_amis[provider][self.tdlobj.distro][self.tdlobj.update][self.tdlobj.arch] except KeyError: pass if ami_id == "none": try: # Fallback to modification on us-east and upload cross-region ami_id = self.ec2_jeos_amis['ec2-us-east-1'][self.tdlobj.distro][self.tdlobj.update][self.tdlobj.arch] build_region = 'ec2-us-east-1' self.log.info("WARNING: Building in ec2-us-east-1 for upload to %s" % (provider)) self.log.info(" This may be a bit slow - ask the Factory team to create a region-local JEOS") except KeyError: pass if ami_id == "none": self.status="FAILED" raise ImageFactoryException("No available JEOS for desired OS, verison combination") # These are the region details for the region we are building in (which may be different from the target) build_region_conf = self.ec2_region_details[build_region] # Note that this connection may be to a region other than the target self.activity("Preparing EC2 JEOS AMI details") ec2region = boto.ec2.get_region(build_region_conf['host'], 
aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key) conn = ec2region.connect(aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key) # Verify that AMI actually exists - err out if not # Extract AMI type - "ebs" or "instance-store" (S3) # If build_region != provider (meaning we are not building in our target region) # if type == ebs throw an error - EBS builds must be in the target region/provider amis = conn.get_all_images([ ami_id ]) ami = amis[0] if (build_region != provider) and (ami.root_device_type == "ebs"): self.log.error("EBS JEOS image exists in us-east-1 but not in target region (%s)" % (provider)) raise ImageFactoryException("No EBS JEOS image for region (%s) - aborting" % (provider)) instance_type=self.app_config.get('ec2-64bit-util','m1.large') if self.tdlobj.arch == "i386": instance_type=self.app_config.get('ec2-32bit-util','m1.small') # Create a use-once SSH-able security group self.activity("Creating EC2 security group for SSH access to utility image") factory_security_group_name = "imagefactory-%s" % (self.new_image_id, ) factory_security_group_desc = "Temporary ImageFactory generated security group with SSH access" self.log.debug("Creating temporary security group (%s)" % (factory_security_group_name)) factory_security_group = conn.create_security_group(factory_security_group_name, factory_security_group_desc) factory_security_group.authorize('tcp', 22, 22, '0.0.0.0/0') # Create a use-once SSH key self.activity("Creating EC2 SSH key pair") key_name = "fac-tmp-key-%s" % (self.new_image_id) key = conn.create_key_pair(key_name) # Shove into a named temp file key_file_object = NamedTemporaryFile() key_file_object.write(key.material) key_file_object.flush() key_file=key_file_object.name # Now launch it self.activity("Launching EC2 JEOS image") self.log.debug("Starting ami %s with instance_type %s" % (ami_id, instance_type)) reservation = conn.run_instances(ami_id, instance_type=instance_type, 
key_name=key_name, security_groups = [ factory_security_group_name ]) if len(reservation.instances) != 1: self.status="FAILED" raise ImageFactoryException("run_instances did not result in the expected single instance - stopping") self.instance = reservation.instances[0] self.wait_for_ec2_instance_start(self.instance) # From this point on we must be sure to terminate the instance when we are done # so wrap in a try/finally # Accidentally running a 64 bit instance doing nothing costs 56 USD week try: guestaddr = self.instance.public_dns_name self.guest.sshprivkey = key_file # Ugly ATM because failed access always triggers an exception self.wait_for_ec2_ssh_access(guestaddr) # There are a handful of additional boot tasks after SSH starts running # Give them an additional 20 seconds for good measure self.log.debug("Waiting 20 seconds for remaining boot tasks") sleep(20) self.activity("Customizing running EC2 JEOS instance") self.log.debug("Stopping cron and killing any updatedb process that may be running") # updatedb interacts poorly with the bundle step - make sure it isn't running self.guest.guest_execute_command(guestaddr, "/sbin/service crond stop") self.guest.guest_execute_command(guestaddr, "killall -9 updatedb || /bin/true") self.log.debug("Done") if ami.root_device_type == "instance-store": # Different OSes need different steps here # Only needed for S3 images self.install_euca_tools(guestaddr) # Not all JEOS images contain this - redoing it if already present is harmless self.log.info("Creating cloud-info file indicating target (%s)" % (self.target)) self.guest.guest_execute_command(guestaddr, 'echo CLOUD_TYPE=\\\"%s\\\" > /etc/sysconfig/cloud-info' % (self.target)) self.log.debug("Customizing guest: %s" % (guestaddr)) self.guest.mkdir_p(self.guest.icicle_tmp) self.guest.do_customize(guestaddr) self.log.debug("Customization step complete") self.log.debug("Generating ICICLE from customized guest") self.output_descriptor = self.guest.do_icicle(guestaddr) 
self.log.debug("ICICLE generation complete") self.log.debug("Re-de-activate firstboot just in case it has been revived during customize") self.guest.guest_execute_command(guestaddr, "[ -f /etc/init.d/firstboot ] && /sbin/chkconfig firstboot off || /bin/true") self.log.debug("De-activation complete") new_ami_id = None image_name = str(self.longname) image_desc = "%s - %s" % (asctime(localtime()), self.tdlobj.description) if ami.root_device_type == "instance-store": # This is an S3 image so we snapshot to another S3 image using euca-bundle-vol and # associated tools ec2cert = "/etc/pki/imagefactory/cert-ec2.pem" # This is needed for uploading and registration # Note that it is excluded from the final image self.activity("Uploading certificate material for bundling of instance") self.guest.guest_live_upload(guestaddr, self.ec2_cert_file, "/tmp") self.guest.guest_live_upload(guestaddr, self.ec2_key_file, "/tmp") self.guest.guest_live_upload(guestaddr, ec2cert, "/tmp") self.log.debug("Cert upload complete") # Some local variables to make the calls below look a little cleaner ec2_uid = self.ec2_user_id arch = self.tdlobj.arch # AKI is set above uuid = self.new_image_id # We exclude /mnt /tmp and /root/.ssh to avoid embedding our utility key into the image command = "euca-bundle-vol -c /tmp/%s -k /tmp/%s -u %s -e /mnt,/tmp,/root/.ssh --arch %s -d /mnt/bundles --kernel %s -p %s -s 10240 --ec2cert /tmp/cert-ec2.pem --fstab /etc/fstab -v /" % (os.path.basename(self.ec2_cert_file), os.path.basename(self.ec2_key_file), ec2_uid, arch, aki, uuid) self.activity("Bundling remote instance in-place") self.log.debug("Executing bundle vol command: %s" % (command)) stdout, stderr, retcode = self.guest.guest_execute_command(guestaddr, command) self.log.debug("Bundle output: %s" % (stdout)) # Now, ensure we have an appropriate bucket to receive this image # TODO: This is another copy - make it a function soon please bucket= "imagefactory-" + region + "-" + self.ec2_user_id 
self.activity("Preparing S3 destination for image bundle") sconn = S3Connection(self.ec2_access_key, self.ec2_secret_key) try: sconn.create_bucket(bucket, location=boto_loc) except S3CreateError as buckerr: if buckerr.error_code == "BucketAlreadyOwnedByYou": # Expected behavior after first push - not an error pass else: raise # TODO: End of copy # TODO: We cannot timeout on any of the three commands below - can we fix that? manifest = "/mnt/bundles/%s.manifest.xml" % (uuid) # Unfortunately, for some OS versions we need to correct the manifest self.correct_remote_manifest(guestaddr, manifest) command = ['euca-upload-bundle', '-b', bucket, '-m', manifest, '--ec2cert', '/tmp/cert-ec2.pem', '-a', self.ec2_access_key, '-s', self.ec2_secret_key, '-U', upload_url] command_log = map(replace, command) self.activity("Uploading bundle to S3") self.log.debug("Executing upload bundle command: %s" % (command_log)) stdout, stderr, retcode = self.guest.guest_execute_command(guestaddr, ' '.join(command)) self.log.debug("Upload output: %s" % (stdout)) manifest_s3_loc = "%s/%s.manifest.xml" % (bucket, uuid) command = ['euca-register', '-U', register_url, '-A', self.ec2_access_key, '-S', self.ec2_secret_key, '-a', self.tdlobj.arch, #'-n', image_name, '-d', image_desc, manifest_s3_loc] command_log = map(replace, command) self.activity("Registering bundle as a new AMI") self.log.debug("Executing register command: %s" % (command_log)) stdout, stderr, retcode = self.guest.guest_execute_command(guestaddr, ' '.join(command)) self.log.debug("Register output: %s" % (stdout)) m = re.match(".*(ami-[a-fA-F0-9]+)", stdout) new_ami_id = m.group(1) self.log.debug("Extracted AMI ID: %s " % (new_ami_id)) ### End S3 snapshot code else: self.activity("Preparing image for an EBS snapshot") self.log.debug("Performing image prep tasks for EBS backed images") self.ebs_pre_shapshot_tasks(guestaddr) self.activity("Requesting EBS snapshot creation by EC2") self.log.debug("Creating a new EBS backed image from 
our running EBS instance") new_ami_id = conn.create_image(self.instance.id, image_name, image_desc) self.log.debug("EUCA creat_image call returned AMI ID: %s" % (new_ami_id)) self.activity("Waiting for newly generated AMI to become available") # As with launching an instance we have seen occasional issues when trying to query this AMI right # away - give it a moment to settle sleep(10) new_amis = conn.get_all_images([ new_ami_id ]) new_ami = new_amis[0] timeout = 120 interval = 10 for i in range(timeout): new_ami.update() if new_ami.state == "available": break elif new_ami.state == "failed": raise ImageFactoryException("Amazon reports EBS image creation failed") self.log.debug("AMI status (%s) - waiting for 'available' - [%d of %d seconds elapsed]" % (new_ami.state, i * interval, timeout * interval)) sleep(interval) if not new_ami_id: raise ImageFactoryException("Failed to produce an AMI ID") # This replaces our Warehouse calls self.builder.provider_image.icicle = self.output_descriptor self.builder.provider_image.identifier_on_provider = new_ami_id self.builder.provider_account_identifier = self.ec2_access_key finally: self.activity("Terminating EC2 instance and deleting security group and SSH key") self.terminate_instance(self.instance) key_file_object.close() conn.delete_key_pair(key_name) try: timeout = 60 interval = 5 for i in range(timeout): self.instance.update() if(self.instance.state == "terminated"): factory_security_group.delete() self.log.debug("Removed temporary security group (%s)" % (factory_security_group_name)) break elif(i < timeout): self.log.debug("Instance status (%s) - waiting for 'terminated'. 
[%d of %d seconds elapsed]" % (self.instance.state, i * interval, timeout * interval)) sleep(interval) else: raise Exception("Timeout waiting for instance to terminate.") except Exception, e: self.log.debug("Unable to delete temporary security group (%s) due to exception: %s" % (factory_security_group_name, e)) self.log.debug("Fedora_ec2_Builder instance %s pushed image with uuid %s to provider_image UUID (%s)" % (id(self), target_image_id, self.new_image_id)) self.percent_complete=100 self.status="COMPLETED" def push_image_upload(self, target_image_id, provider, credentials): self.status="PUSHING" self.percent_complete=0 try: if self.app_config["ec2_ami_type"] == "s3": self.ec2_push_image_upload(target_image_id, provider, credentials) elif self.app_config["ec2_ami_type"] == "ebs": self.ec2_push_image_upload_ebs(target_image_id, provider, credentials) else: raise ImageFactoryException("Invalid or unspecified EC2 AMI type in config file") except: self.log_exc() self.status="FAILED" raise self.status="COMPLETED" def _ec2_get_xml_node(self, doc, credtype): nodes = doc.xpathEval("//provider_credentials/ec2_credentials/%s" % (credtype)) if len(nodes) < 1: raise ImageFactoryException("No EC2 %s available" % (credtype)) return nodes[0].content def ec2_decode_credentials(self, credentials): self.activity("Preparing EC2 credentials") doc = libxml2.parseDoc(credentials) self.ec2_user_id = self._ec2_get_xml_node(doc, "account_number") self.ec2_access_key = self._ec2_get_xml_node(doc, "access_key") self.provider_account_identifier = self.ec2_access_key self.ec2_secret_key = self._ec2_get_xml_node(doc, "secret_access_key") # Support both "key" and "x509_private" as element names ec2_key_node = doc.xpathEval("//provider_credentials/ec2_credentials/key") if not ec2_key_node: ec2_key_node = doc.xpathEval("//provider_credentials/ec2_credentials/x509_private") if not ec2_key_node: raise ImageFactoryException("No x509 private key found in ec2 credentials") 
ec2_key=ec2_key_node[0].content # Support both "certificate" and "x509_public" as element names ec2_cert_node = doc.xpathEval("//provider_credentials/ec2_credentials/certificate") if not ec2_cert_node: ec2_cert_node = doc.xpathEval("//provider_credentials/ec2_credentials/x509_public") if not ec2_cert_node: raise ImageFactoryException("No x509 public certificate found in ec2 credentials") ec2_cert = ec2_cert_node[0].content doc.freeDoc() # Shove certs into named temporary files self.ec2_cert_file_object = NamedTemporaryFile() self.ec2_cert_file_object.write(ec2_cert) self.ec2_cert_file_object.flush() self.ec2_cert_file=self.ec2_cert_file_object.name self.ec2_key_file_object = NamedTemporaryFile() self.ec2_key_file_object.write(ec2_key) self.ec2_key_file_object.flush() self.ec2_key_file=self.ec2_key_file_object.name def ec2_push_image_upload_ebs(self, target_image_id, provider, credentials): # TODO: Merge with ec2_push_image_upload and/or factor out duplication # In this case we actually do need an Oz object to manipulate a remote guest self.os_helper.init_guest() self.ec2_decode_credentials(credentials) # We don't need the x509 material here so close the temp files right away # TODO: Mod the decode to selectively create the files in the first place # This is silly and messy self.ec2_cert_file_object.close() self.ec2_key_file_object.close() # Image is always here and it is the target_image datafile input_image = self.builder.target_image.data input_image_compressed = input_image + ".gz" input_image_compressed_name = os.path.basename(input_image_compressed) compress_complete_marker = input_image_compressed + "-factory-compressed" # We are guaranteed to hit this from multiple builders looking at the same image # Grab a named lock based on the file name # If the file is not present this guarantees that only one thread will compress # NOTE: It is important to grab the lock before we even look for the file # TODO: Switched this to use shell callouts because of a 64 bit 
bug - fix that res_mgr = ReservationManager() res_mgr.get_named_lock(input_image_compressed) try: if not os.path.isfile(input_image_compressed) or not os.path.isfile(compress_complete_marker): self.activity("Compressing image file for upload to EC2") self.log.debug("No compressed version of image file found - compressing now") compress_command = 'gzip -c %s > %s' % (input_image, input_image_compressed) self.log.debug("Compressing image file with external gzip cmd: %s" % (compress_command)) result = subprocess.call(compress_command, shell = True) if result: raise ImageFactoryException("Compression of image failed") self.log.debug("Compression complete") # Mark completion with an empty file # Without this we might use a partially compressed file that resulted from a crash or termination subprocess.call("touch %s" % (compress_complete_marker), shell = True) finally: res_mgr.release_named_lock(input_image_compressed) self.activity("Preparing EC2 region details") region=provider region_conf=self.ec2_region_details[region] aki = region_conf[self.tdlobj.arch] # Use our F16 - 32 bit JEOS image as the utility image for uploading to the EBS volume try: ami_id = self.ec2_jeos_amis[provider]['Fedora']['16']['i386'] except KeyError: raise ImageFactoryException("No Fedora 16 i386 JEOS/utility image in region (%s) - aborting", (provider)) # i386 instance_type=self.app_config.get('ec2-32bit-util','m1.small') self.activity("Initializing connection to ec2 region (%s)" % region_conf['host']) ec2region = boto.ec2.get_region(region_conf['host'], aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key) conn = ec2region.connect(aws_access_key_id=self.ec2_access_key, aws_secret_access_key=self.ec2_secret_key) # Create security group self.activity("Creating EC2 security group for SSH access to utility image") factory_security_group_name = "imagefactory-%s" % (str(self.new_image_id)) factory_security_group_desc = "Temporary ImageFactory generated security group with 
SSH access" self.log.debug("Creating temporary security group (%s)" % (factory_security_group_name)) factory_security_group = conn.create_security_group(factory_security_group_name, factory_security_group_desc) factory_security_group.authorize('tcp', 22, 22, '0.0.0.0/0') # Create a use-once SSH key self.activity("Creating SSH key pair for image upload") key_name = "fac-tmp-key-%s" % (self.new_image_id) key = conn.create_key_pair(key_name) # Shove into a named temp file key_file_object = NamedTemporaryFile() key_file_object.write(key.material) key_file_object.flush() key_file=key_file_object.name # Now launch it self.activity("Launching EC2 utility image") reservation = conn.run_instances(ami_id, instance_type=instance_type, key_name=key_name, security_groups = [ factory_security_group_name ]) if len(reservation.instances) != 1: self.status="FAILED" raise ImageFactoryException("run_instances did not result in the expected single instance - stopping") self.instance = reservation.instances[0] self.wait_for_ec2_instance_start(self.instance) # From this point on we must be sure to terminate the instance when we are done # so wrap in a try/finally # Accidentally running a 64 bit instance doing nothing costs 56 USD week volume = None try: guestaddr = self.instance.public_dns_name self.guest.sshprivkey = key_file # Ugly ATM because failed access always triggers an exception self.wait_for_ec2_ssh_access(guestaddr) # There are a handful of additional boot tasks after SSH starts running # Give them an additional 20 seconds for good measure self.log.debug("Waiting 20 seconds for remaining boot tasks") sleep(20) self.activity("Creating 10 GiB volume in (%s) to hold new image" % (self.instance.placement)) volume = conn.create_volume(10, self.instance.placement) # Do the upload before testing to see if the volume has completed # to get a bit of parallel work self.activity("Uploading compressed image file") self.guest.guest_live_upload(guestaddr, input_image_compressed, "/mnt") # 
Don't burden API users with the step-by-step details here self.activity("Preparing EC2 volume to receive new image") # Volumes can sometimes take a very long time to create # Wait up to 10 minutes for now (plus the time taken for the upload above) self.log.debug("Waiting up to 600 seconds for volume (%s) to become available" % (volume.id)) retcode = 1 for i in range(60): volume.update() if volume.status == "available": retcode = 0 break self.log.debug("Volume status (%s) - waiting for 'available': %d/600" % (volume.status, i*10)) sleep(10) if retcode: raise ImageFactoryException("Unable to create target volume for EBS AMI - aborting") # Volume is now available # Attach it conn.attach_volume(volume.id, self.instance.id, "/dev/sdh") self.log.debug("Waiting up to 120 seconds for volume (%s) to become in-use" % (volume.id)) retcode = 1 for i in range(12): volume.update() vs = volume.attachment_state() if vs == "attached": retcode = 0 break self.log.debug("Volume status (%s) - waiting for 'attached': %d/120" % (vs, i*10)) sleep(10) if retcode: raise ImageFactoryException("Unable to attach volume (%s) to instance (%s) aborting" % (volume.id, self.instance.id)) # TODO: This may not be necessary but it helped with some funnies observed during testing # At some point run a bunch of builds without the delay to see if it breaks anything self.log.debug("Waiting 20 seconds for EBS attachment to stabilize") sleep(20) # Decompress image into new EBS volume self.activity("Decompressing image into new volume") command = "gzip -dc /mnt/%s | dd of=/dev/xvdh bs=4k\n" % (input_image_compressed_name) self.log.debug("Decompressing image file into EBS device via command: %s" % (command)) self.guest.guest_execute_command(guestaddr, command) # Sync before snapshot self.guest.guest_execute_command(guestaddr, "sync") # Snapshot EBS volume self.activity("Taking EC2 snapshot of new volume") self.log.debug("Taking snapshot of volume (%s)" % (volume.id)) snapshot = conn.create_snapshot(volume.id, 
'Image Factory Snapshot for provider image %s' % self.new_image_id) # This can take a _long_ time - wait up to 20 minutes self.log.debug("Waiting up to 1200 seconds for snapshot (%s) to become completed" % (snapshot.id)) retcode = 1 for i in range(120): snapshot.update() if snapshot.status == "completed": retcode = 0 break self.log.debug("Snapshot progress(%s) - status (%s) - waiting for 'completed': %d/1200" % (str(snapshot.progress), snapshot.status, i*10)) sleep(10) if retcode: raise ImageFactoryException("Unable to snapshot volume (%s) - aborting" % (volume.id)) # register against snapshot self.activity("Registering snapshot as a new AMI") self.log.debug("Registering snapshot (%s) as new EBS AMI" % (snapshot.id)) ebs = EBSBlockDeviceType() ebs.snapshot_id = snapshot.id ebs.delete_on_termination = True block_map = BlockDeviceMapping() block_map['/dev/sda1'] = ebs # The ephemeral mappings are automatic with S3 images # For EBS images we need to make them explicit # These settings are required to make the same fstab work on both S3 and EBS images e0 = EBSBlockDeviceType() e0.ephemeral_name = 'ephemeral0' e1 = EBSBlockDeviceType() e1.ephemeral_name = 'ephemeral1' if self.tdlobj.arch == "i386": block_map['/dev/sda2'] = e0 block_map['/dev/sda3'] = e1 else: block_map['/dev/sdb'] = e0 block_map['/dev/sdc'] = e1 result = conn.register_image(name='ImageFactory created AMI - %s' % (self.new_image_id), description='ImageFactory created AMI - %s' % (self.new_image_id), architecture=self.tdlobj.arch, kernel_id=aki, root_device_name='/dev/sda1', block_device_map=block_map) ami_id = str(result) self.log.debug("Extracted AMI ID: %s " % (ami_id)) except: self.log.debug("EBS image upload failed on exception") #DANGER!!! Uncomment at your own risk! 
#This is for deep debugging of the EBS utility instance - don't forget to shut it down manually #self.log.debug("EBS image upload failed on exception", exc_info = True) #self.log.debug("Waiting more or less forever to allow inspection of the instance") #self.log.debug("run this: ssh -i %s root@%s" % (key_file, self.instance.public_dns_name)) #sleep(999999) raise finally: self.activity("Terminating EC2 instance and deleting temp security group and volume") self.terminate_instance(self.instance) key_file_object.close() conn.delete_key_pair(key_name) self.log.debug("Waiting up to 240 seconds for instance (%s) to shut down" % (self.instance.id)) retcode = 1 for i in range(24): self.instance.update() if self.instance.state == "terminated": retcode = 0 break self.log.debug("Instance status (%s) - waiting for 'terminated': %d/240" % (self.instance.state, i*10)) sleep(10) if retcode: self.log.warning("Instance (%s) failed to terminate - Unable to delete volume (%s) or delete factory temp security group" % (self.instance.id, volume.id)) else: self.log.debug("Deleting temporary security group") factory_security_group.delete() if volume: self.log.debug("Deleting EBS volume (%s)" % (volume.id)) volume.delete() # TODO: Add back-reference to ICICLE from base image object # This replaces our warehouse calls self.builder.provider_image.identifier_on_provider=ami_id self.builder.provider_image.provider_account_identifier=self.ec2_access_key self.log.debug("Fedora_ec2_Builder instance %s pushed image with uuid %s to provider_image UUID (%s)" % (id(self), target_image_id, self.new_image_id)) self.percent_complete=100 def ec2_push_image_upload(self, target_image_id, provider, credentials): def replace(item): if item in [self.ec2_access_key, self.ec2_secret_key]: return "REDACTED" return item # Image is always here and it is the target_image datafile input_image = self.builder.target_image.data input_image_name = os.path.basename(input_image) self.ec2_decode_credentials(credentials) 
bundle_destination=self.app_config['imgdir'] self.activity("Preparing EC2 region details and connection") region=provider region_conf=self.ec2_region_details[region] aki = region_conf[self.tdlobj.arch] boto_loc = region_conf['boto_loc'] if region != "ec2-us-east-1": upload_url = "http://s3-%s.amazonaws.com/" % (region_conf['host']) else: # Note to Amazon - would it be that hard to have s3-us-east-1.amazonaws.com? upload_url = "http://s3.amazonaws.com/" register_url = "http://ec2.%s.amazonaws.com/" % (region_conf['host']) bucket= "imagefactory-" + region + "-" + self.ec2_user_id # Euca does not support specifying region for bucket # (Region URL is not sufficient) # See: https://bugs.launchpad.net/euca2ools/+bug/704658 # What we end up having to do is manually create a bucket in the right region # then explicitly point to that region URL when doing the image upload # We CANNOT let euca create the bucket when uploading or it will end up in us-east-1 conn = S3Connection(self.ec2_access_key, self.ec2_secret_key) try: conn.create_bucket(bucket, location=boto_loc) except S3CreateError as buckerr: # if the bucket already exists, it is not an error if buckerr.error_code != "BucketAlreadyOwnedByYou": raise # TODO: Make configurable? 
ec2_service_cert = "/etc/pki/imagefactory/cert-ec2.pem" bundle_command = [ "euca-bundle-image", "-i", input_image, "--kernel", aki, "-d", bundle_destination, "-a", self.ec2_access_key, "-s", self.ec2_secret_key, "-c", self.ec2_cert_file, "-k", self.ec2_key_file, "-u", self.ec2_user_id, "-r", self.tdlobj.arch, "--ec2cert", ec2_service_cert ] bundle_command_log = map(replace, bundle_command) self.activity("Bundling image locally") self.log.debug("Executing bundle command: %s " % (bundle_command_log)) bundle_output = subprocess_check_output(bundle_command) self.log.debug("Bundle command complete") self.log.debug("Bundle command output: %s " % (str(bundle_output))) self.percent_complete=40 manifest = bundle_destination + "/" + input_image_name + ".manifest.xml" upload_command = [ "euca-upload-bundle", "-b", bucket, "-m", manifest, "--ec2cert", ec2_service_cert, "-a", self.ec2_access_key, "-s", self.ec2_secret_key, "-U" , upload_url ] upload_command_log = map(replace, upload_command) self.activity("Uploading image to EC2") self.log.debug("Executing upload command: %s " % (upload_command_log)) upload_output = subprocess_check_output(upload_command) self.log.debug("Upload command output: %s " % (str(upload_output))) self.percent_complete=90 s3_path = bucket + "/" + input_image_name + ".manifest.xml" register_env = { 'EC2_URL':register_url } register_command = [ "euca-register" , "-A", self.ec2_access_key, "-S", self.ec2_secret_key, "-a", self.tdlobj.arch, s3_path ] register_command_log = map(replace, register_command) self.activity("Registering image") self.log.debug("Executing register command: %s with environment %s " % (register_command_log, repr(register_env))) register_output = subprocess_check_output(register_command, env=register_env) self.log.debug("Register command output: %s " % (str(register_output))) m = re.match(".*(ami-[a-fA-F0-9]+)", register_output[0]) ami_id = m.group(1) self.log.debug("Extracted AMI ID: %s " % (ami_id)) # TODO: This should be in a 
finally statement that rethrows exceptions self.ec2_cert_file_object.close() self.ec2_key_file_object.close() self.status = "PUSHING" # TODO: Generate and store ICICLE # This replaces our warehouse calls self.builder.provider_image.identifier_on_provider = ami_id self.builder.provider_image.provider_account_identifier = self.ec2_access_key self.log.debug("Fedora_ec2_Builder instance %s pushed image with uuid %s to provider_image UUID (%s)" % (id(self), target_image_id, self.new_image_id)) self.percent_complete=100 def abort(self): # TODO: Make this progressively more robust # In the near term, the most important thing we can do is terminate any EC2 instance we may be using if self.instance: instance_id = self.instance.id try: self.terminate_instance(self.instance) except Exception, e: self.log.warning("Warning, encountered - Instance %s may not be terminated ******** " % (instance_id)) self.log.exception(e) # This file content is tightly bound up with our mod code above # I've inserted it as class variables for convenience rc_local="""# We have seen timing issues with curl commands - try several times for t in 1 2 3 4 5 6 7 8 9 10; do echo "Try number $t" >> /tmp/ec2-keypull.stderr curl -o /tmp/my-key http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key 2>> /tmp/ec2-keypull.stderr [ -f /tmp/my-key ] && break sleep 10 done if ! [ -f /tmp/my-key ]; then echo "Failed to retrieve SSH key after 10 tries and 100 seconds" > /dev/hvc0 exit 1 fi dd if=/dev/urandom count=50 2>/dev/null|md5sum|awk '{ print $1 }'|passwd --stdin root >/dev/null if [ ! -d /root/.ssh ] ; then mkdir /root/.ssh chmod 700 /root/.ssh fi cat /tmp/my-key >> /root/.ssh/authorized_keys chmod 600 /root/.ssh/authorized_keys for home in `find /home/* -maxdepth 0 -type d 2>/dev/null | tr '\\n' ' '`; do user=`echo $home | awk -F '/' '{ print $3 }'` if [ ! 
-d $home/.ssh ] ; then mkdir -p $home/.ssh chmod 700 $home/.ssh chown $user $home/.ssh fi cat /tmp/my-key >> $home/.ssh/authorized_keys chmod 600 $home/.ssh/authorized_keys chown $user $home/.ssh/authorized_keys done rm /tmp/my-key """ ifcfg_eth0="""DEVICE=eth0 BOOTPROTO=dhcp ONBOOT=yes TYPE=Ethernet USERCTL=yes PEERDNS=yes IPV6INIT=no """ menu_lst="""default=0 timeout=0 title #TITLE# root (hd0) kernel /boot/vmlinuz-#KERNEL_VERSION# ro root=LABEL=/ rd_NO_PLYMOUTH #KERNEL_OPTIONS# initrd /boot/#KERNEL_IMAGE_NAME#-#KERNEL_VERSION#.img """ fstab_32bit="""LABEL=/ / ext3 defaults 1 1 /dev/xvda2 /mnt ext3 defaults,nofail 1 2 /dev/xvda3 swap swap defaults,nofail 0 0 none /dev/pts devpts gid=5,mode=620 0 0 none /dev/shm tmpfs defaults 0 0 none /proc proc defaults 0 0 none /sys sysfs defaults 0 0 """ fstab_64bit="""LABEL=/ / ext3 defaults 1 1 /dev/xvdb /mnt ext3 defaults,nofail 0 0 /dev/xvdc /data ext3 defaults,nofail 0 0 none /dev/pts devpts gid=5,mode=620 0 0 none /dev/shm tmpfs defaults 0 0 none /proc proc defaults 0 0 none /sys sysfs defaults 0 0 """ ############ BEGIN CONFIG-LIKE class variables ########################### ########################################################################## # Perhaps there is a better way to do this but this works for now # TODO: Ideally we should use boto "Location" references when possible - 1.9 contains only DEFAULT and EU # The rest are hard coded strings for now. 
ec2_region_details={ 'ec2-us-east-1': { 'boto_loc': Location.DEFAULT, 'host':'us-east-1', 'i386': 'aki-805ea7e9', 'x86_64': 'aki-825ea7eb' }, 'ec2-us-west-1': { 'boto_loc': 'us-west-1', 'host':'us-west-1', 'i386': 'aki-83396bc6', 'x86_64': 'aki-8d396bc8' }, 'ec2-us-west-2': { 'boto_loc': 'us-west-2', 'host':'us-west-2', 'i386': 'aki-c2e26ff2', 'x86_64': 'aki-98e26fa8' }, 'ec2-ap-southeast-1': { 'boto_loc': 'ap-southeast-1', 'host':'ap-southeast-1', 'i386': 'aki-a4225af6', 'x86_64': 'aki-aa225af8' }, 'ec2-ap-northeast-1': { 'boto_loc': 'ap-northeast-1', 'host':'ap-northeast-1', 'i386': 'aki-ec5df7ed', 'x86_64': 'aki-ee5df7ef' }, 'ec2-sa-east-1': { 'boto_loc': 'sa-east-1', 'host':'sa-east-1', 'i386': 'aki-bc3ce3a1', 'x86_64': 'aki-cc3ce3d1' }, 'ec2-eu-west-1': { 'boto_loc': Location.EU, 'host':'eu-west-1', 'i386': 'aki-64695810', 'x86_64': 'aki-62695816' } } # July 13 - new approach - generic JEOS AMIs for Fedora - no userdata and no euca-tools # ad-hoc ssh keys replace userdata - runtime install of euca tools for bundling # v0.6 of F14 and F15 - dropped F13 for now - also include official public RHEL hourly AMIs for RHEL6 # Sept 1 - 2011 - updated us-west Fedora JEOSes to 0.6 # Sept 30 - 2011 - Moved out of here entirely to ApplicationConfiguration # ec2_jeos_amis = <not here anymore> def add_target_content(self): """Merge in target specific package and repo content. 
TDL object must already exist as self.tdlobj""" doc = None # TODONOW: Fix # if self.config_block: import os.path if None: doc = libxml2.parseDoc(self.config_block) elif os.path.isfile("/etc/imagefactory/target_content.xml"): doc = libxml2.parseFile("/etc/imagefactory/target_content.xml") else: self.log.debug("Found neither a call-time config nor a config file - doing nothing") return # Purely to make the xpath statements below a tiny bit shorter target = self.target os=self.tdlobj.distro version=self.tdlobj.update arch=self.tdlobj.arch # We go from most to least specific in this order: # arch -> version -> os-> target # Note that at the moment we even allow an include statment that covers absolutely everything. # That is, one that doesn't even specify a target - this is to support a very simple call-time syntax include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and @version='%s' and @arch='%s']" % (target, os, version, arch)) if len(include) == 0: include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and @version='%s' and not(@arch)]" % (target, os, version)) if len(include) == 0: include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and not(@version) and not(@arch)]" % (target, os)) if len(include) == 0: include = doc.xpathEval("/template_includes/include[@target='%s' and not(@os) and not(@version) and not(@arch)]" % (target)) if len(include) == 0: include = doc.xpathEval("/template_includes/include[not(@target) and not(@os) and not(@version) and not(@arch)]") if len(include) == 0: self.log.debug("cannot find a config section that matches our build details - doing nothing") return # OK - We have at least one config block that matches our build - take the first one, merge it and be done # TODO: Merge all of them? Err out if there is more than one? Warn? 
include = include[0] packages = include.xpathEval("packages") if len(packages) > 0: self.tdlobj.merge_packages(str(packages[0])) repositories = include.xpathEval("repositories") if len(repositories) > 0: self.tdlobj.merge_repositories(str(repositories[0]))
henrysher/imagefactory
imagefactory-plugins/EC2Cloud/EC2Cloud.py
Python
apache-2.0
70,484
from django.shortcuts import render
from django.views.generic import ListView

from ..cms.models import Page
from ..utils import JsonView
from .models import Original, Print


class ShopOriginalsView(ListView):
    """Shop page listing the enabled Original items, highest order first."""

    context_object_name = 'shop_items'
    queryset = Original.objects.filter(disable=False).order_by('-order')
    template_name = 'shop/shop.html'

    def get_context_data(self, **kwargs):
        ctx = super(ShopOriginalsView, self).get_context_data(**kwargs)
        # Attach the CMS page record that holds the originals-page content.
        ctx['page'] = Page.objects.get(name='shop/originals')
        return ctx


class ShopPrintsView(ListView):
    """Shop page listing the enabled Print items, highest order first."""

    context_object_name = 'shop_items'
    queryset = Print.objects.filter(disable=False).order_by('-order')
    template_name = 'shop/shop.html'

    def get_context_data(self, **kwargs):
        ctx = super(ShopPrintsView, self).get_context_data(**kwargs)
        # Attach the CMS page record that holds the prints-page content.
        ctx['page'] = Page.objects.get(name='shop/prints')
        return ctx


class ShopSortJson(JsonView):
    """AJAX endpoint that persists a drag-and-drop ordering of shop items.

    Expects POST data with 'shop_type' ('originals' or 'prints') and
    'data[]', a list of item primary keys in display order.  The first id
    receives the highest 'order' value, counting down to 1.
    """

    def json_post(self, request, *args, **kwargs):
        item_ids = request.POST.getlist('data[]')
        shop_type = request.POST.get('shop_type')

        if shop_type == 'originals':
            model = Original
        elif shop_type == 'prints':
            model = Print
        else:
            raise Exception("Unknown shop type %s." % shop_type)

        # First id in the list gets the largest order value.
        total = len(item_ids)
        for offset, item_id in enumerate(item_ids):
            record = model.objects.get(pk=item_id)
            record.order = total - offset
            record.save()
rogerhil/flaviabernardes
flaviabernardes/flaviabernardes/shop/views.py
Python
apache-2.0
1,517
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 by Anselm Kruis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
=====================
 Pyheapdump.__main__
=====================

Debug heap dumps.

.. warning::

   This is alpha quality code.

.. autofunction:: main
"""

from __future__ import absolute_import, print_function, unicode_literals, division

import argparse
import sys
import os

from pyheapdump import debug_dump


def _build_parser():
    """Build the argparse parser for the ``python -m pyheapdump`` command."""
    parser = argparse.ArgumentParser(description='debug a Python heap dump',
                                     prog=os.path.basename(sys.executable) + " -m pyheapdump")
    parser.add_argument('--debugger', '-d', choices=['auto', 'pdb', 'pydevd'], default="auto",
                        help="select the debugger, default is 'auto'")
    parser.add_argument('--debugger-dir',
                        help='pydevd only: path to the Python files of PyDev, usually <ECLIPSE_INSTALATION_DIR>/plugins/org.python.pydev_<VERSION>/pysrc/')
    parser.add_argument('--host',
                        help='pydevd only: the user may specify another host, if the debug server is not in the same machine')
    parser.add_argument('--port', type=int, default=5678,
                        help='pydevd only: specifies which port to use for communicating with the server. Default is port 5678')
    parser.add_argument('--stdout', choices=['server', 'console'], default='server',
                        help='pydevd only: pass the stdout to the debug server so that it is printed in its console or to this process console')
    parser.add_argument('--stderr', choices=['server', 'console'], default='server',
                        help='pydevd only: pass the stderr to the debug server so that it is printed in its console or to this process console')
    parser.add_argument('--debug-pyheapdump', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('dumpfile', type=argparse.FileType(mode='rb'), help="the heap dump file")
    return parser


def main(argv=None):
    """Debug a Python heap dump file.

    You can invoke this function using the following command::

       python -m pyheapdump [OPTIONS] pyheapdump

    Use the option '-h' to get help::

       python -m pyheapdump -h
    """
    namespace = _build_parser().parse_args(sys.argv[1:] if argv is None else argv)

    if namespace.debug_pyheapdump:
        # Debug this module itself via a remote pydevd session.  Remote
        # debugging is preferable here because of the debugger specific
        # code later on in debug_dump.
        sys.path.append(namespace.debugger_dir)
        import pydevd  # @UnresolvedImport
        pydevd.settrace(stdoutToServer=True, stderrToServer=True, suspend=True, trace_only_current_thread=True)

    return debug_dump(dumpfile=namespace.dumpfile, debugger_options=vars(namespace))


if __name__ == '__main__':
    sys.exit(main())
akruis/pyheapdump
pyheapdump/__main__.py
Python
apache-2.0
3,105
# Copyright 2012 OpenStack Foundation. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import logging import time import urllib import requests import six.moves.urllib.parse as urlparse from neutronclient import client from neutronclient.common import _ from neutronclient.common import constants from neutronclient.common import exceptions from neutronclient.common import serializer from neutronclient.common import utils _logger = logging.getLogger(__name__) def exception_handler_v20(status_code, error_content): """Exception handler for API v2.0 client. This routine generates the appropriate Neutron exception according to the contents of the response body. :param status_code: HTTP error status code :param error_content: deserialized body of error response """ error_dict = None if isinstance(error_content, dict): error_dict = error_content.get('NeutronError') # Find real error type bad_neutron_error_flag = False if error_dict: # If Neutron key is found, it will definitely contain # a 'message' and 'type' keys? try: error_type = error_dict['type'] error_message = error_dict['message'] if error_dict['detail']: error_message += "\n" + error_dict['detail'] except Exception: bad_neutron_error_flag = True if not bad_neutron_error_flag: # If corresponding exception is defined, use it. 
client_exc = getattr(exceptions, '%sClient' % error_type, None) # Otherwise look up per status-code client exception if not client_exc: client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code) if client_exc: raise client_exc(message=error_message, status_code=status_code) else: raise exceptions.NeutronClientException( status_code=status_code, message=error_message) else: raise exceptions.NeutronClientException(status_code=status_code, message=error_dict) else: message = None if isinstance(error_content, dict): message = error_content.get('message') if message: raise exceptions.NeutronClientException(status_code=status_code, message=message) # If we end up here the exception was not a neutron error msg = "%s-%s" % (status_code, error_content) raise exceptions.NeutronClientException(status_code=status_code, message=msg) class APIParamsCall(object): """A Decorator to add support for format and tenant overriding and filters. """ def __init__(self, function): self.function = function def __get__(self, instance, owner): def with_params(*args, **kwargs): _format = instance.format if 'format' in kwargs: instance.format = kwargs['format'] ret = self.function(instance, *args, **kwargs) instance.format = _format return ret return with_params class Client(object): """Client for the OpenStack Neutron v2.0 API. :param string username: Username for authentication. (optional) :param string user_id: User ID for authentication. (optional) :param string password: Password for authentication. (optional) :param string token: Token for authentication. (optional) :param string tenant_name: Tenant name. (optional) :param string tenant_id: Tenant id. (optional) :param string auth_url: Keystone service endpoint for authorization. :param string service_type: Network service type to pull from the keystone catalog (e.g. 'network') (optional) :param string endpoint_type: Network service endpoint type to pull from the keystone catalog (e.g. 
'publicURL', 'internalURL', or 'adminURL') (optional) :param string region_name: Name of a region to select when choosing an endpoint from the service catalog. :param string endpoint_url: A user-supplied endpoint URL for the neutron service. Lazy-authentication is possible for API service calls if endpoint is set at instantiation.(optional) :param integer timeout: Allows customization of the timeout for client http requests. (optional) :param bool insecure: SSL certificate validation. (optional) :param string ca_cert: SSL CA bundle file to use. (optional) :param integer retries: How many times idempotent (GET, PUT, DELETE) requests to Neutron server should be retried if they fail (default: 0). :param bool raise_errors: If True then exceptions caused by connection failure are propagated to the caller. (default: True) :param session: Keystone client auth session to use. (optional) :param auth: Keystone auth plugin to use. (optional) Example:: from neutronclient.v2_0 import client neutron = client.Client(username=USER, password=PASS, tenant_name=TENANT_NAME, auth_url=KEYSTONE_URL) nets = neutron.list_networks() ... 
""" networks_path = "/networks" network_path = "/networks/%s" ports_path = "/ports" port_path = "/ports/%s" subnets_path = "/subnets" subnet_path = "/subnets/%s" quotas_path = "/quotas" quota_path = "/quotas/%s" extensions_path = "/extensions" extension_path = "/extensions/%s" routers_path = "/routers" router_path = "/routers/%s" floatingips_path = "/floatingips" floatingip_path = "/floatingips/%s" security_groups_path = "/security-groups" security_group_path = "/security-groups/%s" security_group_rules_path = "/security-group-rules" security_group_rule_path = "/security-group-rules/%s" vpnservices_path = "/vpn/vpnservices" vpnservice_path = "/vpn/vpnservices/%s" ipsecpolicies_path = "/vpn/ipsecpolicies" ipsecpolicy_path = "/vpn/ipsecpolicies/%s" ikepolicies_path = "/vpn/ikepolicies" ikepolicy_path = "/vpn/ikepolicies/%s" ipsec_site_connections_path = "/vpn/ipsec-site-connections" ipsec_site_connection_path = "/vpn/ipsec-site-connections/%s" vips_path = "/lb/vips" vip_path = "/lb/vips/%s" pools_path = "/lb/pools" pool_path = "/lb/pools/%s" pool_path_stats = "/lb/pools/%s/stats" members_path = "/lb/members" member_path = "/lb/members/%s" health_monitors_path = "/lb/health_monitors" health_monitor_path = "/lb/health_monitors/%s" associate_pool_health_monitors_path = "/lb/pools/%s/health_monitors" disassociate_pool_health_monitors_path = ( "/lb/pools/%(pool)s/health_monitors/%(health_monitor)s") qos_queues_path = "/qos-queues" qos_queue_path = "/qos-queues/%s" agents_path = "/agents" agent_path = "/agents/%s" network_gateways_path = "/network-gateways" network_gateway_path = "/network-gateways/%s" gateway_devices_path = "/gateway-devices" gateway_device_path = "/gateway-devices/%s" service_providers_path = "/service-providers" credentials_path = "/credentials" credential_path = "/credentials/%s" network_profiles_path = "/network_profiles" network_profile_path = "/network_profiles/%s" network_profile_bindings_path = "/network_profile_bindings" policy_profiles_path = 
"/policy_profiles" policy_profile_path = "/policy_profiles/%s" policy_profile_bindings_path = "/policy_profile_bindings" metering_labels_path = "/metering/metering-labels" metering_label_path = "/metering/metering-labels/%s" metering_label_rules_path = "/metering/metering-label-rules" metering_label_rule_path = "/metering/metering-label-rules/%s" packet_filters_path = "/packet_filters" packet_filter_path = "/packet_filters/%s" DHCP_NETS = '/dhcp-networks' DHCP_AGENTS = '/dhcp-agents' L3_ROUTERS = '/l3-routers' L3_AGENTS = '/l3-agents' LOADBALANCER_POOLS = '/loadbalancer-pools' LOADBALANCER_AGENT = '/loadbalancer-agent' firewall_rules_path = "/fw/firewall_rules" firewall_rule_path = "/fw/firewall_rules/%s" firewall_policies_path = "/fw/firewall_policies" firewall_policy_path = "/fw/firewall_policies/%s" firewall_policy_insert_path = "/fw/firewall_policies/%s/insert_rule" firewall_policy_remove_path = "/fw/firewall_policies/%s/remove_rule" firewalls_path = "/fw/firewalls" firewall_path = "/fw/firewalls/%s" net_partitions_path = "/net-partitions" net_partition_path = "/net-partitions/%s" # API has no way to report plurals, so we have to hard code them EXTED_PLURALS = {'routers': 'router', 'floatingips': 'floatingip', 'service_types': 'service_type', 'service_definitions': 'service_definition', 'security_groups': 'security_group', 'security_group_rules': 'security_group_rule', 'ipsecpolicies': 'ipsecpolicy', 'ikepolicies': 'ikepolicy', 'ipsec_site_connections': 'ipsec_site_connection', 'vpnservices': 'vpnservice', 'vips': 'vip', 'pools': 'pool', 'members': 'member', 'health_monitors': 'health_monitor', 'quotas': 'quota', 'service_providers': 'service_provider', 'firewall_rules': 'firewall_rule', 'firewall_policies': 'firewall_policy', 'firewalls': 'firewall', 'metering_labels': 'metering_label', 'metering_label_rules': 'metering_label_rule', 'net_partitions': 'net_partition', 'packet_filters': 'packet_filter', } # 8192 Is the default max URI len for 
eventlet.wsgi.server MAX_URI_LEN = 8192 def get_attr_metadata(self): if self.format == 'json': return {} old_request_format = self.format self.format = 'json' exts = self.list_extensions()['extensions'] self.format = old_request_format ns = dict([(ext['alias'], ext['namespace']) for ext in exts]) self.EXTED_PLURALS.update(constants.PLURALS) return {'plurals': self.EXTED_PLURALS, 'xmlns': constants.XML_NS_V20, constants.EXT_NS: ns} @APIParamsCall def get_quotas_tenant(self, **_params): """Fetch tenant info in server's context for following quota operation. """ return self.get(self.quota_path % 'tenant', params=_params) @APIParamsCall def list_quotas(self, **_params): """Fetch all tenants' quotas.""" return self.get(self.quotas_path, params=_params) @APIParamsCall def show_quota(self, tenant_id, **_params): """Fetch information of a certain tenant's quotas.""" return self.get(self.quota_path % (tenant_id), params=_params) @APIParamsCall def update_quota(self, tenant_id, body=None): """Update a tenant's quotas.""" return self.put(self.quota_path % (tenant_id), body=body) @APIParamsCall def delete_quota(self, tenant_id): """Delete the specified tenant's quota values.""" return self.delete(self.quota_path % (tenant_id)) @APIParamsCall def list_extensions(self, **_params): """Fetch a list of all exts on server side.""" return self.get(self.extensions_path, params=_params) @APIParamsCall def show_extension(self, ext_alias, **_params): """Fetch a list of all exts on server side.""" return self.get(self.extension_path % ext_alias, params=_params) @APIParamsCall def list_ports(self, retrieve_all=True, **_params): """Fetches a list of all networks for a tenant.""" # Pass filters in "params" argument to do_request return self.list('ports', self.ports_path, retrieve_all, **_params) @APIParamsCall def show_port(self, port, **_params): """Fetches information of a certain network.""" return self.get(self.port_path % (port), params=_params) @APIParamsCall def create_port(self, 
                    body=None):
        """Creates a new port."""
        return self.post(self.ports_path, body=body)

    @APIParamsCall
    def update_port(self, port, body=None):
        """Updates a port."""
        return self.put(self.port_path % (port), body=body)

    @APIParamsCall
    def delete_port(self, port):
        """Deletes the specified port."""
        return self.delete(self.port_path % (port))

    @APIParamsCall
    def list_networks(self, retrieve_all=True, **_params):
        """Fetches a list of all networks for a tenant."""
        # Pass filters in "params" argument to do_request
        return self.list('networks', self.networks_path, retrieve_all,
                         **_params)

    @APIParamsCall
    def show_network(self, network, **_params):
        """Fetches information of a certain network."""
        return self.get(self.network_path % (network), params=_params)

    @APIParamsCall
    def create_network(self, body=None):
        """Creates a new network."""
        return self.post(self.networks_path, body=body)

    @APIParamsCall
    def update_network(self, network, body=None):
        """Updates a network."""
        return self.put(self.network_path % (network), body=body)

    @APIParamsCall
    def delete_network(self, network):
        """Deletes the specified network."""
        return self.delete(self.network_path % (network))

    @APIParamsCall
    def list_subnets(self, retrieve_all=True, **_params):
        """Fetches a list of all subnets for a tenant."""
        # Pass filters in "params" argument to do_request
        return self.list('subnets', self.subnets_path, retrieve_all,
                         **_params)

    @APIParamsCall
    def show_subnet(self, subnet, **_params):
        """Fetches information of a certain subnet."""
        return self.get(self.subnet_path % (subnet), params=_params)

    @APIParamsCall
    def create_subnet(self, body=None):
        """Creates a new subnet."""
        return self.post(self.subnets_path, body=body)

    @APIParamsCall
    def update_subnet(self, subnet, body=None):
        """Updates a subnet."""
        return self.put(self.subnet_path % (subnet), body=body)

    @APIParamsCall
    def delete_subnet(self, subnet):
        """Deletes the specified subnet."""
        return self.delete(self.subnet_path % (subnet))

    @APIParamsCall
    def list_routers(self, retrieve_all=True, **_params):
        """Fetches a list
of all routers for a tenant.""" # Pass filters in "params" argument to do_request return self.list('routers', self.routers_path, retrieve_all, **_params) @APIParamsCall def show_router(self, router, **_params): """Fetches information of a certain router.""" return self.get(self.router_path % (router), params=_params) @APIParamsCall def create_router(self, body=None): """Creates a new router.""" return self.post(self.routers_path, body=body) @APIParamsCall def update_router(self, router, body=None): """Updates a router.""" return self.put(self.router_path % (router), body=body) @APIParamsCall def delete_router(self, router): """Deletes the specified router.""" return self.delete(self.router_path % (router)) @APIParamsCall def add_interface_router(self, router, body=None): """Adds an internal network interface to the specified router.""" return self.put((self.router_path % router) + "/add_router_interface", body=body) @APIParamsCall def remove_interface_router(self, router, body=None): """Removes an internal network interface from the specified router.""" return self.put((self.router_path % router) + "/remove_router_interface", body=body) @APIParamsCall def add_gateway_router(self, router, body=None): """Adds an external network gateway to the specified router.""" return self.put((self.router_path % router), body={'router': {'external_gateway_info': body}}) @APIParamsCall def remove_gateway_router(self, router): """Removes an external network gateway from the specified router.""" return self.put((self.router_path % router), body={'router': {'external_gateway_info': {}}}) @APIParamsCall def list_floatingips(self, retrieve_all=True, **_params): """Fetches a list of all floatingips for a tenant.""" # Pass filters in "params" argument to do_request return self.list('floatingips', self.floatingips_path, retrieve_all, **_params) @APIParamsCall def show_floatingip(self, floatingip, **_params): """Fetches information of a certain floatingip.""" return 
self.get(self.floatingip_path % (floatingip), params=_params) @APIParamsCall def create_floatingip(self, body=None): """Creates a new floatingip.""" return self.post(self.floatingips_path, body=body) @APIParamsCall def update_floatingip(self, floatingip, body=None): """Updates a floatingip.""" return self.put(self.floatingip_path % (floatingip), body=body) @APIParamsCall def delete_floatingip(self, floatingip): """Deletes the specified floatingip.""" return self.delete(self.floatingip_path % (floatingip)) @APIParamsCall def create_security_group(self, body=None): """Creates a new security group.""" return self.post(self.security_groups_path, body=body) @APIParamsCall def update_security_group(self, security_group, body=None): """Updates a security group.""" return self.put(self.security_group_path % security_group, body=body) @APIParamsCall def list_security_groups(self, retrieve_all=True, **_params): """Fetches a list of all security groups for a tenant.""" return self.list('security_groups', self.security_groups_path, retrieve_all, **_params) @APIParamsCall def show_security_group(self, security_group, **_params): """Fetches information of a certain security group.""" return self.get(self.security_group_path % (security_group), params=_params) @APIParamsCall def delete_security_group(self, security_group): """Deletes the specified security group.""" return self.delete(self.security_group_path % (security_group)) @APIParamsCall def create_security_group_rule(self, body=None): """Creates a new security group rule.""" return self.post(self.security_group_rules_path, body=body) @APIParamsCall def delete_security_group_rule(self, security_group_rule): """Deletes the specified security group rule.""" return self.delete(self.security_group_rule_path % (security_group_rule)) @APIParamsCall def list_security_group_rules(self, retrieve_all=True, **_params): """Fetches a list of all security group rules for a tenant.""" return self.list('security_group_rules', 
self.security_group_rules_path, retrieve_all, **_params) @APIParamsCall def show_security_group_rule(self, security_group_rule, **_params): """Fetches information of a certain security group rule.""" return self.get(self.security_group_rule_path % (security_group_rule), params=_params) @APIParamsCall def list_vpnservices(self, retrieve_all=True, **_params): """Fetches a list of all configured VPN services for a tenant.""" return self.list('vpnservices', self.vpnservices_path, retrieve_all, **_params) @APIParamsCall def show_vpnservice(self, vpnservice, **_params): """Fetches information of a specific VPN service.""" return self.get(self.vpnservice_path % (vpnservice), params=_params) @APIParamsCall def create_vpnservice(self, body=None): """Creates a new VPN service.""" return self.post(self.vpnservices_path, body=body) @APIParamsCall def update_vpnservice(self, vpnservice, body=None): """Updates a VPN service.""" return self.put(self.vpnservice_path % (vpnservice), body=body) @APIParamsCall def delete_vpnservice(self, vpnservice): """Deletes the specified VPN service.""" return self.delete(self.vpnservice_path % (vpnservice)) @APIParamsCall def list_ipsec_site_connections(self, retrieve_all=True, **_params): """Fetches all configured IPsecSiteConnections for a tenant.""" return self.list('ipsec_site_connections', self.ipsec_site_connections_path, retrieve_all, **_params) @APIParamsCall def show_ipsec_site_connection(self, ipsecsite_conn, **_params): """Fetches information of a specific IPsecSiteConnection.""" return self.get( self.ipsec_site_connection_path % (ipsecsite_conn), params=_params ) @APIParamsCall def create_ipsec_site_connection(self, body=None): """Creates a new IPsecSiteConnection.""" return self.post(self.ipsec_site_connections_path, body=body) @APIParamsCall def update_ipsec_site_connection(self, ipsecsite_conn, body=None): """Updates an IPsecSiteConnection.""" return self.put( self.ipsec_site_connection_path % (ipsecsite_conn), body=body ) 
@APIParamsCall def delete_ipsec_site_connection(self, ipsecsite_conn): """Deletes the specified IPsecSiteConnection.""" return self.delete(self.ipsec_site_connection_path % (ipsecsite_conn)) @APIParamsCall def list_ikepolicies(self, retrieve_all=True, **_params): """Fetches a list of all configured IKEPolicies for a tenant.""" return self.list('ikepolicies', self.ikepolicies_path, retrieve_all, **_params) @APIParamsCall def show_ikepolicy(self, ikepolicy, **_params): """Fetches information of a specific IKEPolicy.""" return self.get(self.ikepolicy_path % (ikepolicy), params=_params) @APIParamsCall def create_ikepolicy(self, body=None): """Creates a new IKEPolicy.""" return self.post(self.ikepolicies_path, body=body) @APIParamsCall def update_ikepolicy(self, ikepolicy, body=None): """Updates an IKEPolicy.""" return self.put(self.ikepolicy_path % (ikepolicy), body=body) @APIParamsCall def delete_ikepolicy(self, ikepolicy): """Deletes the specified IKEPolicy.""" return self.delete(self.ikepolicy_path % (ikepolicy)) @APIParamsCall def list_ipsecpolicies(self, retrieve_all=True, **_params): """Fetches a list of all configured IPsecPolicies for a tenant.""" return self.list('ipsecpolicies', self.ipsecpolicies_path, retrieve_all, **_params) @APIParamsCall def show_ipsecpolicy(self, ipsecpolicy, **_params): """Fetches information of a specific IPsecPolicy.""" return self.get(self.ipsecpolicy_path % (ipsecpolicy), params=_params) @APIParamsCall def create_ipsecpolicy(self, body=None): """Creates a new IPsecPolicy.""" return self.post(self.ipsecpolicies_path, body=body) @APIParamsCall def update_ipsecpolicy(self, ipsecpolicy, body=None): """Updates an IPsecPolicy.""" return self.put(self.ipsecpolicy_path % (ipsecpolicy), body=body) @APIParamsCall def delete_ipsecpolicy(self, ipsecpolicy): """Deletes the specified IPsecPolicy.""" return self.delete(self.ipsecpolicy_path % (ipsecpolicy)) @APIParamsCall def list_vips(self, retrieve_all=True, **_params): """Fetches a list of all 
load balancer vips for a tenant.""" # Pass filters in "params" argument to do_request return self.list('vips', self.vips_path, retrieve_all, **_params) @APIParamsCall def show_vip(self, vip, **_params): """Fetches information of a certain load balancer vip.""" return self.get(self.vip_path % (vip), params=_params) @APIParamsCall def create_vip(self, body=None): """Creates a new load balancer vip.""" return self.post(self.vips_path, body=body) @APIParamsCall def update_vip(self, vip, body=None): """Updates a load balancer vip.""" return self.put(self.vip_path % (vip), body=body) @APIParamsCall def delete_vip(self, vip): """Deletes the specified load balancer vip.""" return self.delete(self.vip_path % (vip)) @APIParamsCall def list_pools(self, retrieve_all=True, **_params): """Fetches a list of all load balancer pools for a tenant.""" # Pass filters in "params" argument to do_request return self.list('pools', self.pools_path, retrieve_all, **_params) @APIParamsCall def show_pool(self, pool, **_params): """Fetches information of a certain load balancer pool.""" return self.get(self.pool_path % (pool), params=_params) @APIParamsCall def create_pool(self, body=None): """Creates a new load balancer pool.""" return self.post(self.pools_path, body=body) @APIParamsCall def update_pool(self, pool, body=None): """Updates a load balancer pool.""" return self.put(self.pool_path % (pool), body=body) @APIParamsCall def delete_pool(self, pool): """Deletes the specified load balancer pool.""" return self.delete(self.pool_path % (pool)) @APIParamsCall def retrieve_pool_stats(self, pool, **_params): """Retrieves stats for a certain load balancer pool.""" return self.get(self.pool_path_stats % (pool), params=_params) @APIParamsCall def list_members(self, retrieve_all=True, **_params): """Fetches a list of all load balancer members for a tenant.""" # Pass filters in "params" argument to do_request return self.list('members', self.members_path, retrieve_all, **_params) @APIParamsCall 
def show_member(self, member, **_params): """Fetches information of a certain load balancer member.""" return self.get(self.member_path % (member), params=_params) @APIParamsCall def create_member(self, body=None): """Creates a new load balancer member.""" return self.post(self.members_path, body=body) @APIParamsCall def update_member(self, member, body=None): """Updates a load balancer member.""" return self.put(self.member_path % (member), body=body) @APIParamsCall def delete_member(self, member): """Deletes the specified load balancer member.""" return self.delete(self.member_path % (member)) @APIParamsCall def list_health_monitors(self, retrieve_all=True, **_params): """Fetches a list of all load balancer health monitors for a tenant.""" # Pass filters in "params" argument to do_request return self.list('health_monitors', self.health_monitors_path, retrieve_all, **_params) @APIParamsCall def show_health_monitor(self, health_monitor, **_params): """Fetches information of a certain load balancer health monitor.""" return self.get(self.health_monitor_path % (health_monitor), params=_params) @APIParamsCall def create_health_monitor(self, body=None): """Creates a new load balancer health monitor.""" return self.post(self.health_monitors_path, body=body) @APIParamsCall def update_health_monitor(self, health_monitor, body=None): """Updates a load balancer health monitor.""" return self.put(self.health_monitor_path % (health_monitor), body=body) @APIParamsCall def delete_health_monitor(self, health_monitor): """Deletes the specified load balancer health monitor.""" return self.delete(self.health_monitor_path % (health_monitor)) @APIParamsCall def associate_health_monitor(self, pool, body): """Associate specified load balancer health monitor and pool.""" return self.post(self.associate_pool_health_monitors_path % (pool), body=body) @APIParamsCall def disassociate_health_monitor(self, pool, health_monitor): """Disassociate specified load balancer health monitor and 
pool.""" path = (self.disassociate_pool_health_monitors_path % {'pool': pool, 'health_monitor': health_monitor}) return self.delete(path) @APIParamsCall def create_qos_queue(self, body=None): """Creates a new queue.""" return self.post(self.qos_queues_path, body=body) @APIParamsCall def list_qos_queues(self, **_params): """Fetches a list of all queues for a tenant.""" return self.get(self.qos_queues_path, params=_params) @APIParamsCall def show_qos_queue(self, queue, **_params): """Fetches information of a certain queue.""" return self.get(self.qos_queue_path % (queue), params=_params) @APIParamsCall def delete_qos_queue(self, queue): """Deletes the specified queue.""" return self.delete(self.qos_queue_path % (queue)) @APIParamsCall def list_agents(self, **_params): """Fetches agents.""" # Pass filters in "params" argument to do_request return self.get(self.agents_path, params=_params) @APIParamsCall def show_agent(self, agent, **_params): """Fetches information of a certain agent.""" return self.get(self.agent_path % (agent), params=_params) @APIParamsCall def update_agent(self, agent, body=None): """Updates an agent.""" return self.put(self.agent_path % (agent), body=body) @APIParamsCall def delete_agent(self, agent): """Deletes the specified agent.""" return self.delete(self.agent_path % (agent)) @APIParamsCall def list_network_gateways(self, **_params): """Retrieve network gateways.""" return self.get(self.network_gateways_path, params=_params) @APIParamsCall def show_network_gateway(self, gateway_id, **_params): """Fetch a network gateway.""" return self.get(self.network_gateway_path % gateway_id, params=_params) @APIParamsCall def create_network_gateway(self, body=None): """Create a new network gateway.""" return self.post(self.network_gateways_path, body=body) @APIParamsCall def update_network_gateway(self, gateway_id, body=None): """Update a network gateway.""" return self.put(self.network_gateway_path % gateway_id, body=body) @APIParamsCall def 
delete_network_gateway(self, gateway_id): """Delete the specified network gateway.""" return self.delete(self.network_gateway_path % gateway_id) @APIParamsCall def connect_network_gateway(self, gateway_id, body=None): """Connect a network gateway to the specified network.""" base_uri = self.network_gateway_path % gateway_id return self.put("%s/connect_network" % base_uri, body=body) @APIParamsCall def disconnect_network_gateway(self, gateway_id, body=None): """Disconnect a network from the specified gateway.""" base_uri = self.network_gateway_path % gateway_id return self.put("%s/disconnect_network" % base_uri, body=body) @APIParamsCall def list_gateway_devices(self, **_params): """Retrieve gateway devices.""" return self.get(self.gateway_devices_path, params=_params) @APIParamsCall def show_gateway_device(self, gateway_device_id, **_params): """Fetch a gateway device.""" return self.get(self.gateway_device_path % gateway_device_id, params=_params) @APIParamsCall def create_gateway_device(self, body=None): """Create a new gateway device.""" return self.post(self.gateway_devices_path, body=body) @APIParamsCall def update_gateway_device(self, gateway_device_id, body=None): """Updates a new gateway device.""" return self.put(self.gateway_device_path % gateway_device_id, body=body) @APIParamsCall def delete_gateway_device(self, gateway_device_id): """Delete the specified gateway device.""" return self.delete(self.gateway_device_path % gateway_device_id) @APIParamsCall def list_dhcp_agent_hosting_networks(self, network, **_params): """Fetches a list of dhcp agents hosting a network.""" return self.get((self.network_path + self.DHCP_AGENTS) % network, params=_params) @APIParamsCall def list_networks_on_dhcp_agent(self, dhcp_agent, **_params): """Fetches a list of dhcp agents hosting a network.""" return self.get((self.agent_path + self.DHCP_NETS) % dhcp_agent, params=_params) @APIParamsCall def add_network_to_dhcp_agent(self, dhcp_agent, body=None): """Adds a network to 
dhcp agent.""" return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent, body=body) @APIParamsCall def remove_network_from_dhcp_agent(self, dhcp_agent, network_id): """Remove a network from dhcp agent.""" return self.delete((self.agent_path + self.DHCP_NETS + "/%s") % ( dhcp_agent, network_id)) @APIParamsCall def list_l3_agent_hosting_routers(self, router, **_params): """Fetches a list of L3 agents hosting a router.""" return self.get((self.router_path + self.L3_AGENTS) % router, params=_params) @APIParamsCall def list_routers_on_l3_agent(self, l3_agent, **_params): """Fetches a list of L3 agents hosting a router.""" return self.get((self.agent_path + self.L3_ROUTERS) % l3_agent, params=_params) @APIParamsCall def add_router_to_l3_agent(self, l3_agent, body): """Adds a router to L3 agent.""" return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent, body=body) @APIParamsCall def list_firewall_rules(self, retrieve_all=True, **_params): """Fetches a list of all firewall rules for a tenant.""" # Pass filters in "params" argument to do_request return self.list('firewall_rules', self.firewall_rules_path, retrieve_all, **_params) @APIParamsCall def show_firewall_rule(self, firewall_rule, **_params): """Fetches information of a certain firewall rule.""" return self.get(self.firewall_rule_path % (firewall_rule), params=_params) @APIParamsCall def create_firewall_rule(self, body=None): """Creates a new firewall rule.""" return self.post(self.firewall_rules_path, body=body) @APIParamsCall def update_firewall_rule(self, firewall_rule, body=None): """Updates a firewall rule.""" return self.put(self.firewall_rule_path % (firewall_rule), body=body) @APIParamsCall def delete_firewall_rule(self, firewall_rule): """Deletes the specified firewall rule.""" return self.delete(self.firewall_rule_path % (firewall_rule)) @APIParamsCall def list_firewall_policies(self, retrieve_all=True, **_params): """Fetches a list of all firewall policies for a tenant.""" # Pass filters 
in "params" argument to do_request return self.list('firewall_policies', self.firewall_policies_path, retrieve_all, **_params) @APIParamsCall def show_firewall_policy(self, firewall_policy, **_params): """Fetches information of a certain firewall policy.""" return self.get(self.firewall_policy_path % (firewall_policy), params=_params) @APIParamsCall def create_firewall_policy(self, body=None): """Creates a new firewall policy.""" return self.post(self.firewall_policies_path, body=body) @APIParamsCall def update_firewall_policy(self, firewall_policy, body=None): """Updates a firewall policy.""" return self.put(self.firewall_policy_path % (firewall_policy), body=body) @APIParamsCall def delete_firewall_policy(self, firewall_policy): """Deletes the specified firewall policy.""" return self.delete(self.firewall_policy_path % (firewall_policy)) @APIParamsCall def firewall_policy_insert_rule(self, firewall_policy, body=None): """Inserts specified rule into firewall policy.""" return self.put(self.firewall_policy_insert_path % (firewall_policy), body=body) @APIParamsCall def firewall_policy_remove_rule(self, firewall_policy, body=None): """Removes specified rule from firewall policy.""" return self.put(self.firewall_policy_remove_path % (firewall_policy), body=body) @APIParamsCall def list_firewalls(self, retrieve_all=True, **_params): """Fetches a list of all firewals for a tenant.""" # Pass filters in "params" argument to do_request return self.list('firewalls', self.firewalls_path, retrieve_all, **_params) @APIParamsCall def show_firewall(self, firewall, **_params): """Fetches information of a certain firewall.""" return self.get(self.firewall_path % (firewall), params=_params) @APIParamsCall def create_firewall(self, body=None): """Creates a new firewall.""" return self.post(self.firewalls_path, body=body) @APIParamsCall def update_firewall(self, firewall, body=None): """Updates a firewall.""" return self.put(self.firewall_path % (firewall), body=body) @APIParamsCall 
def delete_firewall(self, firewall): """Deletes the specified firewall.""" return self.delete(self.firewall_path % (firewall)) @APIParamsCall def remove_router_from_l3_agent(self, l3_agent, router_id): """Remove a router from l3 agent.""" return self.delete((self.agent_path + self.L3_ROUTERS + "/%s") % ( l3_agent, router_id)) @APIParamsCall def get_lbaas_agent_hosting_pool(self, pool, **_params): """Fetches a loadbalancer agent hosting a pool.""" return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool, params=_params) @APIParamsCall def list_pools_on_lbaas_agent(self, lbaas_agent, **_params): """Fetches a list of pools hosted by the loadbalancer agent.""" return self.get((self.agent_path + self.LOADBALANCER_POOLS) % lbaas_agent, params=_params) @APIParamsCall def list_service_providers(self, retrieve_all=True, **_params): """Fetches service providers.""" # Pass filters in "params" argument to do_request return self.list('service_providers', self.service_providers_path, retrieve_all, **_params) def list_credentials(self, **_params): """Fetch a list of all credentials for a tenant.""" return self.get(self.credentials_path, params=_params) @APIParamsCall def show_credential(self, credential, **_params): """Fetch a credential.""" return self.get(self.credential_path % (credential), params=_params) @APIParamsCall def create_credential(self, body=None): """Create a new credential.""" return self.post(self.credentials_path, body=body) @APIParamsCall def update_credential(self, credential, body=None): """Update a credential.""" return self.put(self.credential_path % (credential), body=body) @APIParamsCall def delete_credential(self, credential): """Delete the specified credential.""" return self.delete(self.credential_path % (credential)) def list_network_profile_bindings(self, **params): """Fetch a list of all tenants associated for a network profile.""" return self.get(self.network_profile_bindings_path, params=params) @APIParamsCall def 
list_network_profiles(self, **params): """Fetch a list of all network profiles for a tenant.""" return self.get(self.network_profiles_path, params=params) @APIParamsCall def show_network_profile(self, profile, **params): """Fetch a network profile.""" return self.get(self.network_profile_path % (profile), params=params) @APIParamsCall def create_network_profile(self, body=None): """Create a network profile.""" return self.post(self.network_profiles_path, body=body) @APIParamsCall def update_network_profile(self, profile, body=None): """Update a network profile.""" return self.put(self.network_profile_path % (profile), body=body) @APIParamsCall def delete_network_profile(self, profile): """Delete the network profile.""" return self.delete(self.network_profile_path % profile) @APIParamsCall def list_policy_profile_bindings(self, **params): """Fetch a list of all tenants associated for a policy profile.""" return self.get(self.policy_profile_bindings_path, params=params) @APIParamsCall def list_policy_profiles(self, **params): """Fetch a list of all network profiles for a tenant.""" return self.get(self.policy_profiles_path, params=params) @APIParamsCall def show_policy_profile(self, profile, **params): """Fetch a network profile.""" return self.get(self.policy_profile_path % (profile), params=params) @APIParamsCall def update_policy_profile(self, profile, body=None): """Update a policy profile.""" return self.put(self.policy_profile_path % (profile), body=body) @APIParamsCall def create_metering_label(self, body=None): """Creates a metering label.""" return self.post(self.metering_labels_path, body=body) @APIParamsCall def delete_metering_label(self, label): """Deletes the specified metering label.""" return self.delete(self.metering_label_path % (label)) @APIParamsCall def list_metering_labels(self, retrieve_all=True, **_params): """Fetches a list of all metering labels for a tenant.""" return self.list('metering_labels', self.metering_labels_path, retrieve_all, 
**_params) @APIParamsCall def show_metering_label(self, metering_label, **_params): """Fetches information of a certain metering label.""" return self.get(self.metering_label_path % (metering_label), params=_params) @APIParamsCall def create_metering_label_rule(self, body=None): """Creates a metering label rule.""" return self.post(self.metering_label_rules_path, body=body) @APIParamsCall def delete_metering_label_rule(self, rule): """Deletes the specified metering label rule.""" return self.delete(self.metering_label_rule_path % (rule)) @APIParamsCall def list_metering_label_rules(self, retrieve_all=True, **_params): """Fetches a list of all metering label rules for a label.""" return self.list('metering_label_rules', self.metering_label_rules_path, retrieve_all, **_params) @APIParamsCall def show_metering_label_rule(self, metering_label_rule, **_params): """Fetches information of a certain metering label rule.""" return self.get(self.metering_label_rule_path % (metering_label_rule), params=_params) @APIParamsCall def list_net_partitions(self, **params): """Fetch a list of all network partitions for a tenant.""" return self.get(self.net_partitions_path, params=params) @APIParamsCall def show_net_partition(self, netpartition, **params): """Fetch a network partition.""" return self.get(self.net_partition_path % (netpartition), params=params) @APIParamsCall def create_net_partition(self, body=None): """Create a network partition.""" return self.post(self.net_partitions_path, body=body) @APIParamsCall def delete_net_partition(self, netpartition): """Delete the network partition.""" return self.delete(self.net_partition_path % netpartition) @APIParamsCall def create_packet_filter(self, body=None): """Create a new packet filter.""" return self.post(self.packet_filters_path, body=body) @APIParamsCall def update_packet_filter(self, packet_filter_id, body=None): """Update a packet filter.""" return self.put(self.packet_filter_path % packet_filter_id, body=body) 
@APIParamsCall def list_packet_filters(self, retrieve_all=True, **_params): """Fetch a list of all packet filters for a tenant.""" return self.list('packet_filters', self.packet_filters_path, retrieve_all, **_params) @APIParamsCall def show_packet_filter(self, packet_filter_id, **_params): """Fetch information of a certain packet filter.""" return self.get(self.packet_filter_path % packet_filter_id, params=_params) @APIParamsCall def delete_packet_filter(self, packet_filter_id): """Delete the specified packet filter.""" return self.delete(self.packet_filter_path % packet_filter_id) def __init__(self, **kwargs): """Initialize a new client for the Neutron v2.0 API.""" super(Client, self).__init__() self.retries = kwargs.pop('retries', 0) self.raise_errors = kwargs.pop('raise_errors', True) self.httpclient = client.construct_http_client(**kwargs) self.version = '2.0' self.format = 'json' self.action_prefix = "/v%s" % (self.version) self.retry_interval = 1 def _handle_fault_response(self, status_code, response_body): # Create exception with HTTP status code and message _logger.debug("Error message: %s", response_body) # Add deserialized error message to exception arguments try: des_error_body = self.deserialize(response_body, status_code) except Exception: # If unable to deserialized body it is probably not a # Neutron error des_error_body = {'message': response_body} # Raise the appropriate exception exception_handler_v20(status_code, des_error_body) def _check_uri_length(self, action): uri_len = len(self.httpclient.endpoint_url) + len(action) if uri_len > self.MAX_URI_LEN: raise exceptions.RequestURITooLong( excess=uri_len - self.MAX_URI_LEN) def do_request(self, method, action, body=None, headers=None, params=None): # Add format and tenant_id action += ".%s" % self.format action = self.action_prefix + action if type(params) is dict and params: params = utils.safe_encode_dict(params) action += '?' 
+ urllib.urlencode(params, doseq=1) # Ensure client always has correct uri - do not guesstimate anything self.httpclient.authenticate_and_fetch_endpoint_url() self._check_uri_length(action) if body: body = self.serialize(body) resp, replybody = self.httpclient.do_request( action, method, body=body, content_type=self.content_type()) status_code = resp.status_code if status_code in (requests.codes.ok, requests.codes.created, requests.codes.accepted, requests.codes.no_content): return self.deserialize(replybody, status_code) else: if not replybody: replybody = resp.reason self._handle_fault_response(status_code, replybody) def get_auth_info(self): return self.httpclient.get_auth_info() def serialize(self, data): """Serializes a dictionary into either XML or JSON. A dictionary with a single key can be passed and it can contain any structure. """ if data is None: return None elif type(data) is dict: return serializer.Serializer( self.get_attr_metadata()).serialize(data, self.content_type()) else: raise Exception(_("Unable to serialize object of type = '%s'") % type(data)) def deserialize(self, data, status_code): """Deserializes an XML or JSON string into a dictionary.""" if status_code == 204: return data return serializer.Serializer(self.get_attr_metadata()).deserialize( data, self.content_type())['body'] def content_type(self, _format=None): """Returns the mime-type for either 'xml' or 'json'. Defaults to the currently set format. """ _format = _format or self.format return "application/%s" % (_format) def retry_request(self, method, action, body=None, headers=None, params=None): """Call do_request with the default retry configuration. Only idempotent requests should retry failed connection attempts. 
:raises: ConnectionFailed if the maximum # of retries is exceeded """ max_attempts = self.retries + 1 for i in range(max_attempts): try: return self.do_request(method, action, body=body, headers=headers, params=params) except exceptions.ConnectionFailed: # Exception has already been logged by do_request() if i < self.retries: _logger.debug('Retrying connection to Neutron service') time.sleep(self.retry_interval) elif self.raise_errors: raise if self.retries: msg = (_("Failed to connect to Neutron server after %d attempts") % max_attempts) else: msg = _("Failed to connect Neutron server") raise exceptions.ConnectionFailed(reason=msg) def delete(self, action, body=None, headers=None, params=None): return self.retry_request("DELETE", action, body=body, headers=headers, params=params) def get(self, action, body=None, headers=None, params=None): return self.retry_request("GET", action, body=body, headers=headers, params=params) def post(self, action, body=None, headers=None, params=None): # Do not retry POST requests to avoid the orphan objects problem. return self.do_request("POST", action, body=body, headers=headers, params=params) def put(self, action, body=None, headers=None, params=None): return self.retry_request("PUT", action, body=body, headers=headers, params=params) def list(self, collection, path, retrieve_all=True, **params): if retrieve_all: res = [] for r in self._pagination(collection, path, **params): res.extend(r[collection]) return {collection: res} else: return self._pagination(collection, path, **params) def _pagination(self, collection, path, **params): if params.get('page_reverse', False): linkrel = 'previous' else: linkrel = 'next' next = True while next: res = self.get(path, params=params) yield res next = False try: for link in res['%s_links' % collection]: if link['rel'] == linkrel: query_str = urlparse.urlparse(link['href']).query params = urlparse.parse_qs(query_str) next = True break except KeyError: break
cboling/SDNdbg
docs/old-stuff/pydzcvr/doc/neutronclient/v2_0/client.py
Python
apache-2.0
54,103
from cloudshell.cp.vcenter.models.vCenterVMFromImageResourceModel import vCenterVMFromImageResourceModel


class DeployFromImageDetails(object):
    """Value object pairing image deployment parameters with an app name."""

    def __init__(self, image_params, app_name):
        """
        :type image_params: vCenterVMFromImageResourceModel
        :type app_name: str
        :return:
        """
        # Plain attribute storage; no validation or transformation is done.
        self.image_params, self.app_name = image_params, app_name
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/models/DeployFromImageDetails.py
Python
apache-2.0
396