import pytest
from hybra import core
import datetime
import os
from helpers import filters
from helpers import counters
from helpers import urls
from helpers import exporter


class TestTextFilter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])

    def test_filter_text_empty(self):
        fb = filters.filter_by_text(self.dataFacebook)
        media = filters.filter_by_text(self.dataMedia)
        assert(len(list(fb)) == 4)
        assert(len(list(media)) == 276)

    def test_filter_text_uppercase(self):
        fb = filters.filter_by_text(self.dataFacebook, text=['POST'])
        media = filters.filter_by_text(self.dataMedia, text=['ALGORITMI'])
        assert(len(fb) == 3)
        assert(len(media) == 1)

    def test_filter_text_substrings(self):
        fb = filters.filter_by_text(self.dataFacebook, text=['pos'])
        media = filters.filter_by_text(self.dataMedia, text=['algorit'])
        assert(len(fb) == 3)
        assert(len(media) == 1)

    def test_filter_text_substrings_false(self):
        fb = filters.filter_by_text(self.dataFacebook, text=['pos'],
                                    substrings=False)
        media = filters.filter_by_text(self.dataMedia, text=['algorit'],
                                       substrings=False)
        assert(len(fb) == 0)
        assert(len(media) == 0)

    def test_filter_text_inclusive(self):
        fb = filters.filter_by_text(self.dataFacebook,
                                    text=['post', 'missing text'])
        media = filters.filter_by_text(self.dataMedia,
                                       text=['algoritmi', 'missing text'])
        assert(len(fb) == 0)
        assert(len(media) == 0)

    def test_filter_text_not_inclusive(self):
        fb = filters.filter_by_text(self.dataFacebook,
                                    text=['post', 'missing text'],
                                    inclusive=False)
        media = filters.filter_by_text(self.dataMedia,
                                       text=['algoritmi', 'missing text'],
                                       inclusive=False)
        assert(len(fb) == 3)
        assert(len(media) == 1)


class TestDatetimeFilter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])

    def test_filter_datetime_no_dates(self):
        fb = filters.filter_by_datetime(self.dataFacebook)
        media = filters.filter_by_datetime(self.dataMedia)
        assert(len(list(fb)) == 4)
        assert(len(list(media)) == 276)

    def test_filter_datetime_after(self):
        fb = filters.filter_by_datetime(self.dataFacebook, after='2017-1-1')
        media = filters.filter_by_datetime(self.dataMedia,
                                           after='2017-6-30 21:00:00')
        assert(len(fb) == 2)
        assert(len(media) == 4)

    def test_filter_datetime_before(self):
        fb = filters.filter_by_datetime(self.dataFacebook, before='2017-1-1')
        media = filters.filter_by_datetime(self.dataMedia,
                                           before='2017-6-30 21:00:00')
        assert(len(fb) == 2)
        assert(len(media) == 272)

    def test_filter_datetime_after_before(self):
        fb = filters.filter_by_datetime(self.dataFacebook,
                                        after='2017-1-3 15:09:23',
                                        before='2017-2-6 19:52:09')
        media = filters.filter_by_datetime(self.dataMedia,
                                           after='2017-6-30 21:00:04',
                                           before='2017-6-30 23:13:53')
        assert(len(fb) == 1)
        assert(len(media) == 3)


class TestAuthorFilter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])

    def test_filter_author_empty(self):
        fb = filters.filter_by_author(self.dataFacebook)
        media = filters.filter_by_author(self.dataMedia)
        assert(len(list(fb)) == 4)
        assert(len(list(media)) == 276)

    def test_filter_author_one(self):
        fb = filters.filter_by_author(self.dataFacebook,
                                      authors=['Matti Nelimarkka'])
        media = filters.filter_by_author(self.dataMedia,
                                         authors=['Teemu Toivola'])
        assert(len(fb) == 4)
        assert(len(media) == 2)

    def test_filter_author_two(self):
        fb = filters.filter_by_author(self.dataFacebook,
                                      authors=['Matti Nelimarkka', 'Heikki Heiskanen'])
        media = filters.filter_by_author(self.dataMedia,
                                         authors=['Teemu Toivola', 'Heikki Heiskanen'])
        assert(len(fb) == 4)
        assert(len(media) == 3)

    def test_filter_author_not_found(self):
        fb = filters.filter_by_author(self.dataFacebook,
                                      authors=['Missing author'])
        media = filters.filter_by_author(self.dataMedia,
                                         authors=['Missing author', 'Missing author2'])
        assert(len(fb) == 0)
        assert(len(media) == 0)


class TestDomainFilter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])

    def test_filter_domain_empty(self):
        fb = filters.filter_by_domain(self.dataFacebook)
        media = filters.filter_by_domain(self.dataMedia)
        assert(len(list(fb)) == 4)
        assert(len(list(media)) == 276)

    def test_filter_domain_one(self):
        fb = filters.filter_by_domain(self.dataFacebook,
                                      domains=['facebook.com'])
        media = filters.filter_by_domain(self.dataMedia, domains=['yle.fi'])
        assert(len(list(fb)) == 4)
        assert(len(list(media)) == 276)

    def test_filter_domain_missing(self):
        fb = filters.filter_by_domain(self.dataFacebook,
                                      domains=['twitter.com'])
        media = filters.filter_by_domain(self.dataMedia, domains=['hs.fi'])
        assert(len(list(fb)) == 0)
        assert(len(list(media)) == 0)


class TestUrls:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])

    def test_list_links(self):
        fb = urls.links(self.dataFacebook)
        media = urls.links(self.dataMedia)
        assert(len(fb) == 4)
        assert(len(media) == 433)

    def test_extract_domains(self):
        fb = urls.domains(self.dataFacebook)
        media = urls.domains(urls.links(self.dataMedia))
        assert(len(fb) == 4)
        assert(len(media) == 433)


class TestCounter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])

    def test_count_authors(self):
        fb = counters.counts_author(self.dataFacebook, verbose=False)
        media = counters.counts_author(self.dataMedia, verbose=False)
        assert(len(fb.keys()) == 1)
        assert(len(media.keys()) == 140)

    def test_count_domains(self):
        fb = counters.counts_domain(self.dataFacebook, verbose=False)
        media = counters.counts_domain(self.dataMedia, verbose=False)
        assert(len(fb.keys()) == 1)
        assert(len(media.keys()) == 1)


class TestXlsxExporter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])
        self.out_fb = 'out_fb'
        self.out_media = 'out_media'

    def test_export_generator_xlsx(self):
        try:
            exporter.export_csv(self.dataMedia, self.out_media + '.xlsx')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        try:
            exporter.export_csv(self.dataFacebook, self.out_fb + '.xlsx')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        assert(os.path.isfile(self.out_media + '.xlsx'))
        assert(os.path.isfile(self.out_fb + '.xlsx'))

    def test_export_list_xlsx(self):
        try:
            exporter.export_csv(list(self.dataMedia), self.out_media + '.xlsx')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        try:
            exporter.export_csv(list(self.dataFacebook), self.out_fb + '.xlsx')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        assert(os.path.isfile(self.out_media + '.xlsx'))
        assert(os.path.isfile(self.out_fb + '.xlsx'))

    def teardown(self):
        os.remove(self.out_media + '.xlsx')
        os.remove(self.out_fb + '.xlsx')


class TestCsvExporter:

    def setup(self):
        self.dataMedia = core.data('news', folder='', terms=['yle.json'])
        self.dataFacebook = core.data('facebook', folder='', terms=['facebook.json'])
        self.out_fb = 'out_fb'
        self.out_media = 'out_media'

    def test_export_generator_csv(self):
        try:
            exporter.export_csv(self.dataMedia, self.out_media + '.csv')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        try:
            exporter.export_csv(self.dataFacebook, self.out_fb + '.csv')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        assert(os.path.isfile(self.out_media + '.csv'))
        assert(os.path.isfile(self.out_fb + '.csv'))

    def test_export_list_csv(self):
        try:
            exporter.export_csv(list(self.dataMedia), self.out_media + '.csv')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        try:
            exporter.export_csv(list(self.dataFacebook), self.out_fb + '.csv')
        except Exception as e:
            pytest.fail(msg=e.args[0])
        assert(os.path.isfile(self.out_media + '.csv'))
        assert(os.path.isfile(self.out_fb + '.csv'))

    def teardown(self):
        os.remove(self.out_media + '.csv')
        os.remove(self.out_fb + '.csv')

# === end of test/test_helpers.py (repo: HIIT/hybra-core, license: mit) ===

import os

PRODUCTION = os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Eng')
DEBUG = DEVELOPMENT = not PRODUCTION

try:
  # This part is surrounded in try/except because the config.py file is
  # also used in the run.py script which is used to compile/minify the client
  # side files (*.less, *.coffee, *.js) and is not aware of the GAE
  from google.appengine.api import app_identity
  APPLICATION_ID = app_identity.get_application_id()
except (ImportError, AttributeError):
  pass
else:
  from datetime import datetime

  CURRENT_APPLICATION_ID = os.environ['APPLICATION_ID']
  CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID')
  CURRENT_VERSION_MAJOR, CURRENT_VERSION_MINOR = CURRENT_VERSION_ID.rsplit('.', 1)
  CURRENT_VERSION_NAME = CURRENT_VERSION_ID.split('.')[0]
  CURRENT_VERSION_TIMESTAMP = long(CURRENT_VERSION_ID.split('.')[1]) >> 28
  if DEVELOPMENT:
    import calendar
    CURRENT_VERSION_TIMESTAMP = calendar.timegm(datetime.utcnow().timetuple())
  CURRENT_VERSION_DATE = datetime.utcfromtimestamp(CURRENT_VERSION_TIMESTAMP)
  USER_AGENT = '%s.appspot.com/%s' % (APPLICATION_ID, CURRENT_VERSION_ID)

  from apps.admin import models
  CONFIG_DB = models.Config.get_master_db()
  SECRET_KEY = CONFIG_DB.flask_secret_key.encode('ascii')
  RECAPTCHA_PUBLIC_KEY = CONFIG_DB.recaptcha_public_key
  RECAPTCHA_PRIVATE_KEY = CONFIG_DB.recaptcha_private_key

DEFAULT_DB_LIMIT = 64

###############################################################################
# Client modules, also used by the run.py script.
###############################################################################
STYLES = [
    'src/style/style.less',
]

SCRIPTS = [
    ('libs', [
        'ext/js/jquery/jquery.js',
        'ext/js/moment/moment.js',
        'ext/js/nprogress/nprogress.js',
        'ext/js/bootstrap/alert.js',
        'ext/js/bootstrap/button.js',
        'ext/js/bootstrap/transition.js',
        'ext/js/bootstrap/collapse.js',
        'ext/js/bootstrap/dropdown.js',
        'ext/js/bootstrap/tooltip.js',
    ]),
    ('core', [
        'src/script/core/service.coffee',
        'src/script/core/util.coffee',
    ]),
    ('apps', [
        'src/script/apps/admin/admin.coffee',
        'src/script/apps/auth/signin.coffee',
        'src/script/apps/feedback/admin.coffee',
        'src/script/apps/user/admin.coffee',
        'src/script/apps/user/profile.coffee',
        'src/script/apps/init.coffee',
    ]),
]

# === end of main/config.py (repo: gmist/3dhero2, license: mit) ===

"""Pipelines for image preprocessing"""

import os


def lren_build_daily_folder_path_callable(folder, date):
    daily_folder = os.path.join(folder, date.strftime('%Y'),
                                date.strftime('%Y%m%d'))
    # Ugly hack for LREN
    if not os.path.isdir(daily_folder):
        daily_folder = os.path.join(folder, '2014', date.strftime('%Y%m%d'))
    return daily_folder


def lren_accept_folder(path):
    session_id = os.path.basename(path)
    sid = session_id.strip().lower()
    return not ('delete' in sid) and not ('phantom' in sid)

# === end of preprocessing_pipelines/__init__.py (repo: LREN-CHUV/airflow-mri-preprocessing-dags, license: apache-2.0) ===

from .latest_activity import LatestActivity
from .chart import TagChart


class Tagged:
    """Base class for wrapping `values` dictionaries of related tag
    information
    """

    def __init__(self, **kwargs):
        self._latest_translation = kwargs.pop("latest_translation", None)
        self.approved_strings = kwargs.get("approved_strings")
        self.pretranslated_strings = kwargs.get("pretranslated_strings")
        self.strings_with_warnings = kwargs.get("strings_with_warnings")
        self.strings_with_errors = kwargs.get("strings_with_errors")
        self.total_strings = kwargs.get("total_strings")
        self.unreviewed_strings = kwargs.get("unreviewed_strings")
        self.kwargs = kwargs

    @property
    def chart(self):
        """Generate a dict of chart information"""
        return TagChart(**self.kwargs) if self.total_strings else None

    @property
    def latest_translation(self):
        return self._latest_translation

    @property
    def latest_activity(self):
        """Returns wrapped LatestActivity data if available"""
        return (
            LatestActivity(self.latest_translation)
            if self.latest_translation
            else None
        )

    @property
    def tag(self):
        return self.kwargs.get("slug")

    def get_latest_activity(self, x):
        return self.latest_activity

    def get_chart(self, x):
        return self.chart


class TaggedLocale(Tagged):
    """Wraps a Locale to provide stats and latest information"""

    @property
    def code(self):
        return self.kwargs.get("code")

    @property
    def name(self):
        return self.kwargs.get("name")

    @property
    def population(self):
        return self.kwargs.get("population")

    @property
    def project(self):
        return self.kwargs.get("project")

# === end of pontoon/tags/utils/tagged.py (repo: mozilla/pontoon, license: bsd-3-clause) ===

import unittest

from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
from pymatgen.util.coord import in_coord_list
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.wulff import WulffShape

import json
import os

__author__ = 'Zihan Xu, Richard Tran, Balachandran Radhakrishnan'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Zihan Xu'
__email__ = 'zix009@eng.ucsd.edu'
__date__ = 'May 05 2016'


class WulffShapeTest(PymatgenTest):

    def setUp(self):
        module_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(module_dir, "surface_samples.json")) as data_file:
            surface_properties = json.load(data_file)

        surface_energies, miller_indices = {}, {}
        for mpid in surface_properties.keys():
            e_surf_list, miller_list = [], []
            for surface in surface_properties[mpid]["surfaces"]:
                e_surf_list.append(surface["surface_energy"])
                miller_list.append(surface["miller_index"])
            surface_energies[mpid] = e_surf_list
            miller_indices[mpid] = miller_list

        # In the case of a high anisotropy material
        # Nb: mp-8636
        latt_Nb = Lattice.cubic(2.992)
        # In the case of an fcc material
        # Ir: mp-101
        latt_Ir = Lattice.cubic(3.8312)
        # In the case of a hcp material
        # Ti: mp-72
        latt_Ti = Lattice.hexagonal(4.6000, 2.8200)

        self.ucell_Nb = Structure(latt_Nb, ["Nb", "Nb", "Nb", "Nb"],
                                  [[0, 0, 0], [0, 0.5, 0.5],
                                   [0.5, 0, 0.5], [0.5, 0.5, 0]])
        self.wulff_Nb = WulffShape(latt_Nb, miller_indices["mp-8636"],
                                   surface_energies["mp-8636"])

        self.ucell_Ir = Structure(latt_Nb, ["Ir", "Ir", "Ir", "Ir"],
                                  [[0, 0, 0], [0, 0.5, 0.5],
                                   [0.5, 0, 0.5], [0.5, 0.5, 0]])
        self.wulff_Ir = WulffShape(latt_Ir, miller_indices["mp-101"],
                                   surface_energies["mp-101"])

        self.ucell_Ti = Structure(latt_Ti, ["Ti", "Ti", "Ti"],
                                  [[0, 0, 0], [0.333333, 0.666667, 0.5],
                                   [0.666667, 0.333333, 0.5]])
        self.wulff_Ti = WulffShape(latt_Ti, miller_indices["mp-72"],
                                   surface_energies["mp-72"])

        self.cube = WulffShape(Lattice.cubic(1), [(1, 0, 0)], [1])
        self.hex_prism = WulffShape(Lattice.hexagonal(2.63, 5.21),
                                    [(0, 0, 1), (1, 0, 0)], [0.35, 0.53])

        self.surface_properties = surface_properties

    @unittest.skipIf("DISPLAY" not in os.environ, "Need display")
    def test_get_plot(self):
        # Basic test, not really a unittest.
        self.wulff_Ti.get_plot()
        self.wulff_Nb.get_plot()
        self.wulff_Ir.get_plot()

    def symm_check(self, ucell, wulff_vertices):
        """
        # Checks if the point group of the Wulff shape matches
        # the point group of its conventional unit cell

        Args:
            ucell (string): Unit cell that the Wulff shape is based on.
            wulff_vertices (list): List of all vertices on the Wulff
                shape. Use wulff.wulff_pt_list to obtain the list
                (see wulff_generator.py).

        return (bool)
        """
        space_group_analyzer = SpacegroupAnalyzer(ucell)
        symm_ops = space_group_analyzer.get_point_group_operations(
            cartesian=True)
        for point in wulff_vertices:
            for op in symm_ops:
                symm_point = op.operate(point)
                if in_coord_list(wulff_vertices, symm_point):
                    continue
                else:
                    return False
        return True

    def consistency_tests(self):
        # For a set of given values, these tests will
        # ensure that the general result given by the
        # algorithm does not change as the code is edited

        # For fcc Ir, make sure the (111) direction
        # is the most dominant facet on the Wulff shape
        fractional_areas = self.wulff_Ir.area_fraction_dict
        miller_list = [hkl for hkl in fractional_areas.keys()]
        area_list = [fractional_areas[hkl] for hkl in fractional_areas.keys()]
        self.assertEqual(miller_list[area_list.index(max(area_list))],
                         (1, 1, 1))

        # Overall weighted surface energy of fcc Nb should be
        # equal to the energy of the (310) surface, i.e. fcc Nb
        # is anisotropic, the (310) surface is so low in energy,
        # it's the only facet that exists in the Wulff shape
        Nb_area_fraction_dict = self.wulff_Nb.area_fraction_dict
        for hkl in Nb_area_fraction_dict.keys():
            if hkl == (3, 1, 0):
                self.assertEqual(Nb_area_fraction_dict[hkl], 1)
            else:
                self.assertEqual(Nb_area_fraction_dict[hkl], 0)

        self.assertEqual(self.wulff_Nb.miller_energy_dict[(3, 1, 0)],
                         self.wulff_Nb.weighted_surface_energy)

    def symmetry_test(self):
        # Maintains that all wulff shapes have the same point
        # groups as the conventional unit cell they were
        # derived from. This test should pass for all subsequent
        # updates of the surface_properties collection
        check_symmetry_Nb = self.symm_check(self.ucell_Nb,
                                            self.wulff_Nb.wulff_pt_list)
        check_symmetry_Ir = self.symm_check(self.ucell_Ir,
                                            self.wulff_Ir.wulff_pt_list)
        check_symmetry_Ti = self.symm_check(self.ucell_Ti,
                                            self.wulff_Ti.wulff_pt_list)
        self.assertTrue(check_symmetry_Nb)
        self.assertTrue(check_symmetry_Ir)
        self.assertTrue(check_symmetry_Ti)

    def test_get_azimuth_elev(self):
        # Test out the viewing of the Wulff shape from Miller indices.
        azim, elev = self.wulff_Ir._get_azimuth_elev((0, 0, 1))
        self.assertEqual(azim, 0)
        self.assertEqual(elev, 90)
        azim, elev = self.wulff_Ir._get_azimuth_elev((1, 1, 1))
        self.assertAlmostEqual(azim, 45)

    def test_properties(self):
        # Simple test to check if the values of some
        # properties are consistent with what we already have
        wulff_shapes = {"mp-8636": self.wulff_Nb, "mp-72": self.wulff_Ti,
                        "mp-101": self.wulff_Ir}
        for mpid in wulff_shapes.keys():
            properties = self.surface_properties[mpid]
            wulff = wulff_shapes[mpid]
            self.assertEqual(round(wulff.weighted_surface_energy, 3),
                             round(properties["weighted_surface_energy"], 3))
            self.assertEqual(round(wulff.shape_factor, 3),
                             round(properties["shape_factor"], 3))
            self.assertEqual(round(wulff.anisotropy, 3),
                             round(properties["surface_anisotropy"], 3))

    def test_corner_and_edges(self):
        # Test if it is returning the correct number of corners and edges
        self.assertArrayEqual(self.cube.tot_corner_sites, 8)
        self.assertArrayEqual(self.cube.tot_edges, 12)
        self.assertArrayEqual(self.hex_prism.tot_corner_sites, 12)
        self.assertArrayEqual(self.hex_prism.tot_edges, 18)


if __name__ == "__main__":
    unittest.main()

# === end of pymatgen/analysis/tests/test_wulff.py (repo: tschaume/pymatgen, license: mit) ===

from fabric.api import task, cd, run, env, prompt, execute, sudo
from fabric.api import open_shell, settings, put, local
import os
import boto.ec2
import time

# IMPORTANT: This fabfile requires a credentials.py file to be imported
# from its current directory, since turning a repo for a Django app into
# a package is something I've never done before, and on a deadline it
# sounds too much like it could become a large time sink of new stuff
# to learn before tomorrow, such as whether the __init__.py file will
# mess up any of Django's other imports in non-obvious ways.

# Therefore, this fabfile will assume the credentials.py file is located...
# (from the repo directory): django_site/django_site/credentials.py

# And, if it cannot be imported here, a try:except block will handle it
# and use Fabric to move the file into its own directory and import it.

# This is all carried out inside _setup_database() so that it does not
# obnoxiously intrude on other uses of the fabfile.

# For future readers, I apologize and accept all blame for this monstrosity.

# So, that didn't work. Documented my failure in

# This fabfile will assume credentials.py exists in both settings.py's
# directory on the server and fabfile.py's directory on the local machine.

import credentials
credentials.set_credentials()

env.hosts = ['*', ]
env['user'] = 'ubuntu'

# Getting a connection to us-west-2:
env['aws_region'] = 'us-west-2'

# This inserts the credentials into the OS environment.
# Do not do this. Execfile() is bad and I should feel bad for trying it.
# env['dbpwd'] = execfile("./imagr_site/imagr_site/credentials.py")


def _install_pips():
    with cd("django-imagr"):
        # Reference for the following:
        # https://bugs.launchpad.net
        #     /ubuntu/+source/python-pip/+bug/1306991/comments/24
        # This had to happen because of some kind of error pip has in
        # this version, possibly related to ubuntu.
        # sudo("apt-get remove python-pip")
        sudo("wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py")
        sudo("python get-pip.py")
        sudo("pip install -r requirements.txt")


def install_pips(askforchoice=False):
    run_command_on_selected_server(_install_pips, askforchoice=askforchoice)

# also, we need to sudo and bind on Ubuntu to get access to low number ports
# sudo(".....b0000")

# when copying using put(), first param is local machine,
# second is remote, and third is use_sudo=True

# we will need this string to invoke gunicorn


def _runserver():
    sudo("/etc/init.d/nginx restart")
    with cd("django-imagr/imagr_site"):
        sudo("python manage.py migrate")
        sudo("python manage.py collectstatic")
        # CMGTODO: maybe "run" below to avoid overprivileging the app
        sudo('gunicorn -b 127.0.0.1:8888 imagr_site.wsgi:application')


def runserver(askforchoice=False):
    run_command_on_selected_server(_runserver, askforchoice=askforchoice)

# "Our interactions with boto are all oriented around
# creating and destroying servers.
# In fabric, our orientation will be toward
# manipulating a server once it exists.
# Before we do that, though we need to
# get a server running."


def get_ec2_connection():
    if 'ec2' not in env:
        ec2_connection = boto.ec2.connect_to_region(env.aws_region)
        if ec2_connection is not None:
            env.ec2 = ec2_connection
            print("Connected to EC2 region {}".format(env.aws_region))
        else:
            raise IOError(
                "Unable to connect to EC2 region {}".format(env.aws_region))
    return env.ec2

# "Arguments passed in from the command line appear in your function
# as strings. You are responsible for converting them if needed."
# Fabfile will run the following Python command using your command line args:
# provision_instance(wait_for_running, timeout, interval)


def provision_instance(wait_for_running=False, timeout=60, interval=2):
    wait_value = int(interval)
    timeout_value = int(timeout)
    connection = get_ec2_connection()

    # NOTE: Changing the following value can
    # cost you money if you aren't careful.
    instance_type = 't1.micro'
    # This one is in your ~/.ssh dir:
    key_name = 'pk-aws'  # 20141105 for cmg
    security_group = 'ssh-access'
    ami_id = 'ami-37501207'

    reservations = connection.run_instances(ami_id,
                                            key_name=key_name,
                                            instance_type=instance_type,
                                            security_groups=[security_group])
    new_instances = []
    for each_instance in reservations.instances:
        if each_instance.state == u'pending':
            new_instances.append(each_instance)

    running_instance = []
    if wait_for_running:
        time_waited = 0
        while new_instances and (time_waited < timeout_value):
            time.sleep(wait_value)
            time_waited += int(wait_value)
            for each_instance in new_instances:  # range!
                current_state = each_instance.state
                print("Instance {} is currently {}".format(each_instance.id,
                                                           current_state))
                if current_state == "running":
                    # NOTE: This part does not work for circumstances where
                    # len(new_instances) > 1, but that's okay, because this is
                    # provision_instance, not provision_instances(). It's very
                    # unpythonic though and must be changed when I have more
                    # time.
                    running_instance.append(
                        new_instances.pop(new_instances.index(each_instance)))
                # !
                each_instance.update()

# Example use of above function:
# fab provision_instance:wait_for_running=1
# Example output:
# [localhost] Executing task 'provision_instance'
# Connected to EC2 region us-west-2
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is pending
# Instance i-8c424a85 is running
# Done.

# "Once the function completes, you should be able to load your EC2
# dashboard and see the new instance you just created running happily."

# "When you execute a function using fabric, it is actually run repeatedly,
# once for each host you list in the env.hosts environment setting.
# At the moment, our script lists 'localhost', but we don't actually want
# to run this command on 'localhost', so we need to get around this.
# We could add a new host into the list, but that would require our
# knowing the name of the host ahead of time.
# Moreover, it would still mean that the commands were run both
# on localhost and this new remote host. That's no good."

# "Fabric provides the execute command specifically for this purpose.
# We can pass it the list of hosts on which we want to run a particular
# command, and it will do so for us. This means that we can dynamically
# set the name of the instance we want, and fabric will execute
# our chosen command on that server only!"

# "In order to play with that, though, we need to interactively select the
# instance we want to use (after all, we might have more than one, right?).
# We'll begin by building a function that allows us to list instances,
# optionally filtering for a particular state."

# NOTE: When list_aws_instances is not given :verbose=1, it will not print
# anything about instances; it is used to prep for another function.


def list_aws_instances(verbose=False, state='all'):
    ec2_connection = get_ec2_connection()

    reservations = ec2_connection.get_all_reservations()
    instances = []
    for each_reservation in reservations:
        for each_instance in each_reservation.instances:
            if state == 'all' or each_instance.state == state:
                each_instance = {
                    'id': each_instance.id,
                    'type': each_instance.instance_type,
                    'image': each_instance.image_id,
                    'state': each_instance.state,
                    'instance': each_instance,
                }
                instances.append(each_instance)  # !

    env.instances = instances
    if verbose:
        import pprint
        pprint.pprint(env.instances)

# Example use of above function:
# fab list_aws_instances:verbose=1,state=running
# Example output:
# [localhost] Executing task 'list_aws_instances'
# Connected to EC2 region us-west-2
# [{'id': u'i-ab5159a2',
#   'image': u'ami-d0d8b8e0',
#   'instance': Instance:i-ab5159a2,
#   'state': u'running',
#   'type': u't1.micro'}]
# Done.

# "Here, we build a list of the available instances that are 'running',
# and then ask the user to choose among them.


def select_instance(state='running', askforchoice=True):
    # If there is already an active instance, exit the function:
    if env.get('active_instance', False):
        return

    list_aws_instances(state=state)

    if askforchoice:
        prompt_text = "Please select from the following instances:\n"
        # NOTE: The following syntax does different stuff from {} and .format()
        instance_printing_template = " %(ct)d: %(state)s instance %(id)s\n"
        for idx, instance in enumerate(env.instances):
            ct = idx + 1
            args = {'ct': ct}
            args.update(instance)
            prompt_text += instance_printing_template % args
        prompt_text += "Choose an instance: "

        def validation(input):
            choice = int(input)
            if choice not in range(1, (len(env.instances) + 1)):
                raise ValueError("{} is not a valid instance.".format(choice))
            else:
                return choice

        choice = prompt(prompt_text, validate=validation)
    else:
        choice = 1

    # "After a valid choice is made, we can then hang that instance
    # on our env so that we can access it from other functions:"
    env.active_instance = env.instances[choice - 1]['instance']


def run_command_on_selected_server(command, askforchoice=True):
    # Ask the user to select an instance:
    select_instance(askforchoice=askforchoice)

    # Build a list of hosts, including the server (always ubuntu for us):
    # (This is what Charles and I had a hard time finding due to AWS subnets!)
    selected_hosts = [
        'ubuntu@' + env.active_instance.public_dns_name
    ]

    # Execute the command:
    execute(command, hosts=selected_hosts)

# The following two functions were created to automatically copy
# the credentials file into the fabfile's directory during run_complete_setup()


def run_command_on_localhost(command):
    # select_instance() not required because env.active_instance is not needed.
    execute(command, hosts=['localhost', ])


def _copy_credentials_file_locally():
    # put("imagr_site/imagr_site/credentials.py",
    #     "credentials.py", use_sudo=True)
    local("cp ./imagr_site/imagr_site/credentials.py credentials.py")


def _restart_server(wait_for_running=True):
    sudo("shutdown -r now")
    # Wait for it in the main chaining function, don't block inside the function.
    # time.sleep(60)

# "Remember, you cannot use password authentication to log into AWS servers.
# If you find that you are prompted to enter a password in order to run
# this command on your remote server, it means you have some work to do."
# ((Do this first maybe? Review))
# "In order to run a command on this server, you need to ensure that
# the keypair you set up for your AWS instances is available to the ssh agent.
# You can do that at the system level on your local machine:
# ssh-add ~/.ssh/pk-aws.pem
# Now your local ssh agent knows that this is a file you might use
# to connect. When the AWS server asks to use public-key authentication,
# the agent will then try this key along with any others the agent
# knows about. If no known key works, ssh will bomb out."


def _setup_imagr_aptgets():
    # check for any system updates
    # For some reason, there must be a pause before apt-getting anything,
    # or else even a status=='running' server will time out the request.
    time.sleep(60)
    sudo("apt-get -y update")
    # time.sleep(60)
    # check for any system upgrades
    sudo("apt-get -y upgrade")
    # Attempting to fix "ImportError: cannot import name IncompleteRead"
    # sudo("pip install -U pip")
    # pip before dev or dev before pip?
    # sudo("apt-get -y install python-pip")
    # IMPORTANT NOTE: The above command gives a bad version of pip for ubuntu!
    # Reference for the following:
    # https://bugs.launchpad.net
    #     /ubuntu/+source/python-pip/+bug/1306991/comments/24
    sudo("apt-get -y install python-dev")
    sudo("apt-get -y install postgresql-9.3")
    sudo("apt-get -y install postgresql-server-dev-9.3")
    sudo("apt-get -y install git")
    sudo("apt-get -y install gunicorn")
    sudo("apt-get -y install nginx")
    # if any updates were performed above, we probably have to reboot server
    sudo("/etc/init.d/nginx start")
    # This requires some waiting for the server restart to take effect.
    # Moving it to the main chaining function to emphasize this.
    # _restart_server()


def setup_imagr_aptgets():
    run_command_on_selected_server(_setup_imagr_aptgets)


def _move_sources():
    # CMGTODO:
    # We don't want to git clone if we already have this directory
    # set up on the machine.

    # TEMPORARILY COMMENTED FOR TESTING
    # I'm using my own fork to test a change and will discuss it with Charles
    # after I've debugged it. This requires changing where the remote instance
    # looks for a clone:
    # run("git clone https://github.com/CharlesGust/django-imagr.git")
    run("git clone https://github.com/BFriedland/django-imagr.git")

    sudo("ln -s /home/ubuntu/django-imagr/nginx.conf" +
         " /etc/nginx/sites-enabled/amazonaws.com")

    # The credentials.py file is needed in two places, now.
    # One for the fabfile's database creation and one for Django's settings.py:
    # with cd("django-imagr"):
    #     put("imagr_site/imagr_site/credentials.py",
    #         "/home/ubuntu/django-imagr/credentials.py", use_sudo=True)
    # Uh, I think that file needs to be copied on my computer for fabric. <_<
    # That will have to happen on every deployment computer.
    # The fabfile could automate that too, but I'll have to do it later.
    with cd("django-imagr"):
        # Reference for the following:
        # https://bugs.launchpad.net
        #     /ubuntu/+source/python-pip/+bug/1306991/comments/24
        # So, this doesn't work on its own here at the time of writing:
        # sudo("pip install -r requirements.txt")
        # EITHER the following must happen BEFORE the above:
        # sudo apt-get remove python-pip
        # wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py
        # sudo python get-pip.py
        # OR pip install -r requirements.txt doesn't need to be called at all,
        # though I have no idea why not -- I did the above after getting the
        # IncompleteRead error, and got a ton of
        # "Requirement already satisfied" messages -- so I checked and
        # YES it was there already even though it never did it without getting
        # me an error. No idea what's going on there.
        # ...
        # It could be that IncompleteRead traceback pops up but
        # pip installs the requirements anyways. That would mean we tell Fabric
        # to ignore tracebacks, somehow.

        # Reason I commented this:
        # To separate it into _install_pips
        # sudo("apt-get remove python-pip")
        # sudo("wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py")
        # sudo("python get-pip.py")
        # To be clear, all of this "pip install -r requirements.txt" business
        # goes in _install_pips.
        # sudo("pip install -r requirements.txt")
        # I may also have figured out why all of that wasn't working before.
        # It's because we were pip installing requirements.txt twice, probably.
        # Or else it was the ubuntu error which required removing pip
        # and installing it again. I don't know, but now it works, because
        # I centralized it into the preexisting function, _install_pips
        put("imagr_site/imagr_site/credentials.py",
            "/home/ubuntu/django-imagr/imagr_site/imagr_site/credentials.py",
            use_sudo=True)


def move_sources():
    run_command_on_selected_server(_move_sources)

# The following two functions are useful if nginx is used to run the
# server instead of gunicorn
#
# An internal command that should be wrapped for fab:
# def _install_nginx():
#     sudo('apt-get install nginx')
#     # This command runs nginx with the start arg:
#     sudo('/etc/init.d/nginx start')
#
# "Finally, we need to wrap this function in a function we might call
# from the command line that will run it on the server we select:"
# def install_nginx():
#     run_command_on_selected_server(_install_nginx)

# "Now, if we run this fabric command from our command line,
# we can get nginx installed on our AWS instance:
# Executing task 'install_nginx'
# Connected to EC2 region us-west-2
# Please select from the following instances:
# 1: running instance i-ab5159a2
# Choose an instance: 1
# Executing task '_install_nginx'
# sudo: apt-get install nginx
# out: Setting up nginx-full (1.1.19-1ubuntu0.5) ...
# out: Setting up nginx (1.1.19-1ubuntu0.5) ...
# out: Processing triggers for libc-bin ...
# out: ldconfig deferred processing now taking place
# Done.

# At this point, you should be able to open a web browser and point
# it at your EC2 instance and see the default nginx web page.
# Remember, the public_dns_name of your instance is the way to get at it,
# so use that in your browser:
# http://ec2-54-185-44-188.us-west-2.compute.amazonaws.com

# Challenge yourself to add two more functions:
# 1. Select a running instance to stop,
#    and then stop it with boto
# 2. Select a stopped instance to terminate,
#    and then terminate it with boto"


def stop_running_instance(askforchoice=True):
    # This code is from:
    # https://github.com/miracode/django-imagr/blob/master/fabfile.py
    # I tried to find a way to do this using run_command_on_selected_server(),
    # but miracode's example turned out to be the way to go.
    # This is because the various commands store things in fabric's env
    # rather than passing them as parameters, which threw me through a
    # loop at first. Printing the state of the dictionary as things run
    # might help visualize this if your future fabfiles get longer.

    # Have the user place an instance selection in the env:
    select_instance(askforchoice=askforchoice)

    # Acquire a connection. Should be done in this instance rather than
    # relying on the fact that select_instance() calls list_aws_instances().
    # It might be technically possible to place a connection in the env,
    # but I'd prefer to keep them function-scoped.
    ec2_connection = get_ec2_connection()

    # This command is too simple to require execute:
    ec2_connection.stop_instances(instance_ids=[env.active_instance.id])

# Similarly to stop_running_instance(), this function will not be
# effectively chained further up than select_instance().


def terminate_stopped_instance(askforchoice=True):
    # This code is also from:
    # https://github.com/miracode/django-imagr/blob/master/fabfile.py
    select_instance(state='stopped', askforchoice=askforchoice)
    ec2_connection = get_ec2_connection()
    ec2_connection.terminate_instances(instance_ids=[env.active_instance.id])


def terminate_running_instance(askforchoice=True):
    select_instance(state='running', askforchoice=askforchoice)
    ec2_connection = get_ec2_connection()
    ec2_connection.terminate_instances(instance_ids=[env.active_instance.id])


def run_custom_command(command):
    select_instance(askforchoice=False)
    sudo(command)

# "Stop. No. Why."
# execfile("./imagr_site/imagr_site/credentials.py")
# Give it more time than it needs to complete the above, just in case.
# time.sleep(5)
# print "os.environ['DATABASE_PASSWORD'] == {}".format(os.environ['DATABASE_PASSWORD'])

# The following comes mostly from miracode again. Reference:
# https://github.com/miracode/django-imagr/blob/master/fabfile.py


def _setup_database():
    # Insert the database password into the os.environ dictionary (without
    # making it visible on GitHub, from this file):
    # Turns out this has to be called outside of the fabfile's functions'
    # scope for some reason -- it can't import packages inside functions
    # and I'm not entirely sure why. It might be trying to run the functions
    # somewhere other than the fabfile's directory but on the same machine.
    # More discussion and a record of my failed attempt is saved at the
    # following function's comment:
    # _set_credentials()

    # Take it out of the dictionary and use it for the SQL command string,
    # which is handed to the server instance after it's constructed:
    password = os.environ['DATABASE_PASSWORD']

    sudo('createdb imagr', user='postgres')

    create_user_sql_query = """"
    CREATE USER imagr WITH password '%s';
    GRANT ALL ON DATABASE imagr TO imagr;"
    """ % password

    sudo('psql -U postgres imagr -c %s' % create_user_sql_query,
         user='postgres')


def _set_credentials():
    # This isn't working and I don't know why and I ran out of time. <_<
    # import credentials does not work when running fabric from the CLI,
    # even though the file is sitting right next to it, there's no naming
    # problems, the file is identical to what I'd expect, I'm running
    # the local() command from the fabric API, I'm working with try:except
    # to catch all the errors leading to the end when it should work on
    # the first try: section because the file is simply there and imported
    # already...

    # I think fabric is taking all of this code inside the _set_credentials()
    # function here and executing it somewhere there is no ability to call
    # import credentials within the function.
    # I thought making the import statement inside the fabric function would
    # reduce unnecessary imports, however this seems to have been an
    # incorrect assumption on my part.

    # I think the easiest way to make this work at this point is to
    # take advantage of Python's scripting nature and put a fabric call
    # to the bash cp command above the import credentials call, but keep
    # the import outside of a fabric function; that way the copying will
    # happen but the import won't be executed in some other context that
    # I don't know about (presumably).
    # This would all be interesting to research when I have more time.

    # So the above explanation makes sense, I'm going to leave the commented
    # code here as a record of my attempt to get this to work:

    # try:
    #     credentials.set_credentials()
    # except:
    #     try:
    #         import credentials
    #     except:
    #         # If credentials is not in the current directory, it's probably
    #         # still sitting next to settings.py in the imagr_site/imagr_site/
    #         # directory. It must then be transferred on the local machine:
    #         run_command_on_localhost(_copy_credentials_file_locally)
    #         time.sleep(5)
    #         import credentials

    print "Deprecated function, debugging print here."


def _alter_database_user_password():
    # Turns out this has to be called outside of the fabfile's functions'
    # scope for some reason -- it can't import packages inside functions
    # and I'm not entirely sure why. It might be trying to run the functions
    # somewhere other than the fabfile's directory but on the same machine.
    # More discussion and a record of my failed attempt is saved at the
    # following function's comment:
    # _set_credentials()
    password = os.environ['DATABASE_PASSWORD']

    alter_password_sql_query = """"
    ALTER ROLE imagr password to '%s';"
    """ % password

    sudo('psql -U postgres testimagr -c %s' % alter_password_sql_query,
         user='postgres')


def alter_database_user_password():
    run_command_on_selected_server(_alter_database_user_password)


def _destroy_database():
    # For testing.
    # DOES NOT YET WORK, so...
    raise Exception("_destroy_database not yet implemented." +
                    " Try remaking the server instead.")
    # destroy_database_sql_query = """"
    # DROP DATABASE imagr;
    # DROP USER imagr;"
    # """
    # sudo('psql -U postgres postgres -c %s' % destroy_database_sql_query,
    #      user='postgres')
    # sudo('destroydb imagr', user='postgres')
    # sudo('destroyuser imagr', user='postgres')


def destroy_database(askforchoice=True):
    run_command_on_selected_server(_destroy_database,
                                   askforchoice=askforchoice)


def setup_database(askforchoice=True):
    run_command_on_selected_server(_setup_database, askforchoice=askforchoice)


def ssh():
    run_command_on_selected_server(open_shell)

# Not actually needed, it turns out. Credentials must be invoked LOCALLY
# in the fabfile for setting up the database, not on the remote server.
# def _invoke_credentials_on_server():
#     with cd('django-imagr/imagr_site/imagr_site'):
#         sudo('python credentials.py')
#         # time.sleep(2)
# def invoke_credentials_on_server():
#     run_command_on_selected_server(_invoke_credentials_on_server)

# These need run_command_on_selected_server() to run on them, so set
# askforchoice=False


def run_complete_setup():
    provision_instance(wait_for_running=True)
    run_command_on_selected_server(_setup_imagr_aptgets, askforchoice=False)
    run_command_on_selected_server(_restart_server, askforchoice=False)
    # Restarting the server requires a little waiting time for it to finish
    # installing all of its updates.
    time.sleep(120)
    # Involves moving credentials.py:
    run_command_on_selected_server(_move_sources, askforchoice=False)
    # Involves installing from requirements.txt:
    run_command_on_selected_server(_install_pips, askforchoice=False)
    # Credentials must be loaded on the LOCAL machine before creating
    # the database. They're not needed when running through the fabfile,
    # except as called by the Django settings.py file in _runserver().
    # In a sleepy haze I wasted an hour trying to perfect this for no
    # real reason, but I learned a bit about os.environ and execfile, so...
    # run_command_on_selected_server(_invoke_credentials_on_server,
    #                                askforchoice=False)
    # What we actually have to do is put the database password into localhost's
    # os.environ while the fabric script is running, so that it can read it
    # and use data in it to construct the database with a .gitignored password.
    # That function happens inside _setup_database, because that's the only
    # fabric command that needs the database password.
    run_command_on_selected_server(_setup_database, askforchoice=False)
    run_command_on_selected_server(_runserver, askforchoice=False)

# === end of fabfile.py (repo: defzzd/django-imagr, license: mit) ===

import json

from flask import Flask, render_template, request, abort, Response

from app.util import doTranslate

app = Flask(__name__)


@app.errorhandler(500)
def not_found(error):
    # Flask passes the raised exception to registered error handlers, and
    # the body must be serialized; a bare dict is not a valid Response body.
    return Response(json.dumps({'error': 'Bad Request'}), status=400,
                    mimetype='application/json')


@app.route("/")
def index():
    return render_template('index.html')


@app.route("/translate", methods=["POST"])
def translate():
    # Reject requests with no JSON body or a missing/empty sourceText;
    # .get() avoids a KeyError when the field is absent.
    if not request.json or not request.json.get('sourceText'):
        abort(400)
    responseJson = {
        'translatedText': doTranslate(request.json['sourceText'])
    }
    return Response(json.dumps(responseJson), status=200,
                    mimetype='application/json')


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080)

# === end of app.py (repo: fkrafi/moses-web, license: apache-2.0) ===

import asyncio
import logging
import ssl
import threading

try:
    from asyncio import all_tasks as asyncio_all_tasks  # from py39, asyncio.Task.all_tasks is deprecated
except ImportError:
    asyncio_all_tasks = asyncio.Task.all_tasks  # pylint: disable=no-member

from tornado.ioloop import IOLoop
from tornado.testing import bind_unused_port
from tornado.web import Application, RequestHandler

from .fuzzer_decorator import FuzzerDecorator

logger = logging.getLogger(__name__)


class TornadoDecorator(FuzzerDecorator):
    """
    Decorator for fuzzers to transport generated content through http. It is
    useful for transporting fuzz tests to browser SUTs.

    The decorator starts a Tornado server at the start of the fuzz job and
    returns an http url as test input. If the SUT accesses the domain root
    through a GET request, then the decorated fuzzer is invoked and the
    response is the generated test. Accessing other paths can return static
    or dynamically rendered content.

    When the certfile and possibly also the keyfile optional parameters are
    defined, the traffic will be served through SSL.

    **Optional parameters of the fuzzer decorator:**

      - ``template_path``: Directory containing .html template files. These
        are served from the path ``/`` without the .html extension.
      - ``static_path``: Directory from which static files will be served.
        These are served from the path ``/static/``.
      - ``url``: Url template with ``{port}`` and ``{index}`` placeholders,
        that will be filled in with appropriate values. This is the url that
        will be served for the SUT as the test case.
        (Default: ``http(s)://localhost:{port}?index={index}``)
      - ``refresh``: Integer number denoting the time interval (in seconds)
        for the document at the root path (i.e., the test case) to refresh
        itself. Setting it to 0 means no refresh. (Default: 0)
      - ``certfile``: Path to a PEM file containing the certificate
        (Default: None).
      - ``keyfile``: Path to a file containing the private key
        (Default: None).

    **Example configuration snippet:**

        .. code-block:: ini

            [sut.foo]
            # assuming that foo expects a http url as input, which it tries
            # to access afterwards

            [fuzz.foo-with-bar-over-http]
            sut=foo
            #fuzzer=...
            fuzzer.decorate(0)=fuzzinator.fuzzer.TornadoDecorator
            batch=5

            [fuzz.foo-with-bar-over-http.fuzzer.decorate(0)]
            template_path=/home/lili/fuzzer/templates/
            static_path=/home/lili/fuzzer/static/
            # assuming that there is a main.html in the template_path directory
            url=http://localhost:{port}/main?index={index}
            refresh=3
    """

    def __init__(self, *, template_path=None, static_path=None, url=None, refresh=None, certfile=None, keyfile=None, **kwargs):
        self.template_path = template_path
        self.static_path = static_path
        self.url = url or ('https' if certfile else 'http') + '://localhost:{port}?index={index}'
        self.refresh = int(refresh) if refresh else 0

        if certfile:
            self.ssl_ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
            self.ssl_ctx.check_hostname = False
            self.ssl_ctx.verify_mode = ssl.CERT_NONE
            self.ssl_ctx.load_cert_chain(certfile, keyfile=keyfile)
        else:
            self.ssl_ctx = None

        # Disable all the output of the tornado server to avoid messing up with Fuzzinator's messages.
        hn = logging.NullHandler()
        hn.setLevel(logging.DEBUG)
        logging.getLogger('tornado.access').addHandler(hn)
        logging.getLogger('tornado.access').propagate = False

    def init(self, cls, obj, **kwargs):
        super(cls, obj).__init__(**kwargs)
        obj.index = 0
        obj.test = None
        obj._port = None
        obj._thread = None
        obj._asyncio_loop = None
        obj._ioloop = None

    def _url(self, obj):
        return self.url.format(port=obj._port, index=obj.index)

    def _service(self, obj):
        return '{scheme}://localhost:{port}'.format(scheme='https' if self.ssl_ctx else 'http', port=obj._port)

    def call(self, cls, obj, *, index):
        if index != 0 and obj.test is None:
            return None
        return self._url(obj)

    def enter(self, cls, obj):
        decorator = self

        class MainHandler(RequestHandler):

            def data_received(self, chunk):
                pass

            def get(self):
                try:
                    obj.test = super(cls, obj).__call__(index=obj.index)
                    if obj.test is not None:
                        obj.index += 1
                        if decorator.refresh > 0:
                            self.set_header('Refresh', '{timeout}; url={url}'
                                            .format(timeout=decorator.refresh,
                                                    url=decorator._url(obj)))
                    test = obj.test
                    if not isinstance(test, (str, bytes, dict)):
                        test = str(test)
                    self.write(test)
                except Exception as e:
                    logger.warning('Unhandled exception in TornadoDecorator.', exc_info=e)

        class TemplateHandler(RequestHandler):

            def get(self, page):
                try:
                    self.render(page + '.html')
                except FileNotFoundError:
                    logger.debug('%s not found', page)
                    self.send_error(404)
                except Exception as e:
                    logger.debug('Exception while rendering %s', page, exc_info=e)
                    self.send_error(500)

        def start_tornado():
            # Create a new asyncio event loop, set it as current, and wrap it in Tornado's IOLoop.
            obj._asyncio_loop = asyncio.new_event_loop()  # save asyncio event loop so that we can cancel its tasks when shutting down
            asyncio.set_event_loop(obj._asyncio_loop)
            obj._ioloop = IOLoop.current()  # save the Tornado-wrapped event loop so that we can stop it when shutting down

            # Set up the web service (application).
            handlers = [(r'/', MainHandler)]
            if self.template_path:
                handlers += [(r'/(.+)', TemplateHandler)]
            app = Application(handlers, template_path=self.template_path, static_path=self.static_path, debug=False)
            app.listen(obj._port, ssl_options=self.ssl_ctx)

            # Run the event loop and the application within.
            logger.debug('Starting Tornado server at %s', self._service(obj))
            obj._ioloop.start()  # block here until event loop is stopped
            obj._ioloop.close(all_fds=True)  # release port after event loop is stopped
            logger.debug('Stopped Tornado server at %s', self._service(obj))

        # Call decorated fuzzer's __enter__.
        super(cls, obj).__enter__()

        # Start Tornado in a separate thread.
        _, obj._port = bind_unused_port()  # get random available port before starting the thread, because we cannot be sure when the thread will actually start and __call__ may need it sooner
        obj._thread = threading.Thread(target=start_tornado)  # save the thread so that we can join it when shutting down
        obj._thread.start()

        return obj

    def exit(self, cls, obj, *exc):
        def stop_tornado():
            # (Ask to) cancel all pending tasks of the underlying asyncio event loop. Cancellation actually happens in a next iteration of the loop.
            for task in asyncio_all_tasks(obj._asyncio_loop):  # this is to avoid harmless(?) "ERROR:asyncio:Task was destroyed but it is pending!" messages
                task.cancel()
            # Ask to stop the event loop (after the cancellations happen).
            obj._ioloop.add_callback(obj._ioloop.stop)

        # Call decorated fuzzer's __exit__.
        suppress = super(cls, obj).__exit__(*exc)

        # Stop the thread of Tornado and wait until it terminates.
        logger.debug('Stopping Tornado server at %s', self._service(obj))
        obj._ioloop.add_callback(stop_tornado)
        obj._thread.join()

        return suppress

# === end of fuzzinator/fuzzer/tornado_decorator.py (repo: renatahodovan/fuzzinator, license: bsd-3-clause) ===

'''
    Menu for Community Scripts

    Author:        Christoph Stoettner
    Mail:          christoph.stoettner@stoeps.de
    Documentation: http://scripting101.org

    Version:       5.0.1
    Date:          09/19/2015

    License:       Apache 2.0

    History:       Changed by Jan Alderlieste
'''

import sys
import os
import ibmcnx.functions
import ibmcnx.menu.MenuClass
import java
from java.lang import String
from java.util import HashSet
from java.util import HashMap

# Only load commands if not initialized directly (call from menu)
# if __name__ == "__main__":
#     execfile("ibmcnx/loadCnxApps.py")

global globdict
globdict = globals()


def cnxFilesVersionStamp():
    execfile('ibmcnx/cnx/VersionStamp.py', globdict)


def cnxCommunitiesReparenting():
    execfile('ibmcnx/cnx/CommunitiesReparenting.py', globdict)


def cnxFilesPolicies():
    execfile('ibmcnx/cnx/FilesPolicies.py', globdict)


def cnxLibraryPolicies():
    execfile('ibmcnx/cnx/LibraryPolicies.py', globdict)


def cnxLibraryLarge():
    execfile('ibmcnx/cnx/LibrarySizesOverview.py', globdict)


def cnxLibrarySizes():
    execfile('ibmcnx/cnx/LibrarySizes.py', globdict)


def cnxProfAddRole():
    execfile('ibmcnx/cnx/ProfilesAddExtRole.py', globdict)


comm = ibmcnx.menu.MenuClass.cnxMenu()
comm.AddItem('Update VersionStamp (ibmcnx/cnx/VersionStamp.py)',
             cnxFilesVersionStamp)
comm.AddItem('Work with Files Policies (ibmcnx/cnx/FilesPolicies.py)',
             cnxFilesPolicies)
comm.AddItem('Work with Libraries (ibmcnx/cnx/LibraryPolicies.py)',
             cnxLibraryPolicies)
comm.AddItem('Show Library Sizes (ibmcnx/cnx/LibrarySizes.py)',
             cnxLibrarySizes)
comm.AddItem('List Libraries with more than 80% Used Space (ibmcnx/cnx/LibrarySizesOverview.py)',
             cnxLibraryLarge)
comm.AddItem('Reparent/Move Communities (ibmcnx/cnx/CommunitiesReparenting.py)',
             cnxCommunitiesReparenting)
comm.AddItem('Add employee.extended role to user (ibmcnx/cnx/ProfilesAddExtRole.py)',
             cnxProfAddRole)
comm.AddItem('Back to Main Menu (ibmcnx/menu/cnxmenu.py)',
             ibmcnx.functions.cnxBackToMainMenu)
comm.AddItem("Exit", ibmcnx.functions.bye)

state_comm = 'True'
menutitle = "HCL Connections Admin Tasks"

while state_comm == 'True':
    count = len(comm.menuitems)
    comm.Show(menutitle)

    ###########################
    ## Robust error handling ##
    ## only accept int       ##
    ###########################

    ## Wait for valid input in while...not ###
    is_valid_comm = 0
    while not is_valid_comm:
        try:
            inputstring = '\tEnter your choice [1-' + str(count) + ']: '
            n = int(raw_input(inputstring))
            if n <= count and n > 0:
                is_valid_comm = 1  # set it to 1 to validate input and to terminate the while..not loop
            else:
                print ("'%s' is not a valid menu option.") % n
        except ValueError, e:
            print ("'%s' is not a valid integer." % e.args[0].split(": ")[1])

    # n = input( "your choice> " )
    comm.Do(n - 1)
{ "content_hash": "0f861cac6db59cb87fec38c0ebfe6edd", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 105, "avg_line_length": 29.23076923076923, "alnum_prop": 0.6625, "repo_name": "stoeps13/ibmcnx2", "id": "c95045adddab0d97b5d263ded1875ffaba3bfb6d", "size": "3040", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ibmcnx/menu/comm.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "183639" }, { "name": "Shell", "bytes": "391" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals from django import forms from .models import User class UserForm(forms.ModelForm): class Meta: # Set this form to use the User model. model = User # Constrain the UserForm to just these fields. fields = ("full_name",)
{ "content_hash": "c8759201e16e41771c5940605be385d4", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 56, "avg_line_length": 21.133333333333333, "alnum_prop": 0.6624605678233438, "repo_name": "harshnoise/shor", "id": "0e567b8438dbad8a4727224c0802d2dfc8c011d7", "size": "341", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "shor/users/forms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1212" }, { "name": "HTML", "bytes": "19254" }, { "name": "JavaScript", "bytes": "2375" }, { "name": "Python", "bytes": "47305" }, { "name": "Shell", "bytes": "3620" } ], "symlink_target": "" }
"""piddleSVG This module implements an SVG PIDDLE canvas. In other words, this is a PIDDLE backend that renders into a SVG file. Bits have been shamelessly cobbled from piddlePDF.py and/or piddlePS.py Greg Landrum (greglandrum@earthlink.net) 3/10/2000 """ """ Functionality implemented: -drawLine -drawPolygon -drawEllipse -drawArc -drawCurve -drawString (rotated text is, mostly, fine... see below) -drawFigure -drawImage Known problems: -Rotated text is right in either IBM's SVGView or Adobe's plugin. This problem is explained in drawString() -The font/string handling is not perfect. There are definite problems with getting the widths of strings. Thus far heights seem to work okay in the tests that I've done, but those could well be broken as well. """ from rdkit.sping.pid import * from rdkit.sping.PDF import pdfmetrics # for font info import string from rdkit import six from math import * #SVG_HEADER = """<?xml version="1.0" encoding="iso-8859-1"?> #<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" #"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> #""" SVG_HEADER = """<?xml version="1.0" encoding="iso-8859-1"?> """ def _ColorToSVG(color): """ convenience function for converting a sping.pid color to an SVG color """ if color == transparent: return 'none' else: return 'rgb(%d,%d,%d)'%(int(color.red*255),int(color.green*255), int(color.blue*255)) def _PointListToSVG(points,dupFirst=0): """ convenience function for converting a list of points to a string suitable for passing to SVG path operations """ outStr = '' for i in xrange(len(points)): outStr = outStr + '%.2f,%.2f '%(points[i][0],points[i][1]) # add back on the first point. This is not required in the spec, # but Adobe's beta-quality viewer seems to not like it being skipped if dupFirst == 1: outStr = outStr + '%.2f,%.2f'%(points[0][0],points[0][1]) return outStr class SVGCanvas( Canvas ): def __init__(self, size=(300,300), name='SVGCanvas',includeXMLHeader=True,extraHeaderText=''): self._nImages=1 # I'd rather do this as PNG, but IBM's SVGView doesn't support those # yet. Adobe's plugin works just fine with them, however. self._imageFormat='GIF' self.size = size self._initOutput(includeXMLHeader=includeXMLHeader,extraHeaderText=extraHeaderText) Canvas.__init__(self, size, name) def _initOutput(self,includeXMLHeader=True,extraHeaderText=''): if includeXMLHeader: self._txt = SVG_HEADER else: self._txt = "" self._txt += """<svg:svg version="1.1" baseProfile="full" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" width="%dpx" height="%dpx" %s>\n"""%(self.size[0],self.size[1],extraHeaderText) def _findExternalFontName(self, font): #copied from piddlePDF by cwl- hack away! """Attempts to return proper font name. PDF uses a standard 14 fonts referred to by name. Default to self.defaultFont('Helvetica'). 
The dictionary allows a layer of indirection to support a standard set of sping.pid font names.""" piddle_font_map = { 'Times':'Times', 'times':'Times', 'Courier':'Courier', 'courier':'Courier', 'helvetica':'Helvetica', 'Helvetica':'Helvetica', 'symbol':'Symbol', 'Symbol':'Symbol', 'monospaced':'Courier', 'serif':'Times', 'sansserif':'Helvetica', 'ZapfDingbats':'ZapfDingbats', 'zapfdingbats':'ZapfDingbats', 'arial':'Helvetica' } try: face = piddle_font_map[string.lower(font.face)] except Exception: return piddle_font_map[string.lower('sansserif')] name = face + '-' if font.bold and face in ['Courier','Helvetica','Times']: name = name + 'Bold' if font.italic and face in ['Courier', 'Helvetica']: name = name + 'Oblique' elif font.italic and face == 'Times': name = name + 'Italic' if name == 'Times-': name = name + 'Roman' # symbol and ZapfDingbats cannot be modified! #trim and return if name[-1] == '-': name = name[0:-1] return name def _FormFontStr(self,font): """ form what we hope is a valid SVG font string. Defaults to 'sansserif' This should work when an array of font faces are passed in. """ fontStr = '' if font.face is None: font.__dict__['face'] = 'sansserif' # quick hack -cwl if isinstance(font.face,six.string_types): if len(string.split(font.face)) > 1: familyStr = '\'%s\''%font.face else: familyStr = font.face else: face = font.face[0] if len(string.split(face)) > 1: familyStr = '\'%s\''%(face) else: familyStr = face for i in xrange(1,len(font.face)): face = font.face[i] if len(string.split(face)) > 1: familyStr = ', \'%s\''%(face) else: familyStr = familyStr + ', %s'%face if font.italic: styleStr = 'font-style="italic"' else: styleStr = '' if font.bold: weightStr = 'font-weight="bold"' else: weightStr = '' if font.size: sizeStr = 'font-size="%.2f"'%font.size else: sizeStr = '' fontStr = 'font-family="%s" %s %s %s'%(familyStr,styleStr,weightStr,sizeStr) return fontStr def _FormArcStr(self,x1,y1,x2,y2,theta1,extent): """ Forms an arc specification for SVG """ if abs(extent) > 360: if extent < 0: extent = -abs(extent)%360 else: extent = extent%360 # deal with figuring out the various arc flags # required by SVG. if extent > 180: # this one is easy arcFlag = 1 else: arcFlag = 0 if extent >=0: sweepFlag = 0 else: sweepFlag = 1 # convert angles to radians (grn) theta1 = pi * theta1 / 180. extent = pi * extent / 180. # center of the arc cx = (x1+x2)/2. cy = (y1+y2)/2. # its radius rx = abs(x2 - x1)/2. ry = abs(y2 - y1)/2. # final angle theta2 = theta1 + extent # SVG takes arcs as paths running from one point to another. # figure out what those start and end points are now. # the -thetas are required because of a difference in the handedness # of angles in Piddle and SVG startx = cx + rx*cos(-theta1) starty = cy + ry*sin(-theta1) endx = cx + rx*cos(-theta2) endy = cy + ry*sin(-theta2) arcStr = '%.2f %.2f A%.2f %.2f 0 %d %d %.2f %.2f'%(startx,starty,rx,ry, arcFlag,sweepFlag, endx,endy) return arcStr # public functions def clear(self): self._initOutput() def flush(self): # self.save('svg') pass # to fit new definition of flush() -cwl def save(self, file=None, format=None): """Hand hand this either a file= <filename> or file = <an open file object>. By default, I've made the fomrat extension be .svg. By default it saves the file to "self.name" + '.svg' """ if file==None: file = self.name if isinstance(file, six.string_types): isFileName = 1 else: isFileName = 0 if isFileName: if format == None: if '.' not in file: file = file + '.svg' else: file = file + '.' 
+ type fileobj = getFileObject(file, openFlags="w+") fileobj.write(self._txt+'</svg:svg>') if isFileName: fileobj.close() # do not close if handed a file handle instead of a file name def text(self): return self._txt+'</svg:svg>' #------------- drawing methods -------------- def drawLine(self, x1,y1, x2,y2, color=None, width=None, dash=None,**kwargs): "Draw a straight line between x1,y1 and x2,y2." # set color... if color: if color == transparent: return elif self.defaultLineColor == transparent: return else: color = self.defaultLineColor svgColor = _ColorToSVG(color) if width: w = width else: w = self.defaultLineWidth styleStr = 'stroke="%s" stroke-width="%d"'%(svgColor,w) if dash is not None: styleStr += ' stroke-dasharray="' styleStr += ' '.join([str(x) for x in dash]) styleStr += '"' outStr = '<svg:line x1="%.2f" y1="%.2f" x2="%.2f" y2="%.2f" %s>'%(x1,y1,x2,y2,styleStr) if kwargs.has_key('bodyText'): outStr+=kwargs['bodyText'] outStr += '</svg:line>\n' self._txt = self._txt + outStr def drawPolygon(self, pointlist, edgeColor=None, edgeWidth=None, fillColor=transparent, closed=0, dash=None,**kwargs): """drawPolygon(pointlist) -- draws a polygon pointlist: a list of (x,y) tuples defining vertices """ # get the points into SVG format pointStr = _PointListToSVG(pointlist,dupFirst=closed) # set color for fill... filling = 0 if fillColor: if fillColor != transparent: filling = 1 # do the fill if filling: fillStr = 'fill="%s"'%_ColorToSVG(fillColor) else: fillStr = 'fill="none"' # set color for edge... if not edgeColor: edgeColor = self.defaultLineColor # set edge width... if edgeWidth == None: edgeWidth = self.defaultLineWidth # SVG markers edgeStr = 'stroke="%s" stroke-width="%d"'%(_ColorToSVG(edgeColor),int(edgeWidth)) if dash is not None: edgeStr += ' stroke-dasharray="' edgeStr += ' '.join([str(x) for x in dash]) edgeStr += '"' # draw it outStr = '<svg:polygon %s %s points="%s">'%(fillStr,edgeStr,pointStr) if kwargs.has_key('bodyText'): outStr+=kwargs['bodyText'] outStr += '</svg:polygon>\n' self._txt = self._txt + outStr def drawEllipse(self, x1,y1,x2,y2, edgeColor=None, edgeWidth=None, fillColor=transparent, dash=None,**kwargs): # get the points into SVG format cx = (x1+x2)/2. cy = (y1+y2)/2. rx = abs(x2 - x1)/2. ry = abs(y2 - y1)/2. ellipseStr = 'cx="%.2f" cy="%.2f" rx="%.2f" ry="%.2f"'%(cx,cy,rx,ry) # set color for fill... filling = 0 if fillColor: if fillColor != transparent: filling = 1 # do the fill if filling: fillStr = 'fill="%s"'%_ColorToSVG(fillColor) else: fillStr = 'fill="none"' # set color for edge... if not edgeColor: edgeColor = self.defaultLineColor # set edge width... if edgeWidth == None: edgeWidth = self.defaultLineWidth edgeStr = 'stroke="%s" stroke-width="%d"'%(_ColorToSVG(edgeColor),int(edgeWidth)) if dash is not None: edgeStr += ' stroke-dasharray="' edgeStr += ' '.join([str(x) for x in dash]) edgeStr += '"' # draw it mods = [fillStr,edgeStr,ellipseStr] if kwargs.has_key('extraAttribs'): mods.append(kwargs['extraAttribs']) outStr = '<svg:ellipse %s>'%(' '.join(mods)) if kwargs.has_key('bodyText'): outStr+=kwargs['bodyText'] outStr += '</svg:ellipse>\n' self._txt = self._txt + outStr def drawArc(self, x1,y1,x2,y2,theta1=0,extent=360, edgeColor=None, edgeWidth=None, fillColor=None, dash=None,**kwargs): # set color for fill... 
    filling = 0
    if not fillColor:
      fillColor = self.defaultFillColor
    if fillColor != transparent:
      filling = 1

    # do the fill
    if filling:
      fillStr = 'fill="%s"'%_ColorToSVG(fillColor)
    else:
      fillStr = 'fill="none"'

    arcStr = self._FormArcStr(x1,y1,x2,y2,theta1,extent)

    if not filling:
      pathStr = 'M' + arcStr
    else:
      # this is a bit trickier. Piddle requires filled arcs to stroke the
      # arc bit and fill into the middle (like a piece of pie) without
      # stroking the lines to the middle. So we need *two* paths here.
      strokePathStr = 'M' + arcStr
      cx = (x1 + x2)/2.
      cy = (y1 + y2)/2.
      fillPathStr = 'M%.2f %.2f L%sZ'%(cx,cy,arcStr)

    # set color for edge...
    if not edgeColor:
      edgeColor = self.defaultLineColor
    # set edge width...
    if edgeWidth == None:
      edgeWidth = self.defaultLineWidth
    # SVG markers
    edgeStr = 'stroke="%s" stroke-width="%d"'%(_ColorToSVG(edgeColor),int(edgeWidth))
    if dash is not None:
      edgeStr += ' stroke-dasharray="'
      edgeStr += ' '.join([str(x) for x in dash])
      edgeStr += '"'

    # draw it
    if not filling:
      outStr = '<svg:path %s %s d="%s">'%(fillStr,edgeStr,pathStr)
      if kwargs.has_key('bodyText'):
        outStr+=kwargs['bodyText']
      outStr += '</svg:path>\n'
    else:
      outStr = '<svg:path %s d="%s">'%(fillStr,fillPathStr)
      outStr += '</svg:path>\n'
      outStr = outStr+'<svg:path fill="none" %s d="%s">'%(edgeStr,strokePathStr)
      if kwargs.has_key('bodyText'):
        outStr+=kwargs['bodyText']
      outStr += '</svg:path>\n'
    self._txt = self._txt + outStr

  def drawCurve(self, x1,y1,x2,y2,x3,y3,x4,y4,
                edgeColor=None, edgeWidth=None, fillColor=transparent,
                closed=0, dash=None,**kwargs):
    # get the points into SVG format
    curveStr = 'M%.2f %.2f C%.2f %.2f %.2f %.2f %.2f %.2f'%(x1,y1,x2,y2,x3,y3,x4,y4)
    if closed:
      curveStr = curveStr + 'Z'

    # set color for fill...
    filling = 0
    if fillColor:
      if fillColor != transparent:
        filling = 1

    # do the fill
    if filling:
      fillStr = 'fill="%s"'%_ColorToSVG(fillColor)
    else:
      fillStr = 'fill="none"'

    # set color for edge...
    if not edgeColor:
      edgeColor = self.defaultLineColor
    # set edge width...
    if edgeWidth == None:
      edgeWidth = self.defaultLineWidth
    # SVG markers
    edgeStr = 'stroke="%s" stroke-width="%d"'%(_ColorToSVG(edgeColor),int(edgeWidth))
    if dash is not None:
      edgeStr += ' stroke-dasharray="'
      edgeStr += ' '.join([str(x) for x in dash])
      edgeStr += '"'

    # draw it
    outStr = '<svg:path %s %s d="%s">'%(fillStr,edgeStr,curveStr)
    if kwargs.has_key('bodyText'):
      outStr+=kwargs['bodyText']
    outStr += '</svg:path>\n'
    self._txt = self._txt + outStr

  def drawString(self, s, x,y, font=None, color=None, angle=0, **kwargs):
    # set color...
    if color:
      if color == transparent:
        return
    elif self.defaultLineColor == transparent:
      return
    else:
      color = self.defaultLineColor
    if font is None:
      font = self.defaultFont
    if font:
      fontStr = self._FormFontStr(font)
    else:
      fontStr = ''

    svgColor = _ColorToSVG(color)

    outStr = ''
    if angle != 0:
      # note: this is the correct order of the transforms according to my reading of
      # the SVG spec and the behavior of Adobe's SVG plugin. If you want it to work
      # in IBM's SVGView, you'll have to use the second (commented out) form.
      # Ah, the joys of using mature technologies.
;-) outStr += '<svg:g transform="translate(%.2f,%.2f) rotate(%.2f)">\n'%(x,y,360-angle) #outStr += '<svg:g transform="rotate(%.2f) translate(%.2f,%.2f)">\n'%(360-angle,x,y) xLoc = 0 yLoc = 0 else: xLoc = x yLoc = y outStr +='<svg:g>' lines = string.split(s,'\n') lineHeight = self.fontHeight(font) yP = yLoc for line in lines: outStr += self._drawStringOneLine(line,xLoc,yP,fontStr,svgColor,**kwargs) yP = yP + lineHeight if kwargs.has_key('bodyText'): outStr+=kwargs['bodyText'] outStr += '</svg:g>' self._txt = self._txt + outStr def _drawStringOneLine(self,line,x,y,fontStr,svgColor,**kwargs): styleStr = '%s fill="%s"'%(fontStr,svgColor) return ' <svg:text %s x="%.2f" y="%.2f">%s</svg:text>\n'%(styleStr,x,y,line) def drawFigure(self, partList, edgeColor=None, edgeWidth=None, fillColor=None, closed=0, dash=None, **kwargs): """drawFigure(partList) -- draws a complex figure partlist: a set of lines, curves, and arcs defined by a tuple whose first element is one of figureLine, figureArc, figureCurve and whose remaining 4, 6, or 8 elements are parameters.""" filling = 0 if fillColor: if fillColor != transparent: filling = 1 # do the fill if filling: fillStr = 'fill="%s"'%_ColorToSVG(fillColor) else: fillStr = 'fill="none"' # set color for edge... if not edgeColor: edgeColor = self.defaultLineColor # set edge width... if edgeWidth == None: edgeWidth = self.defaultLineWidth # SVG markers edgeStr = 'stroke="%s" stroke-width="%d"'%(_ColorToSVG(edgeColor),int(edgeWidth)) if dash is not None: edgeStr += ' stroke-dasharray="' edgeStr += ' '.join([str(x) for x in dash]) edgeStr += '"' pathStr = '' for item in partList: op = item[0] args = list(item[1:]) if pathStr == '': pathStr = pathStr + 'M' else: pathStr = pathStr + 'L' if op == figureLine: pathStr = pathStr + '%.2f %.2f L%.2f %.2f'%(tuple(args)) elif op == figureCurve: pathStr = pathStr + '%.2f %.2f C%.2f %.2f %.2f %.2f %.2f %.2f'%(tuple(args)) elif op == figureArc: x1,y1,x2,y2,theta1,extent=tuple(args) pathStr = pathStr + self._FormArcStr(x1,y1,x2,y2,theta1,extent) else: raise TypeError("unknown figure operator: "+op) if closed == 1: pathStr = pathStr + 'Z' outStr = '<svg:path %s %s d="%s">'%(edgeStr,fillStr,pathStr) if kwargs.has_key('bodyText'): outStr+=kwargs['bodyText'] outStr += '</svg:path>\n' self._txt = self._txt + outStr def drawImage(self, image, x1,y1, x2=None,y2=None, **kwargs): """ to the best of my knowledge, the only real way to get an image into SVG is to read it from a file. So we'll save out to a PNG file, then set a link to that in the SVG. """ imageFileName= '%s-%d.%s'%(self.name,self._nImages,string.lower(self._imageFormat)) self._nImages = self._nImages + 1 image.save(imageFileName,format=self._imageFormat) im_width,im_height=image.size if x2 is not None: im_width = abs(x2-x1) if y2 is not None: im_height = abs(y2-y1) outStr = '<svg:image x="%.2f" y="%.2f" width="%.2f" height="%.2f" xlink:href="%s">'%\ (x1,y1,im_width,im_height,imageFileName) if kwargs.has_key('bodyText'): outStr+=kwargs['bodyText'] outStr += '</svg:image>\n' self._txt = self._txt + outStr def stringWidth(self, s, font=None): "Return the logical width of the string if it were drawn \ in the current font (defaults to self.font)." 
if not font: font = self.defaultFont fontname = self._findExternalFontName(font) return pdfmetrics.stringwidth(s, fontname) * font.size * 0.001 def fontAscent(self, font=None): if not font: font = self.defaultFont #return -font.size fontname = self._findExternalFontName(font) return pdfmetrics.ascent_descent[fontname][0] * 0.001 * font.size def fontDescent(self, font=None): if not font: font = self.defaultFont fontname = self._findExternalFontName(font) return -pdfmetrics.ascent_descent[fontname][1] * 0.001 * font.size def test(): #... for testing... canvas = SVGCanvas(name="test") canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue canvas.drawLines( map(lambda i:(i*10,0,i*10,300), range(30)) ) canvas.drawLines( map(lambda i:(0,i*10,300,i*10), range(30)) ) canvas.defaultLineColor = black canvas.drawLine(10,200, 20,190, color=red) canvas.drawEllipse( 130,30, 200,100, fillColor=yellow, edgeWidth=4 ) canvas.drawArc( 130,30, 200,100, 45,50, fillColor=blue, edgeColor=navy, edgeWidth=4 ) canvas.defaultLineWidth = 4 canvas.drawRoundRect( 30,30, 100,100, fillColor=blue, edgeColor=maroon ) canvas.drawCurve( 20,20, 100,50, 50,100, 160,160 ) canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1), color=green, angle=-45) canvas.drawString("This is a test!", 30,130, color=red, angle=-45) polypoints = [ (160,120), (130,190), (210,145), (110,145), (190,190) ] canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1) canvas.drawRect( 200,200,260,260, edgeColor=yellow, edgeWidth=5 ) canvas.drawLine( 200,260,260,260, color=green, width=5 ) canvas.drawLine( 260,200,260,260, color=red, width=5 ) canvas.flush() canvas.save('test.svg') def dashtest(): #... for testing... canvas = SVGCanvas(name="dashtest.svg") canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue canvas.drawLines( map(lambda i:(i*10,0,i*10,300), range(30)),dash=(3,3) ) canvas.drawLines( map(lambda i:(0,i*10,300,i*10), range(30)),dash=(3,3) ) canvas.defaultLineColor = black canvas.drawLine(10,200, 20,190, color=red,dash=(3,3)) canvas.drawEllipse( 130,30, 200,100, fillColor=yellow, edgeWidth=4,dash=(3,3) ) canvas.drawArc( 130,30, 200,100, 45,50, fillColor=blue, edgeColor=navy, edgeWidth=4,dash=(3,3) ) canvas.defaultLineWidth = 4 canvas.drawRoundRect( 30,30, 100,100, fillColor=blue, edgeColor=maroon,dash=(3,3) ) canvas.drawCurve( 20,20, 100,50, 50,100, 160,160,dash=(3,3) ) canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1), color=green, angle=-45) canvas.drawString("This is a test!", 30,130, color=red, angle=-45) polypoints = [ (160,120), (130,190), (210,145), (110,145), (190,190) ] canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1,dash=(3,3)) canvas.drawRect( 200,200,260,260, edgeColor=yellow, edgeWidth=5,dash=(3,3) ) canvas.drawLine( 200,260,260,260, color=green, width=5,dash=(3,3) ) canvas.drawLine( 260,200,260,260, color=red, width=5,dash=(3,3) ) canvas.flush() canvas.save() def testit(canvas, s, x,y, font=None): canvas.defaultLineColor = black canvas.drawString(s, x,y, font=font) canvas.defaultLineColor = blue w = canvas.stringWidth(s, font=font) canvas.drawLine(x,y, x+w,y) canvas.drawLine(x,y-canvas.fontAscent(font=font), x+w,y-canvas.fontAscent(font=font)) canvas.drawLine(x,y+canvas.fontDescent(font=font), x+w,y+canvas.fontDescent(font=font)) def test2(): canvas = SVGCanvas(name="Foogar") testit( canvas, "Foogar", 20, 30 ) testit( canvas, "Foogar", 20, 90, font=Font(size=24) ) global dammit testit( canvas, "Foogar", 20, 
150, font=Font(face='courier',size=24) ) testit( canvas, "Foogar", 20, 240, font=Font(face='courier') ) canvas.flush() canvas.save() if __name__ == '__main__': test() dashtest() test2()
{ "content_hash": "dc2d35f32bd01f91a65f55eb2f42f507", "timestamp": "", "source": "github", "line_count": 736, "max_line_length": 108, "avg_line_length": 31.152173913043477, "alnum_prop": 0.6017969295184926, "repo_name": "adalke/rdkit", "id": "b8fcc581c6fc3f45d93edb45db86e6fe99782ff4", "size": "23687", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rdkit/sping/SVG/pidSVG.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "385" }, { "name": "C", "bytes": "226290" }, { "name": "C#", "bytes": "6745" }, { "name": "C++", "bytes": "7847294" }, { "name": "CMake", "bytes": "611343" }, { "name": "CSS", "bytes": "3231" }, { "name": "FORTRAN", "bytes": "7661" }, { "name": "HTML", "bytes": "63047" }, { "name": "Java", "bytes": "291444" }, { "name": "JavaScript", "bytes": "11595" }, { "name": "LLVM", "bytes": "29594" }, { "name": "Lex", "bytes": "4508" }, { "name": "Makefile", "bytes": "15435" }, { "name": "Objective-C", "bytes": "298" }, { "name": "Python", "bytes": "3138951" }, { "name": "QMake", "bytes": "389" }, { "name": "SMT", "bytes": "3010" }, { "name": "Shell", "bytes": "12651" }, { "name": "Smarty", "bytes": "5864" }, { "name": "Yacc", "bytes": "49429" } ], "symlink_target": "" }
from runner.koan import *

import random


class DiceSet(object):
    def __init__(self):
        self._values = []

    @property
    def values(self):
        return self._values

    def roll(self, n):
        # Tip: random.randint(min, max) generates the random numbers.
        # A roll replaces any previous values; dice are six-sided, so the
        # range is 1..6 regardless of how many dice are thrown.
        self._values = [random.randint(1, 6) for _ in range(n)]
        return self._values


class AboutDiceProject(Koan):
    def test_can_create_a_dice_set(self):
        dice = DiceSet()
        self.assertTrue(dice)

    def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
        dice = DiceSet()
        dice.roll(5)
        self.assertTrue(isinstance(dice.values, list), "should be a list")
        self.assertEqual(5, len(dice.values))

        for value in dice.values:
            self.assertTrue(
                value >= 1 and value <= 6,
                "value " + str(value) + " must be between 1 and 6")

    def test_dice_values_do_not_change_unless_explicitly_rolled(self):
        dice = DiceSet()
        dice.roll(5)
        first_time = dice.values
        second_time = dice.values
        self.assertEqual(first_time, second_time)

    def test_dice_values_should_change_between_rolls(self):
        dice = DiceSet()
        dice.roll(5)
        first_time = dice.values
        dice.roll(5)
        second_time = dice.values
        self.assertNotEqual(first_time, second_time,
                            "Two rolls should not be equal")

        # THINK ABOUT IT:
        #
        # If the rolls are random, then it is possible (although not
        # likely) that two consecutive rolls are equal. What would be a
        # better way to test this?

    def test_you_can_roll_different_numbers_of_dice(self):
        dice = DiceSet()
        dice.roll(3)
        self.assertEqual(3, len(dice.values))

        dice.roll(1)
        self.assertEqual(1, len(dice.values))
{ "content_hash": "8d0f884b9cb674d611ec391bc0b7748f", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 78, "avg_line_length": 27.3943661971831, "alnum_prop": 0.5902313624678663, "repo_name": "aishraj/pykons_solution", "id": "967641c2bf383ad4519c03c776adbd2d7e562765", "size": "1992", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python2/koans/about_dice_project.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "316242" }, { "name": "Shell", "bytes": "1603" } ], "symlink_target": "" }
from office365.sharepoint.client_context import ClientContext from office365.sharepoint.search.setting import SearchSetting from tests import test_site_url, test_user_credentials ctx = ClientContext(test_site_url).with_credentials(test_user_credentials) setting = SearchSetting(ctx) result = setting.ping_admin_endpoint().execute_query() if result.value: result = setting.export_search_reports(tenant_id="af6a80a4-8b4b-4879-88af-42ff8a545211", report_type="ReportTopQueries").execute_query() print(result)
{ "content_hash": "3f37c561a65b14acd6a39b22524a39e4", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 92, "avg_line_length": 50.72727272727273, "alnum_prop": 0.7437275985663082, "repo_name": "vgrem/Office365-REST-Python-Client", "id": "a5d8ade5ee2ee6345f80ca9418e453ac4fe11ec6", "size": "558", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/sharepoint/search/export_reports.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1659292" } ], "symlink_target": "" }
""" State S21 : Login """ from ...util.funcutils import singleton from .StateSCC import StateSCC @singleton class StateS21R(StateSCC): """State S21 : Login""" def do(self, handler, data): """Action of the state S21R: exchange id and login""" with handler.lock: try: # Challenge creation echallenge = self.compute_challenge(handler, b"S21.7") if echallenge: # Encrypt login elogin = handler.ephecc.encrypt( handler.login, pubkey=handler.ephecc.get_pubkey()) # Compute then encrypt id id = self.compute_client_id(handler.ms, handler.login) eid = handler.ephecc.encrypt( id, pubkey=handler.ephecc.get_pubkey()) # Send login request message = echallenge + b';LOGIN;' + eid + b';' + elogin handler.loop.call_soon_threadsafe( handler.transport.write, message) # Notify the handler a property has changed handler.loop.run_in_executor(None, handler.notify, "connection.state", "Login request") except Exception as exc: # Schedule a call to the exception handler handler.loop.call_soon_threadsafe(handler.exception_handler, exc) else: handler.state = handler.states['21A'] # Next state
{ "content_hash": "5bda2b594ed32f31db13627bb65823a3", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 81, "avg_line_length": 35.86666666666667, "alnum_prop": 0.5043370508054523, "repo_name": "thethythy/Mnemopwd", "id": "13a02e0fa1e85a669d88e0694be8513f186d5ffb", "size": "3011", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mnemopwd/client/corelayer/protocol/StateS21R.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "580678" } ], "symlink_target": "" }
import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# Hyphens are not valid in Python identifiers, so the original
# "import k-center-problem" could never run; the importable package name
# below is an assumption based on the project name.
import k_center_problem

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'k_center'
copyright = u'2015, k_center'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = k_center_problem.__version__
# The full version, including alpha/beta/rc tags.
release = k_center_problem.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None # A shorter title for the navigation bar. Default is the same as # html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the # top of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon # of the docs. This file should be a Windows icon file (.ico) being # 16x16 or 32x32 pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) # here, relative to this directory. They are copied after the builtin # static files, so a file named "default.css" will overwrite the builtin # "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names # to template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. # Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. # Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages # will contain a <link> tag referring to it. The value of this option # must be the base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'k-center-problemdoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'k_center.tex', u'k_center Documentation', u'k_center', 'manual'), ] # The name of an image file (relative to this directory) to place at # the top of the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings # are parts, not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'k_center', u'k_center Documentation', [u'k_center'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'k_center', u'k_center Documentation', u'k_center', 'k_center', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
{ "content_hash": "af5ac090b86f2db11a8243e1b2b94f25", "timestamp": "", "source": "github", "line_count": 260, "max_line_length": 76, "avg_line_length": 30.665384615384614, "alnum_prop": 0.7009908440988336, "repo_name": "PythonicNinja/k-center-problem", "id": "1558b5620c9e351c30c053c02ad8fbf72e9b6d93", "size": "8416", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1732" }, { "name": "Python", "bytes": "6184" } ], "symlink_target": "" }
from tempest.lib.services.identity.v2 import identity_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestIdentityClient(base.BaseServiceTest): FAKE_TOKEN = { "tokens": { "id": "cbc36478b0bd8e67e89", "name": "FakeToken", "type": "token", } } FAKE_API_INFO = { "name": "API_info", "type": "API", "description": "test_description" } FAKE_LIST_EXTENSIONS = { "extensions": { "values": [ { "updated": "2013-07-07T12:00:0-00:00", "name": "OpenStack S3 API", "links": [ { "href": "https://github.com/openstack/" + "identity-api", "type": "text/html", "rel": "describedby" } ], "namespace": "http://docs.openstack.org/identity/" + "api/ext/s3tokens/v1.0", "alias": "s3tokens", "description": "OpenStack S3 API." }, { "updated": "2013-12-17T12:00:0-00:00", "name": "OpenStack Federation APIs", "links": [ { "href": "https://github.com/openstack/" + "identity-api", "type": "text/html", "rel": "describedby" } ], "namespace": "http://docs.openstack.org/identity/" + "api/ext/OS-FEDERATION/v1.0", "alias": "OS-FEDERATION", "description": "OpenStack Identity Providers Mechanism." }, { "updated": "2014-01-20T12:00:0-00:00", "name": "OpenStack Simple Certificate API", "links": [ { "href": "https://github.com/openstack/" + "identity-api", "type": "text/html", "rel": "describedby" } ], "namespace": "http://docs.openstack.org/identity/api/" + "ext/OS-SIMPLE-CERT/v1.0", "alias": "OS-SIMPLE-CERT", "description": "OpenStack simple certificate extension" }, { "updated": "2013-07-07T12:00:0-00:00", "name": "OpenStack OAUTH1 API", "links": [ { "href": "https://github.com/openstack/" + "identity-api", "type": "text/html", "rel": "describedby" } ], "namespace": "http://docs.openstack.org/identity/" + "api/ext/OS-OAUTH1/v1.0", "alias": "OS-OAUTH1", "description": "OpenStack OAuth Delegated Auth Mechanism." }, { "updated": "2013-07-07T12:00:0-00:00", "name": "OpenStack EC2 API", "links": [ { "href": "https://github.com/openstack/" + "identity-api", "type": "text/html", "rel": "describedby" } ], "namespace": "http://docs.openstack.org/identity/api/" + "ext/OS-EC2/v1.0", "alias": "OS-EC2", "description": "OpenStack EC2 Credentials backend." 
} ] } } def setUp(self): super(TestIdentityClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = identity_client.IdentityClient(fake_auth, 'identity', 'regionOne') def _test_show_api_description(self, bytes_body=False): self.check_service_client_function( self.client.show_api_description, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_API_INFO, bytes_body) def _test_list_extensions(self, bytes_body=False): self.check_service_client_function( self.client.list_extensions, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_LIST_EXTENSIONS, bytes_body) def _test_show_token(self, bytes_body=False): self.check_service_client_function( self.client.show_token, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_TOKEN, bytes_body, token_id="cbc36478b0bd8e67e89") def test_show_api_description_with_str_body(self): self._test_show_api_description() def test_show_api_description_with_bytes_body(self): self._test_show_api_description(bytes_body=True) def test_show_list_extensions_with_str_body(self): self._test_list_extensions() def test_show_list_extensions_with_bytes_body(self): self._test_list_extensions(bytes_body=True) def test_show_token_with_str_body(self): self._test_show_token() def test_show_token_with_bytes_body(self): self._test_show_token(bytes_body=True) def test_delete_token(self): self.check_service_client_function( self.client.delete_token, 'tempest.lib.common.rest_client.RestClient.delete', {}, token_id="cbc36478b0bd8e67e89", status=204)
{ "content_hash": "5619fcdae4b9df761d6f40b427835846", "timestamp": "", "source": "github", "line_count": 161, "max_line_length": 78, "avg_line_length": 38.559006211180126, "alnum_prop": 0.4267074742268041, "repo_name": "sebrandon1/tempest", "id": "96d50d759447aba8483cf968844bc32c3691c9d5", "size": "6811", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tempest/tests/lib/services/identity/v2/test_identity_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "3618834" }, { "name": "Shell", "bytes": "9310" } ], "symlink_target": "" }
import pytest def test_valid(script_runner, tmpdir): ret = script_runner.run('o2r-meta', '-debug', 'validate', '-s', 'schema/json/o2r-meta-schema.json', '-c', 'schema/json/example_metadata_o2r_valid.json') print(ret.stdout) print(ret.stderr) assert ret.success, "process should return success" assert ret.stderr == '', "stderr should be empty" assert "against o2r-meta-schema.json" in ret.stdout, "should log used schema" assert "checking example_metadata_o2r_valid.json" in ret.stdout, "should log validated file" assert "valid: schema/json/example_metadata_o2r_valid.json" in ret.stdout, "should result in valid" def test_dummy_invalid(script_runner, tmpdir): ret = script_runner.run('o2r-meta', '-debug', 'validate', '-s', 'schema/json/o2r-meta-schema.json', '-c', 'schema/json/dummy.json') print(ret.stdout) print(ret.stderr) assert ret.success, "process should return success" assert ret.stderr == '', "stderr should be empty" assert "against o2r-meta-schema.json" in ret.stdout, "should log used schema" assert "checking dummy.json" in ret.stdout, "should log validated file" assert "!invalid" in ret.stdout, "should result in invalid" def test_spacetime(script_runner, tmpdir): ret = script_runner.run('o2r-meta', '-debug', 'validate', '-s', 'schema/json/o2r-meta-schema.json', '-c', 'validate/spacetime.json') print(ret.stdout) print(ret.stderr) assert ret.success, "process should return success" assert ret.stderr == '', "stderr should be empty" assert "checking spacetime.json" in ret.stdout, "should log validated file" assert "invalid" not in ret.stdout, "should result in valid" def test_minimal_rmd(script_runner, tmpdir): ret = script_runner.run('o2r-meta', '-debug', 'validate', '-s', 'schema/json/o2r-meta-schema.json', '-c', 'validate/minimal-rmd.json') print(ret.stdout) print(ret.stderr) assert ret.success, "process should return success" assert ret.stderr == '', "stderr should be empty" assert "checking minimal-rmd.json" in ret.stdout, "should log validated file" assert "invalid" not in ret.stdout, "should result in valid"
{ "content_hash": "f1965468905d57140f96d9e6998df249", "timestamp": "", "source": "github", "line_count": 51, "max_line_length": 103, "avg_line_length": 43.96078431372549, "alnum_prop": 0.6766280107047279, "repo_name": "o2r-project/o2r-meta", "id": "2caf52ad9d06033c04758fe8b343ad84fb10ed7b", "size": "2262", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/validate/test_validation.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2594" }, { "name": "HTML", "bytes": "1391353" }, { "name": "Python", "bytes": "126586" }, { "name": "R", "bytes": "1646" }, { "name": "Shell", "bytes": "465" } ], "symlink_target": "" }
import touchphat import signal import paho.mqtt.client as paho import time import json import ssl import ConfigParser as configparser import sys #Variables confFile = "/home/pi/nursery/nursery.conf" ca_certs = "/etc/ssl/certs/ca-certificates.crt" #Read details from the config file config = configparser.SafeConfigParser() config.read(confFile) # getfloat() raises an exception if the value is not a float # getint() and getboolean() also do this for their respective types username = "" password = "" server = "" port = "" try: username = config.get('auth', 'username') password = config.get('auth', 'password') server = config.get('server','host') port = config.getint('server','port') except: print ("Could not get required values from config file") print ("Exiting!") sys.exit() print (username, password, server, port) #Connect to an MQTT server client = paho.Client() #Wait for the connection before carrying on time.sleep(1) #Connect to the broker client.tls_set(ca_certs, certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,tls_version=ssl.PROTOCOL_TLSv1, ciphers=None) # set the SSL options client.username_pw_set(username, password=password) client.connect(server,port=port) #connect to broker #Create some variables to log states feedingLeft="No" feedingRight="No" sleeping="Awake" #Start a loop for MQTT client.loop_start() @touchphat.on_touch(['Back','A','B','C','D','Enter']) def handle_touch(event): eventTime = time.strftime("%H:%M:%S", time.localtime()) eventDate = time.strftime("%Y-%m-%d", time.localtime()) print(eventTime, eventDate,) #Test for nappy states if event.name=="A": client.publish("/house/nursery/nappy",json.dumps({"state":"Dirty","time":eventTime,"date":eventDate})) print("Dirty Nappy") elif event.name=="B": client.publish("/house/nursery/nappy",json.dumps({"state":"Wet","time":eventTime,"date":eventDate})) print("Wet Nappy") elif event.name=="C": client.publish("/house/nursery/nappy",json.dumps({"state":"Mixed","time":eventTime,"date":eventDate})) print("Mixed Nappy") #Test for sleeping elif event.name=="D": global sleeping if sleeping=="Awake": sleeping="Asleep" else: sleeping="Awake" client.publish("/house/nursery/sleeping",json.dumps({"state":str(sleeping),"time":eventTime,"date":eventDate})) print("Sleeping: ",sleeping) #Test for feeding elif event.name=="Back": global feedingLeft #This is left if feedingLeft == "No": feedingLeft = "Yes" else: feedingLeft = "No" client.publish("/house/nursery/feeding/left",json.dumps({"state":str(feedingLeft),"time":eventTime,"date":eventDate})) print ("Left: ",feedingLeft) elif event.name=="Enter": global feedingRight #This is right if feedingRight == "No": feedingRight = "Yes" else: feedingRight = "No" client.publish("/house/nursery/feeding/right",json.dumps({"state":str(feedingRight),"time":eventTime,"date":eventDate})) print ("Right: ",feedingRight) #Set the LED on and off again and add a delay for debounce touchphat.set_led(event.name, True) time.sleep(0.5) touchphat.set_led(event.name, False) signal.pause() client.disconnect()
{ "content_hash": "c3cf9c7237b7e3ac7b3f890f1b3158b2", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 149, "avg_line_length": 30.54054054054054, "alnum_prop": 0.6631268436578172, "repo_name": "mattmole/nursery-temperature-touch", "id": "12efd0a31de89ba1ad0affc02b92b705bc6460ff", "size": "3527", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nurseryTouch.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "10405" } ], "symlink_target": "" }
""" Module responsible for handling protocol requests and returning responses. """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals import ga4gh.datamodel as datamodel import ga4gh.exceptions as exceptions import ga4gh.protocol as protocol def _parseIntegerArgument(args, key, defaultValue): """ Attempts to parse the specified key in the specified argument dictionary into an integer. If the argument cannot be parsed, raises a BadRequestIntegerException. If the key is not present, return the specified default value. """ ret = defaultValue try: if key in args: try: ret = int(args[key]) except ValueError: raise exceptions.BadRequestIntegerException(key, args[key]) except TypeError: raise Exception((key, args)) return ret def _parsePageToken(pageToken, numValues): """ Parses the specified pageToken and returns a list of the specified number of values. Page tokens are assumed to consist of a fixed number of integers seperated by colons. If the page token does not conform to this specification, raise a InvalidPageToken exception. """ tokens = pageToken.split(":") if len(tokens) != numValues: msg = "Invalid number of values in page token" raise exceptions.BadPageTokenException(msg) try: values = map(int, tokens) except ValueError: msg = "Malformed integers in page token" raise exceptions.BadPageTokenException(msg) return values class IntervalIterator(object): """ Implements generator logic for types which accept a start/end range to search for the object. Returns an iterator over (object, pageToken) pairs. The pageToken is a string which allows us to pick up the iteration at any point, and is None for the last value in the iterator. """ def __init__(self, request, parentContainer): self._request = request self._parentContainer = parentContainer self._searchIterator = None self._currentObject = None self._nextObject = None self._searchAnchor = None self._distanceFromAnchor = None if not request.page_token: self._initialiseIteration() else: # Set the search start point and the number of records to skip from # the page token. searchAnchor, objectsToSkip = _parsePageToken( request.page_token, 2) self._pickUpIteration(searchAnchor, objectsToSkip) def _extractProtocolObject(self, obj): """ Returns the protocol object from the object passed back by iteration. """ return obj def _initialiseIteration(self): """ Starts a new iteration. """ self._searchIterator = self._search( self._request.start, self._request.end if self._request.end != 0 else None) self._currentObject = next(self._searchIterator, None) if self._currentObject is not None: self._nextObject = next(self._searchIterator, None) self._searchAnchor = self._request.start self._distanceFromAnchor = 0 firstObjectStart = self._getStart(self._currentObject) if firstObjectStart > self._request.start: self._searchAnchor = firstObjectStart def _pickUpIteration(self, searchAnchor, objectsToSkip): """ Picks up iteration from a previously provided page token. There are two different phases here: 1) We are iterating over the initial set of intervals in which start is < the search start coorindate. 2) We are iterating over the remaining intervals in which start >= to the search start coordinate. 
""" self._searchAnchor = searchAnchor self._distanceFromAnchor = objectsToSkip self._searchIterator = self._search( searchAnchor, self._request.end if self._request.end != 0 else None) obj = next(self._searchIterator) if searchAnchor == self._request.start: # This is the initial set of intervals, we just skip forward # objectsToSkip positions for _ in range(objectsToSkip): obj = next(self._searchIterator) else: # Now, we are past this initial set of intervals. # First, we need to skip forward over the intervals where # start < searchAnchor, as we've seen these already. while self._getStart(obj) < searchAnchor: obj = next(self._searchIterator) # Now, we skip over objectsToSkip objects such that # start == searchAnchor for _ in range(objectsToSkip): if self._getStart(obj) != searchAnchor: raise exceptions.BadPageTokenException obj = next(self._searchIterator) self._currentObject = obj self._nextObject = next(self._searchIterator, None) def next(self): """ Returns the next (object, nextPageToken) pair. """ if self._currentObject is None: raise StopIteration() nextPageToken = None if self._nextObject is not None: start = self._getStart(self._nextObject) # If start > the search anchor, move the search anchor. Otherwise, # increment the distance from the anchor. if start > self._searchAnchor: self._searchAnchor = start self._distanceFromAnchor = 0 else: self._distanceFromAnchor += 1 nextPageToken = "{}:{}".format( self._searchAnchor, self._distanceFromAnchor) ret = self._extractProtocolObject(self._currentObject), nextPageToken self._currentObject = self._nextObject self._nextObject = next(self._searchIterator, None) return ret def __iter__(self): return self class ReadsIntervalIterator(IntervalIterator): """ An interval iterator for reads """ def __init__(self, request, parentContainer, reference): self._reference = reference super(ReadsIntervalIterator, self).__init__(request, parentContainer) def _search(self, start, end): return self._parentContainer.getReadAlignments( self._reference, start, end) @classmethod def _getStart(cls, readAlignment): if readAlignment.alignment.position.position == 0: # unmapped read with mapped mate; see SAM standard 2.4.1 return readAlignment.next_mate_position.position else: # usual case return readAlignment.alignment.position.position @classmethod def _getEnd(cls, readAlignment): return ( cls._getStart(readAlignment) + len(readAlignment.aligned_sequence)) class VariantsIntervalIterator(IntervalIterator): """ An interval iterator for variants """ def _search(self, start, end): return self._parentContainer.getVariants( self._request.reference_name, start, end, self._request.call_set_ids) @classmethod def _getStart(cls, variant): return variant.start @classmethod def _getEnd(cls, variant): return variant.end class VariantAnnotationsIntervalIterator(IntervalIterator): """ An interval iterator for annotations """ def __init__(self, request, parentContainer): super(VariantAnnotationsIntervalIterator, self).__init__( request, parentContainer) # TODO do input validation somewhere more sensible if self._request.effects is None: self._effects = [] else: self._effects = self._request.effects def _search(self, start, end): return self._parentContainer.getVariantAnnotations( self._request.reference_name, start, end) def _extractProtocolObject(self, pair): variant, annotation = pair return annotation @classmethod def _getStart(cls, pair): variant, annotation = pair return variant.start @classmethod def _getEnd(cls, pair): variant, annotation = pair return variant.end def 
next(self): while True: ret = super(VariantAnnotationsIntervalIterator, self).next() vann = ret[0] if self.filterVariantAnnotation(vann): return self._removeNonMatchingTranscriptEffects(vann), ret[1] return None def filterVariantAnnotation(self, vann): """ Returns true when an annotation should be included. """ # TODO reintroduce feature ID search ret = False if len(self._effects) != 0 and not vann.transcript_effects: return False elif len(self._effects) == 0: return True for teff in vann.transcript_effects: if self.filterEffect(teff): ret = True return ret def filterEffect(self, teff): """ Returns true when any of the transcript effects are present in the request. """ ret = False for effect in teff.effects: ret = self._matchAnyEffects(effect) or ret return ret def _checkIdEquality(self, requestedEffect, effect): """ Tests whether a requested effect and an effect present in an annotation are equal. """ return self._idPresent(requestedEffect) and ( effect.id == requestedEffect.id) def _idPresent(self, requestedEffect): return requestedEffect.id != "" def _matchAnyEffects(self, effect): ret = False for requestedEffect in self._effects: ret = self._checkIdEquality(requestedEffect, effect) or ret return ret def _removeNonMatchingTranscriptEffects(self, ann): newTxE = [] if len(self._effects) == 0: return ann for txe in ann.transcript_effects: add = False for effect in txe.effects: if self._matchAnyEffects(effect): add = True if add: newTxE.append(txe) ann.transcript_effects.extend(newTxE) return ann class Backend(object): """ Backend for handling the server requests. This class provides methods for all of the GA4GH protocol end points. """ def __init__(self, dataRepository): self._requestValidation = False self._responseValidation = False self._defaultPageSize = 100 self._maxResponseLength = 2**20 # 1 MiB self._dataRepository = dataRepository def getDataRepository(self): """ Get the data repository used by this backend """ return self._dataRepository def setRequestValidation(self, requestValidation): """ Set enabling request validation """ self._requestValidation = requestValidation def setResponseValidation(self, responseValidation): """ Set enabling response validation """ self._responseValidation = responseValidation def setDefaultPageSize(self, defaultPageSize): """ Sets the default page size for request to the specified value. """ self._defaultPageSize = defaultPageSize def setMaxResponseLength(self, maxResponseLength): """ Sets the approximate maximum response length to the specified value. """ self._maxResponseLength = maxResponseLength def startProfile(self): """ Profiling hook. Called at the start of the runSearchRequest method and allows for detailed profiling of search performance. """ pass def endProfile(self): """ Profiling hook. Called at the end of the runSearchRequest method. """ pass def validateRequest(self, jsonDict, requestClass): """ Ensures the jsonDict corresponds to a valid instance of requestClass Throws an error if the data is invalid """ if self._requestValidation: if not protocol.validate(jsonDict, requestClass): raise exceptions.RequestValidationFailureException( jsonDict, requestClass) ########################################################### # # Iterators over the data hierarchy. These methods help to # implement the search endpoints by providing iterators # over the objects to be returned to the client. 
# ########################################################### def _topLevelObjectGenerator(self, request, numObjects, getByIndexMethod): """ Returns a generator over the results for the specified request, which is over a set of objects of the specified size. The objects are returned by call to the specified method, which must take a single integer as an argument. The returned generator yields a sequence of (object, nextPageToken) pairs, which allows this iteration to be picked up at any point. """ currentIndex = 0 if request.page_token: currentIndex, = _parsePageToken(request.page_token, 1) while currentIndex < numObjects: object_ = getByIndexMethod(currentIndex) currentIndex += 1 nextPageToken = None if currentIndex < numObjects: nextPageToken = str(currentIndex) yield object_.toProtocolElement(), nextPageToken def _objectListGenerator(self, request, objectList): """ Returns a generator over the objects in the specified list using _topLevelObjectGenerator to generate page tokens. """ return self._topLevelObjectGenerator( request, len(objectList), lambda index: objectList[index]) def _singleObjectGenerator(self, datamodelObject): """ Returns a generator suitable for a search method in which the result set is a single object. """ yield (datamodelObject.toProtocolElement(), None) def _noObjectGenerator(self): """ Returns a generator yielding no results """ return iter([]) def datasetsGenerator(self, request): """ Returns a generator over the (dataset, nextPageToken) pairs defined by the specified request """ return self._topLevelObjectGenerator( request, self.getDataRepository().getNumDatasets(), self.getDataRepository().getDatasetByIndex) def readGroupSetsGenerator(self, request): """ Returns a generator over the (readGroupSet, nextPageToken) pairs defined by the specified request. """ dataset = self.getDataRepository().getDataset(request.dataset_id) if request.name == "": return self._topLevelObjectGenerator( request, dataset.getNumReadGroupSets(), dataset.getReadGroupSetByIndex) else: try: readGroupSet = dataset.getReadGroupSetByName(request.name) except exceptions.ReadGroupSetNameNotFoundException: return self._noObjectGenerator() return self._singleObjectGenerator(readGroupSet) def referenceSetsGenerator(self, request): """ Returns a generator over the (referenceSet, nextPageToken) pairs defined by the specified request. """ results = [] for obj in self.getDataRepository().getReferenceSets(): include = True if request.md5checksum: if request.md5checksum != obj.getMd5Checksum(): include = False if request.accession: if request.accession not in obj.getSourceAccessions(): include = False if request.assembly_id: if request.assembly_id != obj.getAssemblyId(): include = False if include: results.append(obj) return self._objectListGenerator(request, results) def referencesGenerator(self, request): """ Returns a generator over the (reference, nextPageToken) pairs defined by the specified request. """ referenceSet = self.getDataRepository().getReferenceSet( request.reference_set_id) results = [] for obj in referenceSet.getReferences(): include = True if request.md5checksum: if request.md5checksum != obj.getMd5Checksum(): include = False if request.accession: if request.accession not in obj.getSourceAccessions(): include = False if include: results.append(obj) return self._objectListGenerator(request, results) def variantSetsGenerator(self, request): """ Returns a generator over the (variantSet, nextPageToken) pairs defined by the specified request. 
""" dataset = self.getDataRepository().getDataset(request.dataset_id) return self._topLevelObjectGenerator( request, dataset.getNumVariantSets(), dataset.getVariantSetByIndex) def variantAnnotationSetsGenerator(self, request): """ Returns a generator over the (variantAnnotationSet, nextPageToken) pairs defined by the specified request. """ compoundId = datamodel.VariantSetCompoundId.parse( request.variant_set_id) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(request.variant_set_id) return self._topLevelObjectGenerator( request, variantSet.getNumVariantAnnotationSets(), variantSet.getVariantAnnotationSetByIndex) def readsGenerator(self, request): """ Returns a generator over the (read, nextPageToken) pairs defined by the specified request """ if not request.reference_id: raise exceptions.UnmappedReadsNotSupported() if len(request.read_group_ids) < 1: raise exceptions.BadRequestException( "At least one readGroupId must be specified") elif len(request.read_group_ids) == 1: return self._readsGeneratorSingle(request) else: return self._readsGeneratorMultiple(request) def _readsGeneratorSingle(self, request): compoundId = datamodel.ReadGroupCompoundId.parse( request.read_group_ids[0]) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) readGroupSet = dataset.getReadGroupSet(compoundId.read_group_set_id) referenceSet = readGroupSet.getReferenceSet() if referenceSet is None: raise exceptions.ReadGroupSetNotMappedToReferenceSetException( readGroupSet.getId()) reference = referenceSet.getReference(request.reference_id) readGroup = readGroupSet.getReadGroup(compoundId.read_group_id) intervalIterator = ReadsIntervalIterator( request, readGroup, reference) return intervalIterator def _readsGeneratorMultiple(self, request): compoundId = datamodel.ReadGroupCompoundId.parse( request.read_group_ids[0]) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) readGroupSet = dataset.getReadGroupSet(compoundId.read_group_set_id) referenceSet = readGroupSet.getReferenceSet() if referenceSet is None: raise exceptions.ReadGroupSetNotMappedToReferenceSetException( readGroupSet.getId()) reference = referenceSet.getReference(request.reference_id) readGroupIds = readGroupSet.getReadGroupIds() if set(readGroupIds) != set(request.read_group_ids): raise exceptions.BadRequestException( "If multiple readGroupIds are specified, " "they must be all of the readGroupIds in a ReadGroup") intervalIterator = ReadsIntervalIterator( request, readGroupSet, reference) return intervalIterator def variantsGenerator(self, request): """ Returns a generator over the (variant, nextPageToken) pairs defined by the specified request. """ compoundId = datamodel.VariantSetCompoundId \ .parse(request.variant_set_id) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) intervalIterator = VariantsIntervalIterator(request, variantSet) return intervalIterator def variantAnnotationsGenerator(self, request): """ Returns a generator over the (variantAnnotaitons, nextPageToken) pairs defined by the specified request. 
""" compoundId = datamodel.VariantAnnotationSetCompoundId.parse( request.variant_annotation_set_id) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) variantAnnotationSet = variantSet.getVariantAnnotationSet( request.variant_annotation_set_id) intervalIterator = VariantAnnotationsIntervalIterator( request, variantAnnotationSet) return intervalIterator def featuresGenerator(self, request): """ Returns a generator over the (features, nextPageToken) pairs defined by the (JSON string) request. """ compoundId = None parentId = None if request.feature_set_id is not None: compoundId = datamodel.FeatureSetCompoundId.parse( request.feature_set_id) if request.parent_id and request.parent_id != "": compoundParentId = datamodel.FeatureCompoundId.parse( request.parent_id) parentId = compoundParentId.featureId # A client can optionally specify JUST the (compound) parentID, # and the server needs to derive the dataset & featureSet # from this (compound) parentID. if compoundId is None: compoundId = compoundParentId else: # check that the dataset and featureSet of the parent # compound ID is the same as that of the featureSetId mismatchCheck = ( compoundParentId.dataset_id != compoundId.dataset_id or compoundParentId.feature_set_id != compoundId.feature_set_id) if mismatchCheck: raise exceptions.ParentIncompatibleWithFeatureSet() if compoundId is None: raise exceptions.FeatureSetNotSpecifiedException() dataset = self.getDataRepository().getDataset( compoundId.dataset_id) featureSet = dataset.getFeatureSet(compoundId.feature_set_id) return featureSet.getFeatures( request.reference_name, request.start, request.end, request.page_token, request.page_size, request.feature_types, parentId) def callSetsGenerator(self, request): """ Returns a generator over the (callSet, nextPageToken) pairs defined by the specified request. """ compoundId = datamodel.VariantSetCompoundId.parse( request.variant_set_id) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) if request.name == "": return self._topLevelObjectGenerator( request, variantSet.getNumCallSets(), variantSet.getCallSetByIndex) else: try: callSet = variantSet.getCallSetByName(request.name) except exceptions.CallSetNameNotFoundException: return self._noObjectGenerator() return self._singleObjectGenerator(callSet) def featureSetsGenerator(self, request): """ Returns a generator over the (featureSet, nextPageToken) pairs defined by the specified request. """ dataset = self.getDataRepository().getDataset(request.dataset_id) return self._topLevelObjectGenerator( request, dataset.getNumFeatureSets(), dataset.getFeatureSetByIndex) ########################################################### # # Public API methods. Each of these methods implements the # corresponding API end point, and return data ready to be # written to the wire. # ########################################################### def runGetRequest(self, obj): """ Runs a get request by converting the specified datamodel object into its protocol representation. """ protocolElement = obj.toProtocolElement() jsonString = protocol.toJson(protocolElement) return jsonString def runSearchRequest( self, requestStr, requestClass, responseClass, objectGenerator): """ Runs the specified request. The request is a string containing a JSON representation of an instance of the specified requestClass. 
We return a string representation of an instance of the specified responseClass in JSON format. Objects are filled into the page list using the specified object generator, which must return (object, nextPageToken) pairs, and be able to resume iteration from any point using the nextPageToken attribute of the request object. """ self.startProfile() try: request = protocol.fromJson(requestStr, requestClass) except protocol.json_format.ParseError: raise exceptions.InvalidJsonException(requestStr) # TODO How do we detect when the page size is not set? if not request.page_size: request.page_size = self._defaultPageSize if request.page_size < 0: raise exceptions.BadPageSizeException(request.page_size) responseBuilder = protocol.SearchResponseBuilder( responseClass, request.page_size, self._maxResponseLength) nextPageToken = None for obj, nextPageToken in objectGenerator(request): responseBuilder.addValue(obj) if responseBuilder.isFull(): break responseBuilder.setNextPageToken(nextPageToken) responseString = responseBuilder.getSerializedResponse() self.endProfile() return responseString def runListReferenceBases(self, id_, requestArgs): """ Runs a listReferenceBases request for the specified ID and request arguments. """ compoundId = datamodel.ReferenceCompoundId.parse(id_) referenceSet = self.getDataRepository().getReferenceSet( compoundId.reference_set_id) reference = referenceSet.getReference(id_) start = _parseIntegerArgument(requestArgs, 'start', 0) end = _parseIntegerArgument(requestArgs, 'end', reference.getLength()) if end == 0: # assume meant "get all" end = reference.getLength() if 'pageToken' in requestArgs: pageTokenStr = requestArgs['pageToken'] if pageTokenStr != "": start = _parsePageToken(pageTokenStr, 1)[0] chunkSize = self._maxResponseLength nextPageToken = None if start + chunkSize < end: end = start + chunkSize nextPageToken = str(start + chunkSize) sequence = reference.getBases(start, end) # build response response = protocol.ListReferenceBasesResponse() response.offset = start response.sequence = sequence if nextPageToken is not None: response.next_page_token = nextPageToken return protocol.toJson(response) # Get requests. def runGetCallSet(self, id_): """ Returns a callset with the given id """ compoundId = datamodel.CallSetCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) callSet = variantSet.getCallSet(id_) return self.runGetRequest(callSet) def runGetVariant(self, id_): """ Returns a variant with the given id """ compoundId = datamodel.VariantCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) gaVariant = variantSet.getVariant(compoundId) # TODO variant is a special case here, as it's returning a # protocol element rather than a datamodel object. We should # fix this for consistency. jsonString = protocol.toJson(gaVariant) return jsonString def runGetFeature(self, id_): """ Returns JSON string of the feature object corresponding to the feature compoundID passed in. 
""" compoundId = datamodel.FeatureCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) featureSet = dataset.getFeatureSet(compoundId.feature_set_id) gaFeature = featureSet.getFeature(compoundId) jsonString = protocol.toJson(gaFeature) return jsonString def runGetReadGroupSet(self, id_): """ Returns a readGroupSet with the given id_ """ compoundId = datamodel.ReadGroupSetCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) readGroupSet = dataset.getReadGroupSet(id_) return self.runGetRequest(readGroupSet) def runGetReadGroup(self, id_): """ Returns a read group with the given id_ """ compoundId = datamodel.ReadGroupCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) readGroupSet = dataset.getReadGroupSet(compoundId.read_group_set_id) readGroup = readGroupSet.getReadGroup(id_) return self.runGetRequest(readGroup) def runGetReference(self, id_): """ Runs a getReference request for the specified ID. """ compoundId = datamodel.ReferenceCompoundId.parse(id_) referenceSet = self.getDataRepository().getReferenceSet( compoundId.reference_set_id) reference = referenceSet.getReference(id_) return self.runGetRequest(reference) def runGetReferenceSet(self, id_): """ Runs a getReferenceSet request for the specified ID. """ referenceSet = self.getDataRepository().getReferenceSet(id_) return self.runGetRequest(referenceSet) def runGetVariantSet(self, id_): """ Runs a getVariantSet request for the specified ID. """ compoundId = datamodel.VariantSetCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(id_) return self.runGetRequest(variantSet) def runGetFeatureSet(self, id_): """ Runs a getFeatureSet request for the specified ID. """ compoundId = datamodel.FeatureSetCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) featureSet = dataset.getFeatureSet(id_) return self.runGetRequest(featureSet) def runGetDataset(self, id_): """ Runs a getDataset request for the specified ID. """ dataset = self.getDataRepository().getDataset(id_) return self.runGetRequest(dataset) def runGetVariantAnnotationSet(self, id_): """ Runs a getVariantSet request for the specified ID. """ compoundId = datamodel.VariantAnnotationSetCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) variantAnnotationSet = variantSet.getVariantAnnotationSet(id_) return self.runGetRequest(variantAnnotationSet) # Search requests. def runSearchReadGroupSets(self, request): """ Runs the specified SearchReadGroupSetsRequest. """ return self.runSearchRequest( request, protocol.SearchReadGroupSetsRequest, protocol.SearchReadGroupSetsResponse, self.readGroupSetsGenerator) def runSearchReads(self, request): """ Runs the specified SearchReadsRequest. """ return self.runSearchRequest( request, protocol.SearchReadsRequest, protocol.SearchReadsResponse, self.readsGenerator) def runSearchReferenceSets(self, request): """ Runs the specified SearchReferenceSetsRequest. """ return self.runSearchRequest( request, protocol.SearchReferenceSetsRequest, protocol.SearchReferenceSetsResponse, self.referenceSetsGenerator) def runSearchReferences(self, request): """ Runs the specified SearchReferenceRequest. 
""" return self.runSearchRequest( request, protocol.SearchReferencesRequest, protocol.SearchReferencesResponse, self.referencesGenerator) def runSearchVariantSets(self, request): """ Runs the specified SearchVariantSetsRequest. """ return self.runSearchRequest( request, protocol.SearchVariantSetsRequest, protocol.SearchVariantSetsResponse, self.variantSetsGenerator) def runSearchVariantAnnotationSets(self, request): """ Runs the specified SearchVariantAnnotationSetsRequest. """ return self.runSearchRequest( request, protocol.SearchVariantAnnotationSetsRequest, protocol.SearchVariantAnnotationSetsResponse, self.variantAnnotationSetsGenerator) def runSearchVariants(self, request): """ Runs the specified SearchVariantRequest. """ return self.runSearchRequest( request, protocol.SearchVariantsRequest, protocol.SearchVariantsResponse, self.variantsGenerator) def runSearchVariantAnnotations(self, request): """ Runs the specified SearchVariantAnnotationsRequest. """ return self.runSearchRequest( request, protocol.SearchVariantAnnotationsRequest, protocol.SearchVariantAnnotationsResponse, self.variantAnnotationsGenerator) def runSearchCallSets(self, request): """ Runs the specified SearchCallSetsRequest. """ return self.runSearchRequest( request, protocol.SearchCallSetsRequest, protocol.SearchCallSetsResponse, self.callSetsGenerator) def runSearchDatasets(self, request): """ Runs the specified SearchDatasetsRequest. """ return self.runSearchRequest( request, protocol.SearchDatasetsRequest, protocol.SearchDatasetsResponse, self.datasetsGenerator) def runSearchFeatureSets(self, request): """ Returns a SearchFeatureSetsResponse for the specified SearchFeatureSetsRequest object. """ return self.runSearchRequest( request, protocol.SearchFeatureSetsRequest, protocol.SearchFeatureSetsResponse, self.featureSetsGenerator) def runSearchFeatures(self, request): """ Returns a SearchFeaturesResponse for the specified SearchFeaturesRequest object. :param request: JSON string representing searchFeaturesRequest :return: JSON string representing searchFeatureResponse """ return self.runSearchRequest( request, protocol.SearchFeaturesRequest, protocol.SearchFeaturesResponse, self.featuresGenerator)
{ "content_hash": "02c10f195bb98cdc304e55b14432206b", "timestamp": "", "source": "github", "line_count": 962, "max_line_length": 79, "avg_line_length": 38.09043659043659, "alnum_prop": 0.6338181917419425, "repo_name": "macieksmuga/server", "id": "a011aa3ebc5912b7894e3929248a01ebb43c3a92", "size": "36643", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ga4gh/backend.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "6325" }, { "name": "Jupyter Notebook", "bytes": "41899" }, { "name": "Python", "bytes": "951829" }, { "name": "Shell", "bytes": "973" } ], "symlink_target": "" }
# Quick manual test of reading settings.ini with configparser.
import datetime
import configparser

config = configparser.ConfigParser(allow_no_value=True)
config.read("/Users/matt_weeden/freelance/stock_codes_Ro/code/settings.ini")
print(config.sections())

for key in config['TickerSymbols']:
    print(key)

s_date = config['Settings']['start_date']
print(s_date)
s_date = datetime.datetime.strptime(s_date, "%Y-%m-%d")
print(s_date)
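
# The settings.ini read above is not part of this dump. A plausible layout,
# inferred from the keys the script uses, can be exercised in-memory; note
# that allow_no_value=True is what permits bare ticker-symbol keys, and that
# configparser lower-cases option names by default.
sample = """
[Settings]
start_date = 2017-01-01

[TickerSymbols]
AAPL
GOOG
"""

demo = configparser.ConfigParser(allow_no_value=True)
demo.read_string(sample)
print(list(demo['TickerSymbols']))   # ['aapl', 'goog']
print(datetime.datetime.strptime(demo['Settings']['start_date'], "%Y-%m-%d"))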
{ "content_hash": "2a0f8760f16c96eeac5357456059d816", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 76, "avg_line_length": 24.733333333333334, "alnum_prop": 0.7493261455525606, "repo_name": "mweeden2/stock_codes", "id": "4ef441da6761e15eeaec50330b6ea813347ce51c", "size": "644", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "config_par_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3285" } ], "symlink_target": "" }
from django import forms from . import models class NewPostForm(forms.ModelForm): class Meta: model = models.Post fields = ('title', 'url')
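
# Illustration only (not part of the original app): a minimal view showing
# how the ModelForm above is typically consumed. It would normally live in
# posts/views.py; the "home" URL name and template path here are made up.
from django.shortcuts import redirect, render


def new_post_example(request):
    form = NewPostForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        form.save()  # creates a Post from the validated title + url
        return redirect("home")
    return render(request, "posts/new_post.html", {"form": form})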
{ "content_hash": "4cef0df7b682fafe4ccf0ebafb1887a0", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 35, "avg_line_length": 20.25, "alnum_prop": 0.6481481481481481, "repo_name": "rodriguesrl/reddit-clone-udemy", "id": "39c2767626a51f5c55c72cc7940d3da9e5e2e9ad", "size": "162", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "posts/forms.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "6451" }, { "name": "Python", "bytes": "10545" } ], "symlink_target": "" }
from __future__ import unicode_literals from django import template from django.utils.html import conditional_escape from account.utils import user_display register = template.Library() class UserDisplayNode(template.Node): def __init__(self, user, as_var=None): self.user_var = template.Variable(user) self.as_var = as_var def render(self, context): user = self.user_var.resolve(context) display = user_display(user) if self.as_var: context[self.as_var] = display return "" return conditional_escape(display) @register.tag(name="user_display") def do_user_display(parser, token): """ Example usage:: {% user_display user %} or if you need to use in a {% blocktrans %}:: {% user_display user as user_display} {% blocktrans %}{{ user_display }} has sent you a gift.{% endblocktrans %} """ bits = token.split_contents() if len(bits) == 2: user = bits[1] as_var = None elif len(bits) == 4: user = bits[1] as_var = bits[3] else: raise template.TemplateSyntaxError("'{0}' takes either two or four arguments".format(bits[0])) return UserDisplayNode(user, as_var)
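
# Illustration only: the token handed to do_user_display by Django's parser
# exposes split_contents(), which drives the len(bits) checks above. A
# stand-in token (hypothetical, just for this sketch) shows the two accepted
# argument shapes; the real Token also handles quoted arguments.
if __name__ == "__main__":
    class _FakeToken(object):
        def __init__(self, contents):
            self.contents = contents

        def split_contents(self):
            return self.contents.split()

    print(_FakeToken("user_display user").split_contents())
    # ['user_display', 'user'] -> rendered inline (two bits)
    print(_FakeToken("user_display user as display").split_contents())
    # ['user_display', 'user', 'as', 'display'] -> stored in context (four bits)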
{ "content_hash": "ad32804d3d363f67cbc9b5bbf5043289", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 102, "avg_line_length": 25.53061224489796, "alnum_prop": 0.6147082334132694, "repo_name": "mgpyh/django-user-accounts", "id": "6fccf80d857eeaeb322857bcc75565e9bcd03f2c", "size": "1251", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "account/templatetags/account_tags.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "75724" } ], "symlink_target": "" }
""" We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime. What is the largest n-digit pandigital prime that exists? """ from math import sqrt import primes import permutations primesToCheck = primes.create_primes(sqrt(987654321)) primeFound = False def is_prime(number): limit = sqrt(number) for p in primesToCheck: if p > limit: return True elif number % p == 0: return False return True for x in xrange(10, 1, -1): allPermutations = permutations.permutations(range(1, x)) for perm in sorted([permutations.combine_numbers(y) for y in allPermutations], reverse=True): if is_prime(perm): print str(perm) primeFound = True break if primeFound: break
{ "content_hash": "05cd908d7d91d9445d42cd7428f9552d", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 104, "avg_line_length": 23.342105263157894, "alnum_prop": 0.6538895152198422, "repo_name": "pgrm/project-euler", "id": "67ad93c918563d42d5a5e62e3744c5002f0e5b21", "size": "887", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "0001-0050/41-Pandigital_prime.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "56689" } ], "symlink_target": "" }
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: MBartTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } # fmt: off FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: on class MBartTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import MBartTokenizerFast >>> tokenizer = MBartTokenizerFast.from_pretrained( ... "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt") ```""" vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = MBartTokenizer prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs ): # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self.can_save_slow_tokenizer = False if not self.vocab_file else True _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) self.lang_code_to_id = { lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES } self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(src_lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
{ "content_hash": "fd368ee60c6bb39f0d59c64d91fe0006", "timestamp": "", "source": "github", "line_count": 277, "max_line_length": 250, "avg_line_length": 40.64981949458484, "alnum_prop": 0.627797513321492, "repo_name": "huggingface/transformers", "id": "0ac14033a44aa800d5c78d8c6e86589f5766c4cc", "size": "11915", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/transformers/models/mbart/tokenization_mbart_fast.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "6021" }, { "name": "C++", "bytes": "12959" }, { "name": "Cuda", "bytes": "175419" }, { "name": "Dockerfile", "bytes": "18218" }, { "name": "Jsonnet", "bytes": "937" }, { "name": "Makefile", "bytes": "3430" }, { "name": "Python", "bytes": "35742012" }, { "name": "Shell", "bytes": "30374" } ], "symlink_target": "" }
""" Server API Reference for Server API (REST/Json) OpenAPI spec version: 2.0.21 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class PaymentModule(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, id=None, name=None, display_name=None, description=None): """ PaymentModule - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'id': 'int', 'name': 'str', 'display_name': 'str', 'description': 'str' } self.attribute_map = { 'id': 'id', 'name': 'name', 'display_name': 'displayName', 'description': 'description' } self._id = id self._name = name self._display_name = display_name self._description = description @property def id(self): """ Gets the id of this PaymentModule. :return: The id of this PaymentModule. :rtype: int """ return self._id @id.setter def id(self, id): """ Sets the id of this PaymentModule. :param id: The id of this PaymentModule. :type: int """ self._id = id @property def name(self): """ Gets the name of this PaymentModule. :return: The name of this PaymentModule. :rtype: str """ return self._name @name.setter def name(self, name): """ Sets the name of this PaymentModule. :param name: The name of this PaymentModule. :type: str """ self._name = name @property def display_name(self): """ Gets the display_name of this PaymentModule. :return: The display_name of this PaymentModule. :rtype: str """ return self._display_name @display_name.setter def display_name(self, display_name): """ Sets the display_name of this PaymentModule. :param display_name: The display_name of this PaymentModule. :type: str """ self._display_name = display_name @property def description(self): """ Gets the description of this PaymentModule. :return: The description of this PaymentModule. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this PaymentModule. :param description: The description of this PaymentModule. :type: str """ self._description = description def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
{ "content_hash": "b19b6a7e94f8ab7844f3b66c8df731a9", "timestamp": "", "source": "github", "line_count": 182, "max_line_length": 80, "avg_line_length": 24.582417582417584, "alnum_prop": 0.5169870362092087, "repo_name": "kinow-io/kinow-python-sdk", "id": "ecd5d59fc807292b7dbc27e8d4ca34b2e6ae766c", "size": "4491", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kinow_client/models/payment_module.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "4659182" }, { "name": "Shell", "bytes": "1666" } ], "symlink_target": "" }
__author__ = "Andre Merzky, Ole Weidner, Alexander Grill" __copyright__ = "Copyright 2012-2013, The SAGA Project" __license__ = "MIT" import radical.utils.signatures as rus import saga.adaptors.base as sab import saga.session as ss import saga.task as st import saga.url as surl import saga.namespace.entry as nsentry from saga.filesystem.constants import * from saga.constants import SYNC, ASYNC, TASK # ------------------------------------------------------------------------------ # class File (nsentry.Entry) : """ Represents a local or remote file. The saga.filesystem.File class represents, as the name indicates, a file on some (local or remote) filesystem. That class offers a number of operations on that file, such as copy, move and remove:: # get a file handle file = saga.filesystem.File("sftp://localhost/tmp/data/data.bin") # copy the file file.copy ("sftp://localhost/tmp/data/data.bak") # move the file file.move ("sftp://localhost/tmp/data/data.new") """ # -------------------------------------------------------------------------- # @rus.takes ('File', rus.optional ((surl.Url, basestring)), rus.optional (int, rus.nothing), rus.optional (ss.Session), rus.optional (sab.Base), rus.optional (dict), rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns (rus.nothing) def __init__ (self, url=None, flags=READ, session=None, _adaptor=None, _adaptor_state={}, _ttype=None) : """ __init__(url, flags=READ, session) Construct a new file object :param url: Url of the (remote) file :type url: :class:`saga.Url` :fgs: :ref:`filesystemflags` :param session: :class:`saga.Session` The specified file is expected to exist -- otherwise a DoesNotExist exception is raised. Also, the URL must point to a file (not to a directory), otherwise a BadParameter exception is raised. Example:: # get a file handle file = saga.filesystem.File("sftp://localhost/tmp/data/data.bin") # print the file's size print file.get_size () """ # param checks if not flags : flags = 0 url = surl.Url (url) if not url.schema : url.schema = 'file' if not url.host : url.host = 'localhost' self._nsentry = super (File, self) self._nsentry.__init__ (url, flags, session, _adaptor, _adaptor_state, _ttype=_ttype) # -------------------------------------------------------------------------- # @classmethod @rus.takes ('File', rus.optional ((surl.Url, basestring)), rus.optional (int, rus.nothing), rus.optional (ss.Session), rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns (st.Task) def create (cls, url=None, flags=READ, session=None, ttype=None) : """ create(url, flags, session) url: saga.Url flags: saga.replica.flags enum session: saga.Session ttype: saga.task.type enum ret: saga.Task """ if not flags : flags = 0 _nsentry = super (File, cls) return _nsentry.create (url, flags, session, ttype=ttype) # ---------------------------------------------------------------- # # filesystem methods # # -------------------------------------------------------------------------- # @rus.takes ('File', rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((bool, st.Task)) def is_file (self, ttype=None) : """ is_file() Returns `True` if instance points to a file, `False` otherwise. """ return self._adaptor.is_file_self (ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def get_size (self, ttype=None) : ''' get_size() Returns the size (in bytes) of a file. 
Example:: # get a file handle file = saga.filesystem.File("sftp://localhost/tmp/data/data.bin") # print the file's size print file.get_size () ''' return self._adaptor.get_size_self (ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', rus.optional (int), rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((basestring, st.Task)) def read (self, size=None, ttype=None) : ''' size : int ttype: saga.task.type enum ret: string / bytearray / saga.Task ''' return self._adaptor.read (size, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', rus.optional (bool)) @rus.returns (st.Task) def close (self, kill=True, ttype=None) : ''' kill : bool ttype: saga.task.type enum ret: string / bytearray / saga.Task ''' return self._adaptor.close () # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def write (self, data, ttype=None) : ''' data : string / bytearray ttype: saga.task.type enum ret: int / saga.Task ''' return self._adaptor.write (data, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', int, rus.optional (rus.one_of (START, CURRENT, END )), rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def seek (self, offset, whence=START, ttype=None) : ''' offset: int whence: seek_mode enum ttype: saga.task.type enum ret: int / saga.Task ''' return self._adaptor.seek (offset, whence, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', rus.list_of (rus.tuple_of (int)), rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((basestring, st.Task)) def read_v (self, iovecs, ttype=None) : ''' iovecs: list [tuple (int, int)] ttype: saga.task.type enum ret: list [bytearray] / saga.Task ''' return self._adaptor.read_v (iovecs, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', rus.list_of (rus.tuple_of ((int, basestring))), rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((rus.list_of (int), st.Task)) def write_v (self, data, ttype=None) : ''' data: list [tuple (int, string / bytearray)] ttype: saga.task.type enum ret: list [int] / saga.Task ''' return self._adaptor.write_v (data, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def size_p (self, pattern, ttype=None) : ''' pattern: string ttype: saga.task.type enum ret: int / saga.Task ''' return self._adaptor.size_p (pattern, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((basestring, st.Task)) def read_p (self, pattern, ttype=None) : ''' pattern: string ttype: saga.task.type enum ret: string / bytearray / saga.Task ''' return self._adaptor.read_p (pattern, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def write_p (self, pattern, data, ttype=None) : ''' pattern: string data: string / bytearray ttype: saga.task.type enum ret: int / 
saga.Task ''' return self._adaptor.write_p (pattern, data, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((rus.list_of (basestring), st.Task)) def modes_e (self, ttype=None) : ''' ttype: saga.task.type enum ret: list [string] / saga.Task ''' return self._adaptor.modes_e (ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def size_e (self, emode, spec, ttype=None) : ''' emode: string spec: string ttype: saga.task.type enum ret: int / saga.Task ''' return self._adaptor.size_e (emode, spec, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((basestring, st.Task)) def read_e (self, emode, spec, ttype=None) : ''' emode: string spec: string ttype: saga.task.type enum ret: bytearray / saga.Task ''' return self._adaptor.read_e (emode, spec, ttype=ttype) # -------------------------------------------------------------------------- # @rus.takes ('File', basestring, basestring, basestring, rus.optional (rus.one_of (SYNC, ASYNC, TASK))) @rus.returns ((int, st.Task)) def write_e (self, emode, spec, data, ttype=None) : ''' emode: string spec: string data: string / bytearray ttype: saga.task.type enum ret: int / saga.Task ''' return self._adaptor.read_e (emode, spec, data, ttype=ttype) size = property (get_size) # int modes_e = property (modes_e) # list [string]
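
# Usage sketch for the class above. It assumes a working saga-python
# install with the sftp adaptor configured and a reachable host, so the
# URL is a placeholder rather than something guaranteed to exist.
if __name__ == "__main__":
    f = File("sftp://localhost/tmp/data/data.bin")
    print(f.get_size())   # explicit getter...
    print(f.size)         # ...and the equivalent property defined above
    head = f.read(16)     # first 16 bytes of the file
    f.close()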
{ "content_hash": "06dbebac200c67cbef223240adfb4b1c", "timestamp": "", "source": "github", "line_count": 359, "max_line_length": 80, "avg_line_length": 32.325905292479106, "alnum_prop": 0.43817320120637654, "repo_name": "poojavade/Genomics_Docker", "id": "bf71975874f28d906fa4611187c1460d116e71c4", "size": "11606", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/saga_python-0.18-py2.7.egg/saga/filesystem/file.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "AGS Script", "bytes": "457842" }, { "name": "Assembly", "bytes": "10509" }, { "name": "C", "bytes": "1265138" }, { "name": "C++", "bytes": "4734960" }, { "name": "CSS", "bytes": "17332" }, { "name": "FORTRAN", "bytes": "10375" }, { "name": "GLSL", "bytes": "493" }, { "name": "Groff", "bytes": "77173" }, { "name": "HTML", "bytes": "395483" }, { "name": "Java", "bytes": "9223" }, { "name": "JavaScript", "bytes": "783663" }, { "name": "Jupyter Notebook", "bytes": "189877" }, { "name": "Lua", "bytes": "28217" }, { "name": "Makefile", "bytes": "77825" }, { "name": "Matlab", "bytes": "4346" }, { "name": "Objective-C", "bytes": "567" }, { "name": "Perl", "bytes": "244796" }, { "name": "Python", "bytes": "54562861" }, { "name": "R", "bytes": "2568" }, { "name": "Shell", "bytes": "40620" }, { "name": "Smarty", "bytes": "21035" }, { "name": "TeX", "bytes": "55310" } ], "symlink_target": "" }
from __future__ import absolute_import from .. import backend as K from .. import activations from .. import initializers from .. import regularizers from .. import constraints from ..engine import Layer from ..engine import InputSpec from ..utils import conv_utils from ..legacy import interfaces # imports for backwards namespace compatibility from .pooling import AveragePooling1D from .pooling import AveragePooling2D from .pooling import AveragePooling3D from .pooling import MaxPooling1D from .pooling import MaxPooling2D from .pooling import MaxPooling3D from ..legacy.layers import AtrousConvolution1D from ..legacy.layers import AtrousConvolution2D class _Conv(Layer): """Abstract nD convolution layer (private, used as implementation base). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. # Arguments rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number output of filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). 
""" def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(_Conv, self).__init__(**kwargs) self.rank = rank self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, rank, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate') self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.input_spec = InputSpec(ndim=self.rank + 2) def build(self, input_shape): if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. Found `None`.') input_dim = input_shape[channel_axis] kernel_shape = self.kernel_size + (input_dim, self.filters) self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None # Set input spec. 
self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim}) self.built = True def call(self, inputs): if self.rank == 1: outputs = K.conv1d( inputs, self.kernel, strides=self.strides[0], padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate[0]) if self.rank == 2: outputs = K.conv2d( inputs, self.kernel, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) if self.rank == 3: outputs = K.conv3d( inputs, self.kernel, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate) if self.use_bias: outputs = K.bias_add( outputs, self.bias, data_format=self.data_format) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): if self.data_format == 'channels_last': space = input_shape[1:-1] new_space = [] for i in range(len(space)): new_dim = conv_utils.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return (input_shape[0],) + tuple(new_space) + (self.filters,) if self.data_format == 'channels_first': space = input_shape[2:] new_space = [] for i in range(len(space)): new_dim = conv_utils.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return (input_shape[0], self.filters) + tuple(new_space) def get_config(self): config = { 'rank': self.rank, 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(_Conv, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Conv1D(_Conv): """1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide an `input_shape` argument (tuple of integers or `None`, e.g. `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors, or `(None, 128)` for variable-length sequences of 128-dimensional vectors. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number output of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. 
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive). `"valid"` means "no padding". `"same"` results in padding the input such that the output has the same length as the original input. `"causal"` results in causal (dilated) convolutions, e.g. output[t] does not depend on input[t+1:]. Useful when modeling temporal data where the model should not violate the temporal order. See [WaveNet: A Generative Model for Raw Audio, section 2.1](https://arxiv.org/abs/1609.03499). dilation_rate: an integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 3D tensor with shape: `(batch_size, steps, input_dim)` # Output shape 3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value might have changed due to padding or strides. """ @interfaces.legacy_conv1d_support def __init__(self, filters, kernel_size, strides=1, padding='valid', dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv1D, self).__init__( rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format='channels_last', dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs) self.input_spec = InputSpec(ndim=3) def get_config(self): config = super(Conv1D, self).get_config() config.pop('rank') config.pop('data_format') return config class Conv2D(_Conv): """2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format="channels_last"`. 
# Arguments filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (i.e. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. 
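# Example

A minimal usage sketch (not exhaustive); the commented output shapes assume the default `data_format="channels_last"`:

```python
from keras.models import Sequential
from keras.layers import Conv2D

model = Sequential()
# a 3x3 kernel with "same" padding preserves the 28x28 spatial size
model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                 input_shape=(28, 28, 1)))
# now model.output_shape == (None, 28, 28, 32)
# "valid" padding with stride 2: floor((28 - 3) / 2) + 1 = 13
model.add(Conv2D(64, (3, 3), strides=(2, 2), padding='valid'))
# now model.output_shape == (None, 13, 13, 64)
```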
""" @interfaces.legacy_conv2d_support def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv2D, self).__init__( rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs) self.input_spec = InputSpec(ndim=4) def get_config(self): config = super(Conv2D, self).get_config() config.pop('rank') return config class Conv3D(_Conv): """3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes with a single channel, in `data_format="channels_last"`. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number output of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along each spatial dimension. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). 
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 5D tensor with shape: `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if data_format='channels_last'. # Output shape 5D tensor with shape: `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have changed due to padding. """ @interfaces.legacy_conv3d_support def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv3D, self).__init__( rank=3, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs) self.input_spec = InputSpec(ndim=5) def get_config(self): config = super(Conv3D, self).get_config() config.pop('rank') return config class Conv2DTranspose(Conv2D): """Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format="channels_last"`. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). 
data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (i.e. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 4D tensor with shape: `(batch, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(batch, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. 
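# Example

A minimal usage sketch: with `padding='same'` the output spatial size is `input_size * stride`, so stride 2 doubles a 14x14 input (assuming the default `data_format="channels_last"`):

```python
from keras.models import Sequential
from keras.layers import Conv2DTranspose

model = Sequential()
# upsample 14x14 feature maps with 32 channels to 28x28 with 16 filters
model.add(Conv2DTranspose(16, (3, 3), strides=(2, 2), padding='same',
                          input_shape=(14, 14, 32)))
# now model.output_shape == (None, 28, 28, 16)
```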
# References - [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf) """ @interfaces.legacy_deconv2d_support def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv2DTranspose, self).__init__( filters, kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs) self.input_spec = InputSpec(ndim=4) def build(self, input_shape): if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. Found `None`.') input_dim = input_shape[channel_axis] kernel_shape = self.kernel_size + (self.filters, input_dim) self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None # Set input spec. 
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim}) self.built = True def call(self, inputs): input_shape = K.shape(inputs) batch_size = input_shape[0] if self.data_format == 'channels_first': h_axis, w_axis = 2, 3 else: h_axis, w_axis = 1, 2 height, width = input_shape[h_axis], input_shape[w_axis] kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.strides # Infer the dynamic output shape: out_height = conv_utils.deconv_length(height, stride_h, kernel_h, self.padding) out_width = conv_utils.deconv_length(width, stride_w, kernel_w, self.padding) if self.data_format == 'channels_first': output_shape = (batch_size, self.filters, out_height, out_width) else: output_shape = (batch_size, out_height, out_width, self.filters) outputs = K.conv2d_transpose( inputs, self.kernel, output_shape, self.strides, padding=self.padding, data_format=self.data_format) if self.use_bias: outputs = K.bias_add( outputs, self.bias, data_format=self.data_format) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): output_shape = list(input_shape) if self.data_format == 'channels_first': c_axis, h_axis, w_axis = 1, 2, 3 else: c_axis, h_axis, w_axis = 3, 1, 2 kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.strides output_shape[c_axis] = self.filters output_shape[h_axis] = conv_utils.deconv_length( output_shape[h_axis], stride_h, kernel_h, self.padding) output_shape[w_axis] = conv_utils.deconv_length( output_shape[w_axis], stride_w, kernel_w, self.padding) return tuple(output_shape) def get_config(self): config = super(Conv2DTranspose, self).get_config() config.pop('dilation_rate') return config class Conv3DTranspose(Conv3D): """Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels if `data_format="channels_last"`. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. 
If you never set it, then it will be "channels_last". dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (i.e. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 5D tensor with shape: `(batch, channels, depth, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(batch, depth, rows, cols, channels)` if data_format='channels_last'. # Output shape 5D tensor with shape: `(batch, filters, new_depth, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(batch, new_depth, new_rows, new_cols, filters)` if data_format='channels_last'. `depth`, `rows` and `cols` values might have changed due to padding. # References - [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf) """ def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Conv3DTranspose, self).__init__( filters, kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs) self.input_spec = InputSpec(ndim=5) def build(self, input_shape): if len(input_shape) != 5: raise ValueError('Inputs should have rank ' + str(5) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. 
Found `None`.') input_dim = input_shape[channel_axis] kernel_shape = self.kernel_size + (self.filters, input_dim) self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None # Set input spec. self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim}) self.built = True def call(self, inputs): input_shape = K.shape(inputs) batch_size = input_shape[0] if self.data_format == 'channels_first': d_axis, h_axis, w_axis = 2, 3, 4 else: d_axis, h_axis, w_axis = 1, 2, 3 depth = input_shape[d_axis] height = input_shape[h_axis] width = input_shape[w_axis] kernel_d, kernel_h, kernel_w = self.kernel_size stride_d, stride_h, stride_w = self.strides # Infer the dynamic output shape: out_depth = conv_utils.deconv_length(depth, stride_d, kernel_d, self.padding) out_height = conv_utils.deconv_length(height, stride_h, kernel_h, self.padding) out_width = conv_utils.deconv_length(width, stride_w, kernel_w, self.padding) if self.data_format == 'channels_first': output_shape = (batch_size, self.filters, out_depth, out_height, out_width) else: output_shape = (batch_size, out_depth, out_height, out_width, self.filters) outputs = K.conv3d_transpose(inputs, self.kernel, output_shape, self.strides, padding=self.padding, data_format=self.data_format) if self.use_bias: outputs = K.bias_add( outputs, self.bias, data_format=self.data_format) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): output_shape = list(input_shape) if self.data_format == 'channels_first': c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4 else: c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3 kernel_d, kernel_h, kernel_w = self.kernel_size stride_d, stride_h, stride_w = self.strides output_shape[c_axis] = self.filters output_shape[d_axis] = conv_utils.deconv_length(output_shape[d_axis], stride_d, kernel_d, self.padding) output_shape[h_axis] = conv_utils.deconv_length(output_shape[h_axis], stride_h, kernel_h, self.padding) output_shape[w_axis] = conv_utils.deconv_length(output_shape[w_axis], stride_w, kernel_w, self.padding) return tuple(output_shape) def get_config(self): config = super(Conv3DTranspose, self).get_config() config.pop('dilation_rate') return config class SeparableConv2D(Conv2D): """Depthwise separable 2D convolution. Separable convolutions consist of first performing a depthwise spatial convolution (which acts on each input channel separately) followed by a pointwise convolution which mixes together the resulting output channels. The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. Intuitively, separable convolutions can be understood as a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. 
Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (i.e. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: Initializer for the depthwise kernel matrix (see [initializers](../initializers.md)). pointwise_initializer: Initializer for the pointwise kernel matrix (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix (see [regularizer](../regularizers.md)). pointwise_regularizer: Regularizer function applied to the pointwise kernel matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). depthwise_constraint: Constraint function applied to the depthwise kernel matrix (see [constraints](../constraints.md)). pointwise_constraint: Constraint function applied to the pointwise kernel matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). # Input shape 4D tensor with shape: `(batch, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(batch, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. 
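# Example

A minimal usage sketch illustrating the two-step factorization described above (shapes assume the default `data_format="channels_last"`):

```python
from keras.models import Sequential
from keras.layers import SeparableConv2D

model = Sequential()
# depthwise step: 3 input channels * depth_multiplier 2 = 6 intermediate
# channels; the 1x1 pointwise step then mixes them into 64 output channels
model.add(SeparableConv2D(64, (3, 3), depth_multiplier=2, padding='same',
                          input_shape=(32, 32, 3)))
# now model.output_shape == (None, 32, 32, 64)
```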
""" @interfaces.legacy_separable_conv2d_support def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs): super(SeparableConv2D, self).__init__( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, bias_constraint=bias_constraint, **kwargs) self.depth_multiplier = depth_multiplier self.depthwise_initializer = initializers.get(depthwise_initializer) self.pointwise_initializer = initializers.get(pointwise_initializer) self.depthwise_regularizer = regularizers.get(depthwise_regularizer) self.pointwise_regularizer = regularizers.get(pointwise_regularizer) self.depthwise_constraint = constraints.get(depthwise_constraint) self.pointwise_constraint = constraints.get(pointwise_constraint) def build(self, input_shape): if len(input_shape) < 4: raise ValueError('Inputs to `SeparableConv2D` should have rank 4. ' 'Received input shape:', str(input_shape)) if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = 3 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs to ' '`SeparableConv2D` ' 'should be defined. Found `None`.') input_dim = int(input_shape[channel_axis]) depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1], input_dim, self.depth_multiplier) pointwise_kernel_shape = (1, 1, self.depth_multiplier * input_dim, self.filters) self.depthwise_kernel = self.add_weight( shape=depthwise_kernel_shape, initializer=self.depthwise_initializer, name='depthwise_kernel', regularizer=self.depthwise_regularizer, constraint=self.depthwise_constraint) self.pointwise_kernel = self.add_weight( shape=pointwise_kernel_shape, initializer=self.pointwise_initializer, name='pointwise_kernel', regularizer=self.pointwise_regularizer, constraint=self.pointwise_constraint) if self.use_bias: self.bias = self.add_weight(shape=(self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None # Set input spec. 
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim}) self.built = True def call(self, inputs): outputs = K.separable_conv2d( inputs, self.depthwise_kernel, self.pointwise_kernel, data_format=self.data_format, strides=self.strides, padding=self.padding) if self.use_bias: outputs = K.bias_add( outputs, self.bias, data_format=self.data_format) if self.activation is not None: return self.activation(outputs) return outputs def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': rows = input_shape[2] cols = input_shape[3] elif self.data_format == 'channels_last': rows = input_shape[1] cols = input_shape[2] rows = conv_utils.conv_output_length(rows, self.kernel_size[0], self.padding, self.strides[0]) cols = conv_utils.conv_output_length(cols, self.kernel_size[1], self.padding, self.strides[1]) if self.data_format == 'channels_first': return (input_shape[0], self.filters, rows, cols) elif self.data_format == 'channels_last': return (input_shape[0], rows, cols, self.filters) def get_config(self): config = super(SeparableConv2D, self).get_config() config.pop('kernel_initializer') config.pop('kernel_regularizer') config.pop('kernel_constraint') config['depth_multiplier'] = self.depth_multiplier config['depthwise_initializer'] = initializers.serialize(self.depthwise_initializer) config['pointwise_initializer'] = initializers.serialize(self.pointwise_initializer) config['depthwise_regularizer'] = regularizers.serialize(self.depthwise_regularizer) config['pointwise_regularizer'] = regularizers.serialize(self.pointwise_regularizer) config['depthwise_constraint'] = constraints.serialize(self.depthwise_constraint) config['pointwise_constraint'] = constraints.serialize(self.pointwise_constraint) return config class UpSampling1D(Layer): """Upsampling layer for 1D inputs. Repeats each temporal step `size` times along the time axis. # Arguments size: integer. Upsampling factor. # Input shape 3D tensor with shape: `(batch, steps, features)`. # Output shape 3D tensor with shape: `(batch, upsampled_steps, features)`. """ @interfaces.legacy_upsampling1d_support def __init__(self, size=2, **kwargs): super(UpSampling1D, self).__init__(**kwargs) self.size = int(size) self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): size = self.size * input_shape[1] if input_shape[1] is not None else None return (input_shape[0], size, input_shape[2]) def call(self, inputs): output = K.repeat_elements(inputs, self.size, axis=1) return output def get_config(self): config = {'size': self.size} base_config = super(UpSampling1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class UpSampling2D(Layer): """Upsampling layer for 2D inputs. Repeats the rows and columns of the data by size[0] and size[1] respectively. # Arguments size: int, or tuple of 2 integers. The upsampling factors for rows and columns. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
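# Example

A minimal usage sketch; each row and column is simply repeated by the corresponding factor (assuming the default `data_format="channels_last"`):

```python
from keras.models import Sequential
from keras.layers import UpSampling2D

model = Sequential()
# rows and cols are each repeated twice: 8x8 -> 16x16
model.add(UpSampling2D(size=(2, 2), input_shape=(8, 8, 3)))
# now model.output_shape == (None, 16, 16, 3)
```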
# Input shape 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, rows, cols)` # Output shape 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, upsampled_rows, upsampled_cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, upsampled_rows, upsampled_cols)` """ @interfaces.legacy_upsampling2d_support def __init__(self, size=(2, 2), data_format=None, **kwargs): super(UpSampling2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) self.size = conv_utils.normalize_tuple(size, 2, 'size') self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': height = self.size[0] * input_shape[2] if input_shape[2] is not None else None width = self.size[1] * input_shape[3] if input_shape[3] is not None else None return (input_shape[0], input_shape[1], height, width) elif self.data_format == 'channels_last': height = self.size[0] * input_shape[1] if input_shape[1] is not None else None width = self.size[1] * input_shape[2] if input_shape[2] is not None else None return (input_shape[0], height, width, input_shape[3]) def call(self, inputs): return K.resize_images(inputs, self.size[0], self.size[1], self.data_format) def get_config(self): config = {'size': self.size, 'data_format': self.data_format} base_config = super(UpSampling2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class UpSampling3D(Layer): """Upsampling layer for 3D inputs. Repeats the 1st, 2nd and 3rd dimensions of the data by size[0], size[1] and size[2] respectively. # Arguments size: int, or tuple of 3 integers. The upsampling factors for dim1, dim2 and dim3. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
# Input shape 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, dim1, dim2, dim3, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, dim1, dim2, dim3)` # Output shape 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)` """ @interfaces.legacy_upsampling3d_support def __init__(self, size=(2, 2, 2), data_format=None, **kwargs): self.data_format = conv_utils.normalize_data_format(data_format) self.size = conv_utils.normalize_tuple(size, 3, 'size') self.input_spec = InputSpec(ndim=5) super(UpSampling3D, self).__init__(**kwargs) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': dim1 = self.size[0] * input_shape[2] if input_shape[2] is not None else None dim2 = self.size[1] * input_shape[3] if input_shape[3] is not None else None dim3 = self.size[2] * input_shape[4] if input_shape[4] is not None else None return (input_shape[0], input_shape[1], dim1, dim2, dim3) elif self.data_format == 'channels_last': dim1 = self.size[0] * input_shape[1] if input_shape[1] is not None else None dim2 = self.size[1] * input_shape[2] if input_shape[2] is not None else None dim3 = self.size[2] * input_shape[3] if input_shape[3] is not None else None return (input_shape[0], dim1, dim2, dim3, input_shape[4]) def call(self, inputs): return K.resize_volumes(inputs, self.size[0], self.size[1], self.size[2], self.data_format) def get_config(self): config = {'size': self.size, 'data_format': self.data_format} base_config = super(UpSampling3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ZeroPadding1D(Layer): """Zero-padding layer for 1D input (e.g. temporal sequence). # Arguments padding: int, or tuple of int (length 2). - If int: How many zeros to add at the beginning and end of the padding dimension (axis 1). - If tuple of int (length 2): How many zeros to add at the beginning and at the end of the padding dimension (`(left_pad, right_pad)`). # Input shape 3D tensor with shape `(batch, axis_to_pad, features)` # Output shape 3D tensor with shape `(batch, padded_axis, features)` """ def __init__(self, padding=1, **kwargs): super(ZeroPadding1D, self).__init__(**kwargs) self.padding = conv_utils.normalize_tuple(padding, 2, 'padding') self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): if input_shape[1] is not None: length = input_shape[1] + self.padding[0] + self.padding[1] else: length = None return (input_shape[0], length, input_shape[2]) def call(self, inputs): return K.temporal_padding(inputs, padding=self.padding) def get_config(self): config = {'padding': self.padding} base_config = super(ZeroPadding1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ZeroPadding2D(Layer): """Zero-padding layer for 2D input (e.g. picture). This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor. # Arguments padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to width and height. - If tuple of 2 ints: interpreted as two different symmetric padding values for height and width: `(symmetric_height_pad, symmetric_width_pad)`. 
- If tuple of 2 tuples of 2 ints: interpreted as `((top_pad, bottom_pad), (left_pad, right_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". # Input shape 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, rows, cols)` # Output shape 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, padded_rows, padded_cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, padded_rows, padded_cols)` """ @interfaces.legacy_zeropadding2d_support def __init__(self, padding=(1, 1), data_format=None, **kwargs): super(ZeroPadding2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding)) elif hasattr(padding, '__len__'): if len(padding) != 2: raise ValueError('`padding` should have two elements. ' 'Found: ' + str(padding)) height_padding = conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding') width_padding = conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding') self.padding = (height_padding, width_padding) else: raise ValueError('`padding` should be either an int, ' 'a tuple of 2 ints ' '(symmetric_height_pad, symmetric_width_pad), ' 'or a tuple of 2 tuples of 2 ints ' '((top_pad, bottom_pad), (left_pad, right_pad)). ' 'Found: ' + str(padding)) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': if input_shape[2] is not None: rows = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[3] is not None: cols = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: cols = None return (input_shape[0], input_shape[1], rows, cols) elif self.data_format == 'channels_last': if input_shape[1] is not None: rows = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[2] is not None: cols = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: cols = None return (input_shape[0], rows, cols, input_shape[3]) def call(self, inputs): return K.spatial_2d_padding(inputs, padding=self.padding, data_format=self.data_format) def get_config(self): config = {'padding': self.padding, 'data_format': self.data_format} base_config = super(ZeroPadding2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ZeroPadding3D(Layer): """Zero-padding layer for 3D data (spatial or spatio-temporal). # Arguments padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric padding is applied to all three spatial dimensions. - If tuple of 3 ints: interpreted as three different symmetric padding values: `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`. - If tuple of 3 tuples of 2 ints: interpreted as `((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. 
The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". # Input shape 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)` # Output shape 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_padded_axis, second_padded_axis, third_padded_axis, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_padded_axis, second_padded_axis, third_padded_axis)` """ @interfaces.legacy_zeropadding3d_support def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs): super(ZeroPadding3D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding), (padding, padding)) elif hasattr(padding, '__len__'): if len(padding) != 3: raise ValueError('`padding` should have 3 elements. ' 'Found: ' + str(padding)) dim1_padding = conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding') dim2_padding = conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding') dim3_padding = conv_utils.normalize_tuple(padding[2], 2, '3rd entry of padding') self.padding = (dim1_padding, dim2_padding, dim3_padding) else: raise ValueError('`padding` should be either an int, ' 'a tuple of 3 ints ' '(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), ' 'or a tuple of 3 tuples of 2 ints ' '((left_dim1_pad, right_dim1_pad),' ' (left_dim2_pad, right_dim2_pad),' ' (left_dim3_pad, right_dim3_pad)). ' 'Found: ' + str(padding)) self.input_spec = InputSpec(ndim=5) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': if input_shape[2] is not None: dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: dim1 = None if input_shape[3] is not None: dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: dim2 = None if input_shape[4] is not None: dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1] else: dim3 = None return (input_shape[0], input_shape[1], dim1, dim2, dim3) elif self.data_format == 'channels_last': if input_shape[1] is not None: dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: dim1 = None if input_shape[2] is not None: dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: dim2 = None if input_shape[3] is not None: dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1] else: dim3 = None return (input_shape[0], dim1, dim2, dim3, input_shape[4]) def call(self, inputs): return K.spatial_3d_padding(inputs, padding=self.padding, data_format=self.data_format) def get_config(self): config = {'padding': self.padding, 'data_format': self.data_format} base_config = super(ZeroPadding3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Cropping1D(Layer): """Cropping layer for 1D input (e.g. temporal sequence). It crops along the time dimension (axis 1). 
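# Example

A minimal usage sketch; the output length is the input length minus the total number of cropped steps:

```python
from keras.models import Sequential
from keras.layers import Cropping1D

model = Sequential()
# trim 1 step from the start and 2 steps from the end of the time axis
model.add(Cropping1D(cropping=(1, 2), input_shape=(10, 16)))
# now model.output_shape == (None, 7, 16)
```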
# Arguments cropping: int or tuple of int (length 2) How many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). If a single int is provided, the same value will be used for both. # Input shape 3D tensor with shape `(batch, axis_to_crop, features)` # Output shape 3D tensor with shape `(batch, cropped_axis, features)` """ def __init__(self, cropping=(1, 1), **kwargs): super(Cropping1D, self).__init__(**kwargs) self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping') self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): if input_shape[1] is not None: length = input_shape[1] - self.cropping[0] - self.cropping[1] else: length = None return (input_shape[0], length, input_shape[2]) def call(self, inputs): if self.cropping[1] == 0: return inputs[:, self.cropping[0]:, :] else: return inputs[:, self.cropping[0]: -self.cropping[1], :] def get_config(self): config = {'cropping': self.cropping} base_config = super(Cropping1D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Cropping2D(Layer): """Cropping layer for 2D input (e.g. picture). It crops along spatial dimensions, i.e. width and height. # Arguments cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric cropping is applied to width and height. - If tuple of 2 ints: interpreted as two different symmetric cropping values for height and width: `(symmetric_height_crop, symmetric_width_crop)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_crop, bottom_crop), (left_crop, right_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". # Input shape 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, rows, cols)` # Output shape 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, cropped_rows, cropped_cols, channels)` - If `data_format` is `"channels_first"`: `(batch, channels, cropped_rows, cropped_cols)` # Examples

```python
# Crop the input 2D images or feature maps
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
                     input_shape=(28, 28, 3)))
# now model.output_shape == (None, 24, 20, 3)
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
# now model.output_shape == (None, 20, 16, 64)
```

""" @interfaces.legacy_cropping2d_support def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs): super(Cropping2D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(cropping, int): self.cropping = ((cropping, cropping), (cropping, cropping)) elif hasattr(cropping, '__len__'): if len(cropping) != 2: raise ValueError('`cropping` should have two elements. 
' 'Found: ' + str(cropping)) height_cropping = conv_utils.normalize_tuple( cropping[0], 2, '1st entry of cropping') width_cropping = conv_utils.normalize_tuple( cropping[1], 2, '2nd entry of cropping') self.cropping = (height_cropping, width_cropping) else: raise ValueError('`cropping` should be either an int, ' 'a tuple of 2 ints ' '(symmetric_height_crop, symmetric_width_crop), ' 'or a tuple of 2 tuples of 2 ints ' '((top_crop, bottom_crop), (left_crop, right_crop)). ' 'Found: ' + str(cropping)) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': return (input_shape[0], input_shape[1], input_shape[2] - self.cropping[0][0] - self.cropping[0][1] if input_shape[2] else None, input_shape[3] - self.cropping[1][0] - self.cropping[1][1] if input_shape[3] else None) elif self.data_format == 'channels_last': return (input_shape[0], input_shape[1] - self.cropping[0][0] - self.cropping[0][1] if input_shape[1] else None, input_shape[2] - self.cropping[1][0] - self.cropping[1][1] if input_shape[2] else None, input_shape[3]) def call(self, inputs): if self.data_format == 'channels_first': if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:] elif self.cropping[0][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1]] elif self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]:] return inputs[:, :, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]: -self.cropping[1][1]] elif self.data_format == 'channels_last': if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :] elif self.cropping[0][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], :] elif self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]:, :] return inputs[:, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]: -self.cropping[1][1], :] def get_config(self): config = {'cropping': self.cropping, 'data_format': self.data_format} base_config = super(Cropping2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Cropping3D(Layer): """Cropping layer for 3D data (e.g. spatial or spatio-temporal). # Arguments cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric cropping is applied to all three spatial dimensions. - If tuple of 3 ints: interpreted as three different symmetric cropping values: `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`. - If tuple of 3 tuples of 2 ints: interpreted as `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
# Input shape 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop)` # Output shape 5D tensor with shape: - If `data_format` is `"channels_last"`: `(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)` - If `data_format` is `"channels_first"`: `(batch, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)` """ @interfaces.legacy_cropping3d_support def __init__(self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs): super(Cropping3D, self).__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(cropping, int): self.cropping = ((cropping, cropping), (cropping, cropping), (cropping, cropping)) elif hasattr(cropping, '__len__'): if len(cropping) != 3: raise ValueError('`cropping` should have 3 elements. ' 'Found: ' + str(cropping)) dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2, '1st entry of cropping') dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2, '2nd entry of cropping') dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2, '3rd entry of cropping') self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping) else: raise ValueError('`cropping` should be either an int, ' 'a tuple of 3 ints ' '(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), ' 'or a tuple of 3 tuples of 2 ints ' '((left_dim1_crop, right_dim1_crop),' ' (left_dim2_crop, right_dim2_crop),' ' (left_dim3_crop, right_dim3_crop)). ' 'Found: ' + str(cropping)) self.input_spec = InputSpec(ndim=5) def compute_output_shape(self, input_shape): if self.data_format == 'channels_first': if input_shape[2] is not None: dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1] else: dim1 = None if input_shape[3] is not None: dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1] else: dim2 = None if input_shape[4] is not None: dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1] else: dim3 = None return (input_shape[0], input_shape[1], dim1, dim2, dim3) elif self.data_format == 'channels_last': if input_shape[1] is not None: dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1] else: dim1 = None if input_shape[2] is not None: dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1] else: dim2 = None if input_shape[3] is not None: dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1] else: dim3 = None return (input_shape[0], dim1, dim2, dim3, input_shape[4]) def call(self, inputs): if self.data_format == 'channels_first': if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:] elif self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]: -self.cropping[2][1]] elif self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:] elif self.cropping[0][1] == self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]:] elif self.cropping[0][1] == 0: return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]: -self.cropping[2][1]] elif self.cropping[1][1] == 0: 
return inputs[:, :, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]: -self.cropping[2][1]] elif self.cropping[2][1] == 0: return inputs[:, :, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]:] return inputs[:, :, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]: -self.cropping[2][1]] elif self.data_format == 'channels_last': if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]:, :] elif self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, self.cropping[2][0]: -self.cropping[2][1], :] elif self.cropping[1][1] == self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:, :] elif self.cropping[0][1] == self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:, :] elif self.cropping[0][1] == 0: return inputs[:, self.cropping[0][0]:, self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]: -self.cropping[2][1], :] elif self.cropping[1][1] == 0: return inputs[:, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]: -self.cropping[2][1], :] elif self.cropping[2][1] == 0: return inputs[:, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]:, :] return inputs[:, self.cropping[0][0]: -self.cropping[0][1], self.cropping[1][0]: -self.cropping[1][1], self.cropping[2][0]: -self.cropping[2][1], :] def get_config(self): config = {'cropping': self.cropping, 'data_format': self.data_format} base_config = super(Cropping3D, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Aliases Convolution1D = Conv1D Convolution2D = Conv2D Convolution3D = Conv3D SeparableConvolution2D = SeparableConv2D Convolution2DTranspose = Conv2DTranspose Deconvolution2D = Deconv2D = Conv2DTranspose Deconvolution3D = Deconv3D = Conv3DTranspose # Legacy aliases AtrousConv1D = AtrousConvolution1D AtrousConv2D = AtrousConvolution2D
{ "content_hash": "d271bbfadc818c733f75b89149b1799a", "timestamp": "", "source": "github", "line_count": 2128, "max_line_length": 123, "avg_line_length": 44.86419172932331, "alnum_prop": 0.5511621330037394, "repo_name": "ryfeus/lambda-packs", "id": "5c3aac406dd8e63e51ae020ba4cad5bc45135f09", "size": "95495", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Keras_tensorflow_nightly/source2.7/keras/layers/convolutional.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "9768343" }, { "name": "C++", "bytes": "76566960" }, { "name": "CMake", "bytes": "191097" }, { "name": "CSS", "bytes": "153538" }, { "name": "Cuda", "bytes": "61768" }, { "name": "Cython", "bytes": "3110222" }, { "name": "Fortran", "bytes": "110284" }, { "name": "HTML", "bytes": "248658" }, { "name": "JavaScript", "bytes": "62920" }, { "name": "MATLAB", "bytes": "17384" }, { "name": "Makefile", "bytes": "152150" }, { "name": "Python", "bytes": "549307737" }, { "name": "Roff", "bytes": "26398" }, { "name": "SWIG", "bytes": "142" }, { "name": "Shell", "bytes": "7790" }, { "name": "Smarty", "bytes": "4090" }, { "name": "TeX", "bytes": "152062" }, { "name": "XSLT", "bytes": "305540" } ], "symlink_target": "" }
"""Starts a virtual machine.""" from baseCmd import * from baseResponse import * class startVirtualMachineCmd (baseCmd): typeInfo = {} def __init__(self): self.isAsync = "true" """The ID of the virtual machine""" """Required""" self.id = None self.typeInfo['id'] = 'uuid' """Deployment planner to use for vm allocation. Available to ROOT admin only""" self.deploymentplanner = None self.typeInfo['deploymentplanner'] = 'string' """destination Host ID to deploy the VM to - parameter available for root admin only""" self.hostid = None self.typeInfo['hostid'] = 'uuid' self.required = ["id", ] class startVirtualMachineResponse (baseResponse): typeInfo = {} def __init__(self): """the ID of the virtual machine""" self.id = None self.typeInfo['id'] = 'string' """the account associated with the virtual machine""" self.account = None self.typeInfo['account'] = 'string' """the number of cpu this virtual machine is running with""" self.cpunumber = None self.typeInfo['cpunumber'] = 'integer' """the speed of each cpu""" self.cpuspeed = None self.typeInfo['cpuspeed'] = 'integer' """the amount of the vm's CPU currently used""" self.cpuused = None self.typeInfo['cpuused'] = 'string' """the date when this virtual machine was created""" self.created = None self.typeInfo['created'] = 'date' """Vm details in key/value pairs.""" self.details = None self.typeInfo['details'] = 'map' """the read (io) of disk on the vm""" self.diskioread = None self.typeInfo['diskioread'] = 'long' """the write (io) of disk on the vm""" self.diskiowrite = None self.typeInfo['diskiowrite'] = 'long' """the read (bytes) of disk on the vm""" self.diskkbsread = None self.typeInfo['diskkbsread'] = 'long' """the write (bytes) of disk on the vm""" self.diskkbswrite = None self.typeInfo['diskkbswrite'] = 'long' """the ID of the disk offering of the virtual machine""" self.diskofferingid = None self.typeInfo['diskofferingid'] = 'string' """the name of the disk offering of the virtual machine""" self.diskofferingname = None self.typeInfo['diskofferingname'] = 'string' """user generated name. 
The name of the virtual machine is returned if no displayname exists.""" self.displayname = None self.typeInfo['displayname'] = 'string' """an optional field whether to the display the vm to the end user or not.""" self.displayvm = None self.typeInfo['displayvm'] = 'boolean' """the name of the domain in which the virtual machine exists""" self.domain = None self.typeInfo['domain'] = 'string' """the ID of the domain in which the virtual machine exists""" self.domainid = None self.typeInfo['domainid'] = 'string' """the virtual network for the service offering""" self.forvirtualnetwork = None self.typeInfo['forvirtualnetwork'] = 'boolean' """the group name of the virtual machine""" self.group = None self.typeInfo['group'] = 'string' """the group ID of the virtual machine""" self.groupid = None self.typeInfo['groupid'] = 'string' """Os type ID of the virtual machine""" self.guestosid = None self.typeInfo['guestosid'] = 'string' """true if high-availability is enabled, false otherwise""" self.haenable = None self.typeInfo['haenable'] = 'boolean' """the ID of the host for the virtual machine""" self.hostid = None self.typeInfo['hostid'] = 'string' """the name of the host for the virtual machine""" self.hostname = None self.typeInfo['hostname'] = 'string' """the hypervisor on which the template runs""" self.hypervisor = None self.typeInfo['hypervisor'] = 'string' """instance name of the user vm; this parameter is returned to the ROOT admin only""" self.instancename = None self.typeInfo['instancename'] = 'string' """true if vm contains XS tools inorder to support dynamic scaling of VM cpu/memory.""" self.isdynamicallyscalable = None self.typeInfo['isdynamicallyscalable'] = 'boolean' """an alternate display text of the ISO attached to the virtual machine""" self.isodisplaytext = None self.typeInfo['isodisplaytext'] = 'string' """the ID of the ISO attached to the virtual machine""" self.isoid = None self.typeInfo['isoid'] = 'string' """the name of the ISO attached to the virtual machine""" self.isoname = None self.typeInfo['isoname'] = 'string' """ssh key-pair""" self.keypair = None self.typeInfo['keypair'] = 'string' """the memory allocated for the virtual machine""" self.memory = None self.typeInfo['memory'] = 'integer' """the name of the virtual machine""" self.name = None self.typeInfo['name'] = 'string' """the incoming network traffic on the vm""" self.networkkbsread = None self.typeInfo['networkkbsread'] = 'long' """the outgoing network traffic on the host""" self.networkkbswrite = None self.typeInfo['networkkbswrite'] = 'long' """OS type id of the vm""" self.ostypeid = None self.typeInfo['ostypeid'] = 'long' """the password (if exists) of the virtual machine""" self.password = None self.typeInfo['password'] = 'string' """true if the password rest feature is enabled, false otherwise""" self.passwordenabled = None self.typeInfo['passwordenabled'] = 'boolean' """the project name of the vm""" self.project = None self.typeInfo['project'] = 'string' """the project id of the vm""" self.projectid = None self.typeInfo['projectid'] = 'string' """public IP address id associated with vm via Static nat rule""" self.publicip = None self.typeInfo['publicip'] = 'string' """public IP address id associated with vm via Static nat rule""" self.publicipid = None self.typeInfo['publicipid'] = 'string' """device ID of the root volume""" self.rootdeviceid = None self.typeInfo['rootdeviceid'] = 'long' """device type of the root volume""" self.rootdevicetype = None self.typeInfo['rootdevicetype'] = 'string' """the ID 
of the service offering of the virtual machine""" self.serviceofferingid = None self.typeInfo['serviceofferingid'] = 'string' """the name of the service offering of the virtual machine""" self.serviceofferingname = None self.typeInfo['serviceofferingname'] = 'string' """State of the Service from LB rule""" self.servicestate = None self.typeInfo['servicestate'] = 'string' """the state of the virtual machine""" self.state = None self.typeInfo['state'] = 'string' """an alternate display text of the template for the virtual machine""" self.templatedisplaytext = None self.typeInfo['templatedisplaytext'] = 'string' """the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file.""" self.templateid = None self.typeInfo['templateid'] = 'string' """the name of the template for the virtual machine""" self.templatename = None self.typeInfo['templatename'] = 'string' """the user's ID who deployed the virtual machine""" self.userid = None self.typeInfo['userid'] = 'string' """the user's name who deployed the virtual machine""" self.username = None self.typeInfo['username'] = 'string' """the vgpu type used by the virtual machine""" self.vgpu = None self.typeInfo['vgpu'] = 'string' """the ID of the availablility zone for the virtual machine""" self.zoneid = None self.typeInfo['zoneid'] = 'string' """the name of the availability zone for the virtual machine""" self.zonename = None self.typeInfo['zonename'] = 'string' """list of affinity groups associated with the virtual machine""" self.affinitygroup = [] """the list of nics associated with vm""" self.nic = [] """list of security groups associated with the virtual machine""" self.securitygroup = [] """the list of resource tags associated with vm""" self.tags = [] """the ID of the latest async job acting on this object""" self.jobid = None self.typeInfo['jobid'] = '' """the current status of the latest async job acting on this object""" self.jobstatus = None self.typeInfo['jobstatus'] = '' class affinitygroup: def __init__(self): """"the ID of the affinity group""" self.id = None """"the account owning the affinity group""" self.account = None """"the description of the affinity group""" self.description = None """"the domain name of the affinity group""" self.domain = None """"the domain ID of the affinity group""" self.domainid = None """"the name of the affinity group""" self.name = None """"the project name of the affinity group""" self.project = None """"the project ID of the affinity group""" self.projectid = None """"the type of the affinity group""" self.type = None """"virtual machine IDs associated with this affinity group""" self.virtualmachineIds = None class nic: def __init__(self): """"the ID of the nic""" self.id = None """"the broadcast uri of the nic""" self.broadcasturi = None """"device id for the network when plugged into the virtual machine""" self.deviceid = None """"the gateway of the nic""" self.gateway = None """"the IPv6 address of network""" self.ip6address = None """"the cidr of IPv6 network""" self.ip6cidr = None """"the gateway of IPv6 network""" self.ip6gateway = None """"the ip address of the nic""" self.ipaddress = None """"true if nic is default, false otherwise""" self.isdefault = None """"the isolation uri of the nic""" self.isolationuri = None """"true if nic is default, false otherwise""" self.macaddress = None """"the netmask of the nic""" self.netmask = None """"the ID of the corresponding network""" self.networkid = None """"the name of the corresponding 
network""" self.networkname = None """"the Secondary ipv4 addr of nic""" self.secondaryip = None """"the traffic type of the nic""" self.traffictype = None """"the type of the nic""" self.type = None """"Id of the vm to which the nic belongs""" self.virtualmachineid = None class tags: def __init__(self): """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class egressrule: def __init__(self): """"account owning the security group rule""" self.account = None """"the CIDR notation for the base IP address of the security group rule""" self.cidr = None """"the ending IP of the security group rule""" self.endport = None """"the code for the ICMP message response""" self.icmpcode = None """"the type of the ICMP message response""" self.icmptype = None """"the protocol of the security group rule""" self.protocol = None """"the id of the security group rule""" self.ruleid = None """"security group name""" self.securitygroupname = None """"the starting IP of the security group rule""" self.startport = None """"the list of resource tags associated with the rule""" self.tags = [] """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class tags: def __init__(self): """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class tags: def __init__(self): """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class ingressrule: def __init__(self): """"account owning the security group rule""" self.account = None """"the CIDR notation for the base IP address of the security group rule""" self.cidr = None """"the ending IP of the security group rule""" 
self.endport = None """"the code for the ICMP message response""" self.icmpcode = None """"the type of the ICMP message response""" self.icmptype = None """"the protocol of the security group rule""" self.protocol = None """"the id of the security group rule""" self.ruleid = None """"security group name""" self.securitygroupname = None """"the starting IP of the security group rule""" self.startport = None """"the list of resource tags associated with the rule""" self.tags = [] """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class tags: def __init__(self): """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class tags: def __init__(self): """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None class securitygroup: def __init__(self): """"the ID of the security group""" self.id = None """"the account owning the security group""" self.account = None """"the description of the security group""" self.description = None """"the domain name of the security group""" self.domain = None """"the domain ID of the security group""" self.domainid = None """"the name of the security group""" self.name = None """"the project name of the group""" self.project = None """"the project id of the group""" self.projectid = None """"the number of virtualmachines associated with this securitygroup""" self.virtualmachinecount = None """"the list of virtualmachine ids associated with this securitygroup""" self.virtualmachineids = None """"the list of egress rules associated with the security group""" self.egressrule = [] """"account owning the security group rule""" self.account = None """"the CIDR notation for the base IP address of the security group rule""" self.cidr = None """"the ending IP of the security group rule""" self.endport = None """"the code for the ICMP message response""" self.icmpcode = None """"the type of the ICMP message response""" self.icmptype = None """"the protocol of the security group rule""" self.protocol = None """"the id of the security group rule""" self.ruleid = None """"security group name""" 
self.securitygroupname = None """"the starting IP of the security group rule""" self.startport = None """"the list of resource tags associated with the rule""" self.tags = [] """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None """"the list of ingress rules associated with the security group""" self.ingressrule = [] """"account owning the security group rule""" self.account = None """"the CIDR notation for the base IP address of the security group rule""" self.cidr = None """"the ending IP of the security group rule""" self.endport = None """"the code for the ICMP message response""" self.icmpcode = None """"the type of the ICMP message response""" self.icmptype = None """"the protocol of the security group rule""" self.protocol = None """"the id of the security group rule""" self.ruleid = None """"security group name""" self.securitygroupname = None """"the starting IP of the security group rule""" self.startport = None """"the list of resource tags associated with the rule""" self.tags = [] """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None """"the list of resource tags associated with the rule""" self.tags = [] """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None """"the ID of the latest async job acting on this object""" self.jobid = None """"the current status of the latest async job acting on this object""" self.jobstatus = None class tags: def __init__(self): """"the account associated with the tag""" self.account = None """"customer associated with the tag""" self.customer = None """"the domain associated with the tag""" self.domain = None """"the ID of the domain associated with the tag""" self.domainid = None """"tag key name""" self.key = None """"the project name where tag belongs to""" self.project = None """"the project id the tag belongs to""" self.projectid = None """"id of the resource""" self.resourceid = None """"resource type""" self.resourcetype = None """"tag value""" self.value = None
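
# ---------------------------------------------------------------------------
# Usage sketch for the generated command/response pair above. Hedged: it
# assumes an already-configured marvin API client (as produced by marvin's
# test client setup), and relies on marvin's convention of exposing one
# client method per command module.
from marvin.cloudstackAPI import startVirtualMachine


def start_vm(apiclient, vm_id):
    """Start a VM by UUID; returns a startVirtualMachineResponse."""
    cmd = startVirtualMachine.startVirtualMachineCmd()
    cmd.id = vm_id  # the only member of cmd.required
    # Optional, root-admin-only knobs can be set the same way, e.g.
    # cmd.hostid or cmd.deploymentplanner.
    return apiclient.startVirtualMachine(cmd)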
{ "content_hash": "7ff7286f83d95c435fa919c07e571449", "timestamp": "", "source": "github", "line_count": 629, "max_line_length": 131, "avg_line_length": 38.82193958664547, "alnum_prop": 0.5727507268929931, "repo_name": "MissionCriticalCloud/marvin", "id": "84f6f32c12f8e6ad1970194e3bec94be2e67a162", "size": "24419", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "marvin/cloudstackAPI/startVirtualMachine.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2573421" } ], "symlink_target": "" }
"""Syncronizes Zookeeper to file system. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging import glob import os import kazoo from treadmill import fs from treadmill import utils from treadmill import zknamespace as z from treadmill import zkutils from treadmill import zkwatchers from treadmill.zksync import utils as zksync_utils _LOGGER = logging.getLogger(__name__) class Zk2Fs: """Syncronize Zookeeper with file system.""" def __init__(self, zkclient, fsroot, tmp_dir=None): self.watches = set() self.processed_once = set() self.zkclient = zkclient self.fsroot = fsroot self.tmp_dir = tmp_dir self.ready = False self.zkclient.add_listener(zkutils.exit_on_lost) def mark_ready(self): """Mark itself as ready, typically past initial sync.""" self.ready = True self._update_last() def _update_last(self): """Update modify file timestamp to indicate changes were made.""" if self.ready: zksync_utils.create_ready_file(self.fsroot) def _default_on_del(self, zkpath): """Default callback invoked on node delete, remove file.""" fs.rm_safe(self.fpath(zkpath)) def _default_on_add(self, zkpath): """Default callback invoked on node is added, default - sync data. Race condition is possible in which added node does no longer exist when we try to sync data. """ try: self.sync_data(zkpath) except kazoo.client.NoNodeError: _LOGGER.warning( 'Tried to add node that no longer exists: %s', zkpath ) fpath = self.fpath(zkpath) fs.rm_safe(fpath) def _write_data(self, fpath, data, stat): """Write Zookeeper data to filesystem. """ zksync_utils.write_data( fpath, data, stat.last_modified, raise_err=True, tmp_dir=self.tmp_dir ) def _data_watch(self, zkpath, data, stat, event, fpath=None): """Invoked when data changes. 
""" fpath = fpath or self.fpath(zkpath) if event is not None and event.type == 'DELETED': _LOGGER.info('Node deleted: %s', zkpath) self.watches.discard(zkpath) fs.rm_safe(fpath) elif stat is None: _LOGGER.info('Node does not exist: %s', zkpath) self.watches.discard(zkpath) fs.rm_safe(fpath) else: self._write_data(fpath, data, stat) def _filter_children_actions(self, sorted_children, sorted_filenames, add, remove, common): """sorts the children actions to add, remove and common.""" num_children = len(sorted_children) num_filenames = len(sorted_filenames) child_idx = 0 file_idx = 0 while child_idx < num_children or file_idx < num_filenames: child_name = None if child_idx < num_children: child_name = sorted_children[child_idx] file_name = None if file_idx < num_filenames: file_name = sorted_filenames[file_idx] if child_name is None: remove.append(file_name) file_idx += 1 elif file_name is None: add.append(child_name) child_idx += 1 elif child_name == file_name: common.append(child_name) child_idx += 1 file_idx += 1 elif child_name < file_name: add.append(child_name) child_idx += 1 else: remove.append(file_name) file_idx += 1 def _children_watch(self, zkpath, children, watch_data, on_add, on_del, cont_watch_predicate=None): """Callback invoked on children watch.""" fpath = self.fpath(zkpath) sorted_children = sorted(children) sorted_filenames = sorted(map(os.path.basename, glob.glob(os.path.join(fpath, '*')))) add = [] remove = [] common = [] self._filter_children_actions(sorted_children, sorted_filenames, add, remove, common) for node in remove: _LOGGER.info('Delete: %s', node) zknode = z.join_zookeeper_path(zkpath, node) self.watches.discard(zknode) on_del(zknode) if zkpath not in self.processed_once: self.processed_once.add(zkpath) for node in common: _LOGGER.info('Common: %s', node) zknode = z.join_zookeeper_path(zkpath, node) if watch_data: self.watches.add(zknode) on_add(zknode) for node in add: _LOGGER.info('Add: %s', node) zknode = z.join_zookeeper_path(zkpath, node) if watch_data: self.watches.add(zknode) on_add(zknode) if cont_watch_predicate: return cont_watch_predicate(zkpath, sorted_children) return True def fpath(self, zkpath): """Returns file path to given zk node.""" return os.path.join(self.fsroot, zkpath.lstrip('/')) def sync_data(self, zkpath, fpath=None, watch=False): """Sync zk node data to file.""" fpath = fpath or self.fpath(zkpath) if watch: self.watches.add(zkpath) if zkpath in self.watches: @zkwatchers.ExistingDataWatch(self.zkclient, zkpath) @utils.exit_on_unhandled def _data_watch(data, stat, event): """Invoked when data changes.""" self._data_watch(zkpath, data, stat, event, fpath) self._update_last() else: data, stat = self.zkclient.get(zkpath) self._write_data(fpath, data, stat) self._update_last() def _make_children_watch(self, zkpath, watch_data=False, on_add=None, on_del=None, cont_watch_predicate=None): """Make children watch function.""" _LOGGER.debug('Establish children watch on: %s', zkpath) @self.zkclient.ChildrenWatch(zkpath) @utils.exit_on_unhandled def _children_watch(children): """Callback invoked on children watch.""" renew = self._children_watch( zkpath, children, watch_data, on_add, on_del, cont_watch_predicate=cont_watch_predicate, ) self._update_last() return renew def sync_children(self, zkpath, watch_data=False, on_add=None, on_del=None, need_watch_predicate=None, cont_watch_predicate=None): """Sync children of zkpath to fpath. need_watch_predicate decides if the watch is needed based on the zkpath alone. 
        cont_watch_predicate decides if the watch is needed based on the
        content of zkpath children.

        To avoid a race condition, both need to return False; if one of
        them returns True, the watch will be set.
        """
        _LOGGER.info('sync children: zk = %s, watch_data: %s',
                     zkpath, watch_data)

        fpath = self.fpath(zkpath)
        fs.mkdir_safe(fpath)

        done_file = os.path.join(fpath, '.done')
        if os.path.exists(done_file):
            _LOGGER.info('Found done file: %s, nothing to watch.', done_file)
            return

        if not on_del:
            on_del = self._default_on_del
        if not on_add:
            on_add = self._default_on_add

        need_watch = True
        if need_watch_predicate:
            need_watch = need_watch_predicate(zkpath)
            _LOGGER.info('Need watch on %s: %s', zkpath, need_watch)

        if need_watch:
            self._make_children_watch(
                zkpath, watch_data, on_add, on_del,
                cont_watch_predicate=cont_watch_predicate
            )
        else:
            try:
                children = self.zkclient.get_children(zkpath)
            except kazoo.client.NoNodeError:
                children = []

            need_watch = self._children_watch(
                zkpath,
                children,
                watch_data,
                on_add,
                on_del,
                cont_watch_predicate=cont_watch_predicate,
            )

            if need_watch:
                self._make_children_watch(
                    zkpath, watch_data, on_add, on_del,
                    cont_watch_predicate=cont_watch_predicate
                )

            self._update_last()

        if not need_watch:
            utils.touch(done_file)
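
# ---------------------------------------------------------------------------
# Wiring sketch for Zk2Fs. Hedged: hosts and paths are placeholders, and a
# bare kazoo client is used here instead of treadmill's own connection
# helpers.
import kazoo.client


def mirror(zkhosts, zkpath, fsroot):
    """Mirror children (and data) of one zk node into fsroot."""
    zkclient = kazoo.client.KazooClient(hosts=zkhosts)
    zkclient.start()

    snapshot = Zk2Fs(zkclient, fsroot)
    # watch_data=True also mirrors node payloads and keeps data watches alive.
    snapshot.sync_children(zkpath, watch_data=True)
    # Publish readiness once the initial sync is in place.
    snapshot.mark_ready()
    return snapshot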
{ "content_hash": "94b85e0cdb31a9883bbd700117455700", "timestamp": "", "source": "github", "line_count": 289, "max_line_length": 78, "avg_line_length": 31.418685121107266, "alnum_prop": 0.5389867841409691, "repo_name": "ceache/treadmill", "id": "adfaea3cc8c14a84c003ec6fceaba00f36e0ea1f", "size": "9080", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lib/python/treadmill/zksync/zk2fs.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PowerShell", "bytes": "3750" }, { "name": "Python", "bytes": "3362298" }, { "name": "Ruby", "bytes": "3712" }, { "name": "Shell", "bytes": "51646" } ], "symlink_target": "" }
# django-bulbs: tests/liveblog/test_liveblog_views.py
from datetime import datetime, timedelta

from django.test.client import RequestFactory
from django.http import Http404

from bulbs.utils.test import BaseIndexableTestCase
from bulbs.liveblog.views import LiveblogNewEntriesView
from bulbs.liveblog.models import LiveBlogEntry
from example.testcontent.models import TestLiveBlog


class TestLiveBlogNewEntriesView(BaseIndexableTestCase):

    def setUp(self):
        super(TestLiveBlogNewEntriesView, self).setUp()
        self.liveblog = TestLiveBlog.objects.create(published=datetime.now())
        self.entry1 = LiveBlogEntry.objects.create(
            liveblog=self.liveblog, published=datetime.now())

    def test_requires_entry_ids_param(self):
        view = LiveblogNewEntriesView.as_view()
        with self.assertRaises(ValueError):
            view(
                RequestFactory().get(
                    '/liveblog/this-cool-liveblog-{}/new-entries'.format(
                        self.liveblog.pk)),
                slug='this-cool-live-blog', pk=self.liveblog.pk)

    def test_raises_404_if_liveblog_does_not_exist(self):
        view = LiveblogNewEntriesView.as_view()
        with self.assertRaises(Http404):
            view(
                RequestFactory().get(
                    '/liveblog/this-cool-liveblog-1234/new-entries'),
                slug='this-cool-live-blog', pk=1234)

    def test_renders_new_entries(self):
        entry2 = LiveBlogEntry.objects.create(
            liveblog=self.liveblog,
            published=datetime.now() - timedelta(days=1))
        entry3 = LiveBlogEntry.objects.create(
            liveblog=self.liveblog,
            published=datetime.now() - timedelta(days=1))

        view = LiveblogNewEntriesView.as_view()
        response = view(
            RequestFactory().get(
                '/liveblog/this-cool-liveblog-{}/new-entries?entry_ids={}'.format(
                    self.liveblog.pk,
                    ','.join([str(entry2.pk), str(entry3.pk)]))),
            slug='this-cool-live-blog', pk=self.liveblog.pk)

        self.assertContains(response, '<bulbs-liveblog-entry', count=2)
        self.assertContains(response, 'entry-id="{}"'.format(entry2.pk))
        self.assertContains(response, 'entry-id="{}"'.format(entry3.pk))

    def test_only_renders_published_entries(self):
        entry2 = LiveBlogEntry.objects.create(
            liveblog=self.liveblog,
            published=datetime.now() - timedelta(days=1))
        entry3 = LiveBlogEntry.objects.create(
            liveblog=self.liveblog,
            published=datetime.now() + timedelta(days=1))
        LiveBlogEntry.objects.create(liveblog=self.liveblog)  # never published

        view = LiveblogNewEntriesView.as_view()
        response = view(
            RequestFactory().get(
                '/liveblog/this-cool-liveblog-{}/new-entries?entry_ids={}'.format(
                    self.liveblog.pk,
                    ','.join([str(entry2.pk), str(entry3.pk)]))),
            slug='this-cool-live-blog', pk=self.liveblog.pk)

        self.assertContains(response, '<bulbs-liveblog-entry', count=1)
        self.assertContains(response, 'entry-id="{}"'.format(entry2.pk))
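
# ---------------------------------------------------------------------------
# The tests above pin down the view's query contract: entry_ids is a
# required, comma-separated list of LiveBlogEntry pks, and a missing param
# raises ValueError. Roughly (a sketch of the expected parsing, not the
# actual view code):
def parse_entry_ids(request):
    raw = request.GET.get('entry_ids')
    if raw is None:
        raise ValueError('entry_ids parameter is required')
    return [int(pk) for pk in raw.split(',')]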
{ "content_hash": "6b0daf7188a154c7b7f42041d969cf16", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 90, "avg_line_length": 40.76923076923077, "alnum_prop": 0.6226415094339622, "repo_name": "theonion/django-bulbs", "id": "c6d09fad6bd1957fb593e5c05119b1f264ebecff", "size": "3180", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/liveblog/test_liveblog_views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "36651" }, { "name": "HTML", "bytes": "73968" }, { "name": "JavaScript", "bytes": "57288" }, { "name": "Python", "bytes": "1055540" }, { "name": "Ruby", "bytes": "397" }, { "name": "Shell", "bytes": "1629" } ], "symlink_target": "" }
""" A series of tests to establish that the command-line management tools work as advertised - especially with regards to the handling of the DJANGO_SETTINGS_MODULE and default settings.py files. """ from __future__ import unicode_literals import codecs import os import re import shutil import socket import subprocess import sys import tempfile import unittest import django from django import conf, get_version from django.conf import settings from django.core.management import ( BaseCommand, CommandError, call_command, color, ) from django.db import ConnectionHandler from django.db.migrations.exceptions import MigrationSchemaMissing from django.db.migrations.recorder import MigrationRecorder from django.test import ( LiveServerTestCase, SimpleTestCase, TestCase, mock, override_settings, ) from django.test.runner import DiscoverRunner from django.utils._os import npath, upath from django.utils.encoding import force_text from django.utils.six import PY2, PY3, StringIO custom_templates_dir = os.path.join(os.path.dirname(upath(__file__)), 'custom_templates') SYSTEM_CHECK_MSG = 'System check identified no issues' class AdminScriptTestCase(unittest.TestCase): @classmethod def setUpClass(cls): super(AdminScriptTestCase, cls).setUpClass() cls.test_dir = os.path.realpath(os.path.join( tempfile.gettempdir(), cls.__name__, 'test_project', )) if not os.path.exists(cls.test_dir): os.makedirs(cls.test_dir) with open(os.path.join(cls.test_dir, '__init__.py'), 'w'): pass @classmethod def tearDownClass(cls): shutil.rmtree(cls.test_dir) super(AdminScriptTestCase, cls).tearDownClass() def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None): if is_dir: settings_dir = os.path.join(self.test_dir, filename) os.mkdir(settings_dir) settings_file_path = os.path.join(settings_dir, '__init__.py') else: settings_file_path = os.path.join(self.test_dir, filename) with open(settings_file_path, 'w') as settings_file: settings_file.write('# -*- coding: utf-8 -*\n') settings_file.write('# Settings file automatically generated by admin_scripts test case\n') if extra: settings_file.write("%s\n" % extra) exports = [ 'DATABASES', 'ROOT_URLCONF', 'SECRET_KEY', ] for s in exports: if hasattr(settings, s): o = getattr(settings, s) if not isinstance(o, (dict, tuple, list)): o = "'%s'" % o settings_file.write("%s = %s\n" % (s, o)) if apps is None: apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'] settings_file.write("INSTALLED_APPS = %s\n" % apps) if sdict: for k, v in sdict.items(): settings_file.write("%s = %s\n" % (k, v)) def remove_settings(self, filename, is_dir=False): full_name = os.path.join(self.test_dir, filename) if is_dir: shutil.rmtree(full_name) else: os.remove(full_name) # Also try to remove the compiled file; if it exists, it could # mess up later tests that depend upon the .py file not existing try: if sys.platform.startswith('java'): # Jython produces module$py.class files os.remove(re.sub(r'\.py$', '$py.class', full_name)) else: # CPython produces module.pyc files os.remove(full_name + 'c') except OSError: pass # Also remove a __pycache__ directory, if it exists cache_name = os.path.join(self.test_dir, '__pycache__') if os.path.isdir(cache_name): shutil.rmtree(cache_name) def _ext_backend_paths(self): """ Returns the paths for any external backend packages. 
""" paths = [] first_package_re = re.compile(r'(^[^\.]+)\.') for backend in settings.DATABASES.values(): result = first_package_re.findall(backend['ENGINE']) if result and result != ['django']: backend_pkg = __import__(result[0]) backend_dir = os.path.dirname(backend_pkg.__file__) paths.append(os.path.dirname(backend_dir)) return paths def run_test(self, script, args, settings_file=None, apps=None): base_dir = os.path.dirname(self.test_dir) # The base dir for Django's tests is one level up. tests_dir = os.path.dirname(os.path.dirname(upath(__file__))) # The base dir for Django is one level above the test dir. We don't use # `import django` to figure that out, so we don't pick up a Django # from site-packages or similar. django_dir = os.path.dirname(tests_dir) ext_backend_base_dirs = self._ext_backend_paths() # Define a temporary environment for the subprocess test_environ = os.environ.copy() if sys.platform.startswith('java'): python_path_var_name = 'JYTHONPATH' else: python_path_var_name = 'PYTHONPATH' old_cwd = os.getcwd() # Set the test environment if settings_file: test_environ['DJANGO_SETTINGS_MODULE'] = str(settings_file) elif 'DJANGO_SETTINGS_MODULE' in test_environ: del test_environ['DJANGO_SETTINGS_MODULE'] python_path = [base_dir, django_dir, tests_dir] python_path.extend(ext_backend_base_dirs) # Use native strings for better compatibility test_environ[str(python_path_var_name)] = npath(os.pathsep.join(python_path)) test_environ[str('PYTHONWARNINGS')] = str('') # Move to the test directory and run os.chdir(self.test_dir) out, err = subprocess.Popen([sys.executable, script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=test_environ, universal_newlines=True).communicate() # Move back to the old working directory os.chdir(old_cwd) return out, err def run_django_admin(self, args, settings_file=None): script_dir = os.path.abspath(os.path.join(os.path.dirname(upath(django.__file__)), 'bin')) return self.run_test(os.path.join(script_dir, 'django-admin.py'), args, settings_file) def run_manage(self, args, settings_file=None): def safe_remove(path): try: os.remove(path) except OSError: pass conf_dir = os.path.dirname(upath(conf.__file__)) template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py-tpl') test_manage_py = os.path.join(self.test_dir, 'manage.py') shutil.copyfile(template_manage_py, test_manage_py) with open(test_manage_py, 'r') as fp: manage_py_contents = fp.read() manage_py_contents = manage_py_contents.replace( "{{ project_name }}", "test_project") with open(test_manage_py, 'w') as fp: fp.write(manage_py_contents) self.addCleanup(safe_remove, test_manage_py) return self.run_test('./manage.py', args, settings_file) def assertNoOutput(self, stream): "Utility assertion: assert that the given stream is empty" self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream) def assertOutput(self, stream, msg, regex=False): "Utility assertion: assert that the given message exists in the output" stream = force_text(stream) if regex: self.assertIsNotNone(re.search(msg, stream), "'%s' does not match actual output text '%s'" % (msg, stream)) else: self.assertIn(msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream)) def assertNotInOutput(self, stream, msg): "Utility assertion: assert that the given message doesn't exist in the output" stream = force_text(stream) self.assertNotIn(msg, stream, "'%s' matches actual output text '%s'" % (msg, stream)) 
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################


class DjangoAdminNoSettings(AdminScriptTestCase):
    "A series of tests for django-admin.py when there is no settings.py file."

    def test_builtin_command(self):
        "no settings: django-admin builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_bad_settings(self):
        "no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)


class DjangoAdminDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application.
    """
    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "default: django-admin builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "default: django-admin builtin commands succeed if settings are provided as argument"
        args = ['check', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "default: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "default: django-admin can't execute user commands if it isn't provided settings"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No Django settings specified")
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "default: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "default: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")


class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application specified using a full path.
    """
    def setUp(self):
        self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes',
                                            'admin_scripts', 'admin_scripts.complex_app'])

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "fulldefault: django-admin builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "fulldefault: django-admin builtin commands succeed if a settings file is provided"
        args = ['check', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "fulldefault: django-admin builtin commands succeed if the environment contains settings"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "fulldefault: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No Django settings specified")
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "fulldefault: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "fulldefault: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")


class DjangoAdminMinimalSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    doesn't contain the test
application. """ def setUp(self): self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) def tearDown(self): self.remove_settings('settings.py') def test_builtin_command(self): "minimal: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "minimal: django-admin builtin commands fail if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_environment(self): "minimal: django-admin builtin commands fail if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_bad_settings(self): "minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "minimal: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "minimal: django-admin can't execute user commands, even if settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_environment(self): "minimal: django-admin can't execute user commands, even if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") class DjangoAdminAlternateSettings(AdminScriptTestCase): """A series of tests for django-admin.py when using a settings file with a name other than 'settings.py'. 
""" def setUp(self): self.write_settings('alternate_settings.py') def tearDown(self): self.remove_settings('alternate_settings.py') def test_builtin_command(self): "alternate: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "alternate: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "alternate: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "alternate: django-admin can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=test_project.alternate_settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "alternate: django-admin can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminMultipleSettings(AdminScriptTestCase): """A series of tests for django-admin.py when multiple settings files (including the default 'settings.py') are available. The default settings file is insufficient for performing the operations described, so the alternate settings must be used by the running script. 
""" def setUp(self): self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) self.write_settings('alternate_settings.py') def tearDown(self): self.remove_settings('settings.py') self.remove_settings('alternate_settings.py') def test_builtin_command(self): "alternate: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_settings(self): "alternate: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "alternate: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "alternate: django-admin can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=test_project.alternate_settings'] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "alternate: django-admin can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_django_admin(args, 'test_project.alternate_settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminSettingsDirectory(AdminScriptTestCase): """ A series of tests for django-admin.py when the settings file is in a directory. (see #9751). 
""" def setUp(self): self.write_settings('settings', is_dir=True) def tearDown(self): self.remove_settings('settings', is_dir=True) def test_setup_environ(self): "directory: startapp creates the correct directory" args = ['startapp', 'settings_test'] app_path = os.path.join(self.test_dir, 'settings_test') out, err = self.run_django_admin(args, 'test_project.settings') self.addCleanup(shutil.rmtree, app_path) self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) unicode_literals_import = "from __future__ import unicode_literals\n" with open(os.path.join(app_path, 'apps.py'), 'r') as f: content = f.read() self.assertIn("class SettingsTestConfig(AppConfig)", content) self.assertIn("name = 'settings_test'", content) if not PY3: self.assertIn(unicode_literals_import, content) if not PY3: with open(os.path.join(app_path, 'models.py'), 'r') as fp: content = fp.read() self.assertIn(unicode_literals_import, content) def test_setup_environ_custom_template(self): "directory: startapp creates the correct directory with a custom template" template_path = os.path.join(custom_templates_dir, 'app_template') args = ['startapp', '--template', template_path, 'custom_settings_test'] app_path = os.path.join(self.test_dir, 'custom_settings_test') out, err = self.run_django_admin(args, 'test_project.settings') self.addCleanup(shutil.rmtree, app_path) self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py'))) @unittest.skipIf(PY2, "Python 2 doesn't support Unicode package names.") def test_startapp_unicode_name(self): "directory: startapp creates the correct directory with unicode characters" args = ['startapp', 'こんにちは'] app_path = os.path.join(self.test_dir, 'こんにちは') out, err = self.run_django_admin(args, 'test_project.settings') self.addCleanup(shutil.rmtree, app_path) self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) with open(os.path.join(app_path, 'apps.py'), 'r', encoding='utf8') as f: content = f.read() self.assertIn("class こんにちはConfig(AppConfig)", content) self.assertIn("name = 'こんにちは'", content) def test_builtin_command(self): "directory: django-admin builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, 'settings are not configured') def test_builtin_with_bad_settings(self): "directory: django-admin builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_django_admin(args) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "directory: django-admin builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "directory: django-admin can't execute user commands unless settings are provided" args = ['noargs_command'] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_builtin_with_settings(self): "directory: django-admin builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = 
self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "directory: django-admin builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_django_admin(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) ########################################################################## # MANAGE.PY TESTS # This next series of test classes checks the environment processing # of the generated manage.py script ########################################################################## class ManageNoSettings(AdminScriptTestCase): "A series of tests for manage.py when there is no settings.py file." def test_builtin_command(self): "no settings: manage.py builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True) def test_builtin_with_bad_settings(self): "no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) class ManageDefaultSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that contains the test application. 
""" def setUp(self): self.write_settings('settings.py') def tearDown(self): self.remove_settings('settings.py') def test_builtin_command(self): "default: manage.py builtin commands succeed when default settings are appropriate" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_settings(self): "default: manage.py builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "default: manage.py builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "default: manage.py builtin commands succeed if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "default: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "default: manage.py can execute user commands when default settings are appropriate" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_settings(self): "default: manage.py can execute user commands when settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "default: manage.py can execute user commands when settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageFullPathDefaultSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that contains the test application specified using a full path. 
""" def setUp(self): self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']) def tearDown(self): self.remove_settings('settings.py') def test_builtin_command(self): "fulldefault: manage.py builtin commands succeed when default settings are appropriate" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_settings(self): "fulldefault: manage.py builtin commands succeed if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "fulldefault: manage.py builtin commands succeed if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "fulldefault: manage.py can execute user commands when default settings are appropriate" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_settings(self): "fulldefault: manage.py can execute user commands when settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "fulldefault: manage.py can execute user commands when settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageMinimalSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that doesn't contain the test application. 
""" def setUp(self): self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) def tearDown(self): self.remove_settings('settings.py') def test_builtin_command(self): "minimal: manage.py builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): "minimal: manage.py builtin commands fail if settings are provided as argument" args = ['check', '--settings=test_project.settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_environment(self): "minimal: manage.py builtin commands fail if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_bad_settings(self): "minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "minimal: manage.py can't execute user commands without appropriate settings" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "minimal: manage.py can't execute user commands, even if settings are provided as argument" args = ['noargs_command', '--settings=test_project.settings'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_environment(self): "minimal: manage.py can't execute user commands, even if settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'test_project.settings') self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") class ManageAlternateSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings file with a name other than 'settings.py'. 
""" def setUp(self): self.write_settings('alternate_settings.py') def tearDown(self): self.remove_settings('alternate_settings.py') def test_builtin_command(self): "alternate: manage.py builtin commands fail with an error when no default settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True) def test_builtin_with_settings(self): "alternate: manage.py builtin commands work with settings provided as argument" args = ['check', '--settings=alternate_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertOutput(out, SYSTEM_CHECK_MSG) self.assertNoOutput(err) def test_builtin_with_environment(self): "alternate: manage.py builtin commands work if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'alternate_settings') self.assertOutput(out, SYSTEM_CHECK_MSG) self.assertNoOutput(err) def test_builtin_with_bad_settings(self): "alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: manage.py can't execute user commands without settings" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True) def test_custom_command_with_settings(self): "alternate: manage.py can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=alternate_settings'] out, err = self.run_manage(args) self.assertOutput( out, "EXECUTE: noargs_command options=[('no_color', False), " "('pythonpath', None), ('settings', 'alternate_settings'), " "('traceback', False), ('verbosity', 1)]" ) self.assertNoOutput(err) def test_custom_command_with_environment(self): "alternate: manage.py can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'alternate_settings') self.assertOutput( out, "EXECUTE: noargs_command options=[('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]" ) self.assertNoOutput(err) def test_custom_command_output_color(self): "alternate: manage.py output syntax color can be deactivated with the `--no-color` option" args = ['noargs_command', '--no-color', '--settings=alternate_settings'] out, err = self.run_manage(args) self.assertOutput( out, "EXECUTE: noargs_command options=[('no_color', True), " "('pythonpath', None), ('settings', 'alternate_settings'), " "('traceback', False), ('verbosity', 1)]" ) self.assertNoOutput(err) class ManageMultipleSettings(AdminScriptTestCase): """A series of tests for manage.py when multiple settings files (including the default 'settings.py') are available. The default settings file is insufficient for performing the operations described, so the alternate settings must be used by the running script. 
""" def setUp(self): self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) self.write_settings('alternate_settings.py') def tearDown(self): self.remove_settings('settings.py') self.remove_settings('alternate_settings.py') def test_builtin_command(self): "multiple: manage.py builtin commands fail with an error when no settings provided" args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): "multiple: manage.py builtin commands succeed if settings are provided as argument" args = ['check', '--settings=alternate_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): "multiple: manage.py can execute builtin commands if settings are provided in the environment" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'alternate_settings') self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): "multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist" args = ['check', '--settings=bad_settings', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): "multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist" args = ['check', 'admin_scripts'] out, err = self.run_manage(args, 'bad_settings') self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "multiple: manage.py can't execute user commands using default settings" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): "multiple: manage.py can execute user commands if settings are provided as argument" args = ['noargs_command', '--settings=alternate_settings'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): "multiple: manage.py can execute user commands if settings are provided in environment" args = ['noargs_command'] out, err = self.run_manage(args, 'alternate_settings') self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageSettingsWithSettingsErrors(AdminScriptTestCase): """ Tests for manage.py when using the default settings.py file containing runtime errors. """ def tearDown(self): self.remove_settings('settings.py') def write_settings_with_import_error(self, filename): settings_file_path = os.path.join(self.test_dir, filename) with open(settings_file_path, 'w') as settings_file: settings_file.write('# Settings file automatically generated by admin_scripts test case\n') settings_file.write('# The next line will cause an import error:\nimport foo42bar\n') def test_import_error(self): """ import error: manage.py builtin commands shows useful diagnostic info when settings with import errors is provided (#14130). 
""" self.write_settings_with_import_error('settings.py') args = ['check', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named") self.assertOutput(err, "foo42bar") def test_attribute_error(self): """ manage.py builtin commands does not swallow attribute error due to bad settings (#18845). """ self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'}) args = ['collectstatic', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'") def test_key_error(self): self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'}) args = ['collectstatic', 'admin_scripts'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "KeyError: 'blah'") def test_help(self): """ Test listing available commands output note when only core commands are available. """ self.write_settings('settings.py', sdict={'MEDIA_URL': '"/no_ending_slash"'}) args = ['help'] out, err = self.run_manage(args) self.assertOutput(out, 'only Django core commands are listed') self.assertNoOutput(err) class ManageCheck(AdminScriptTestCase): def tearDown(self): self.remove_settings('settings.py') def test_nonexistent_app(self): """ manage.py check reports an error on a non-existent app in INSTALLED_APPS """ self.write_settings('settings.py', apps=['admin_scriptz.broken_app'], sdict={'USE_I18N': False}) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, 'ImportError') self.assertOutput(err, 'No module named') self.assertOutput(err, 'admin_scriptz') def test_broken_app(self): """ manage.py check reports an ImportError if an app's models.py raises one on import """ self.write_settings('settings.py', apps=['admin_scripts.broken_app']) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, 'ImportError') def test_complex_app(self): """ manage.py check does not raise an ImportError validating a complex app with nested calls to load_app """ self.write_settings( 'settings.py', apps=[ 'admin_scripts.complex_app', 'admin_scripts.simple_app', 'django.contrib.admin.apps.SimpleAdminConfig', 'django.contrib.auth', 'django.contrib.contenttypes', ], sdict={ 'DEBUG': True } ) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertEqual(out, 'System check identified no issues (0 silenced).\n') def test_app_with_import(self): """ manage.py check does not raise errors when an app imports a base class that itself has an abstract base. """ self.write_settings('settings.py', apps=['admin_scripts.app_with_import', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sites'], sdict={'DEBUG': True}) args = ['check'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertEqual(out, 'System check identified no issues (0 silenced).\n') def test_output_format(self): """ All errors/warnings should be sorted by level and by message. 
""" self.write_settings('settings.py', apps=['admin_scripts.app_raising_messages', 'django.contrib.auth', 'django.contrib.contenttypes'], sdict={'DEBUG': True}) args = ['check'] out, err = self.run_manage(args) expected_err = ( "SystemCheckError: System check identified some issues:\n" "\n" "ERRORS:\n" "?: An error\n" "\tHINT: Error hint\n" "\n" "WARNINGS:\n" "a: Second warning\n" "obj: First warning\n" "\tHINT: Hint\n" "\n" "System check identified 3 issues (0 silenced).\n" ) self.assertEqual(err, expected_err) self.assertNoOutput(out) def test_warning_does_not_halt(self): """ When there are only warnings or less serious messages, then Django should not prevent user from launching their project, so `check` command should not raise `CommandError` exception. In this test we also test output format. """ self.write_settings('settings.py', apps=['admin_scripts.app_raising_warning', 'django.contrib.auth', 'django.contrib.contenttypes'], sdict={'DEBUG': True}) args = ['check'] out, err = self.run_manage(args) expected_err = ( "System check identified some issues:\n" # No "CommandError: " part "\n" "WARNINGS:\n" "?: A warning\n" "\n" "System check identified 1 issue (0 silenced).\n" ) self.assertEqual(err, expected_err) self.assertNoOutput(out) class CustomTestRunner(DiscoverRunner): def __init__(self, *args, **kwargs): assert 'liveserver' not in kwargs super(CustomTestRunner, self).__init__(*args, **kwargs) def run_tests(self, test_labels, extra_tests=None, **kwargs): pass class ManageTestCommand(AdminScriptTestCase): def setUp(self): from django.core.management.commands.test import Command as TestCommand self.cmd = TestCommand() def test_liveserver(self): """ Ensure that the --liveserver option sets the environment variable correctly. Refs #2879. """ # Backup original state address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS') self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner') # Original state hasn't changed self.assertEqual('DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ, address_predefined) self.assertEqual(os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS'), old_address) self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner', liveserver='blah') # Variable was correctly set self.assertEqual(os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'], 'blah') # Restore original state if address_predefined: os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address else: del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] class ManageRunserver(AdminScriptTestCase): def setUp(self): from django.core.management.commands.runserver import Command def monkey_run(*args, **options): return self.output = StringIO() self.cmd = Command(stdout=self.output) self.cmd.run = monkey_run def assertServerSettings(self, addr, port, ipv6=None, raw_ipv6=False): self.assertEqual(self.cmd.addr, addr) self.assertEqual(self.cmd.port, port) self.assertEqual(self.cmd.use_ipv6, ipv6) self.assertEqual(self.cmd._raw_ipv6, raw_ipv6) def test_runserver_addrport(self): self.cmd.handle() self.assertServerSettings('127.0.0.1', '8000') self.cmd.handle(addrport="1.2.3.4:8000") self.assertServerSettings('1.2.3.4', '8000') self.cmd.handle(addrport="7000") self.assertServerSettings('127.0.0.1', '7000') @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_addrport_ipv6(self): self.cmd.handle(addrport="", use_ipv6=True) self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True) 
self.cmd.handle(addrport="7000", use_ipv6=True) self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True) self.cmd.handle(addrport="[2001:0db8:1234:5678::9]:7000") self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True) def test_runner_hostname(self): self.cmd.handle(addrport="localhost:8000") self.assertServerSettings('localhost', '8000') self.cmd.handle(addrport="test.domain.local:7000") self.assertServerSettings('test.domain.local', '7000') @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_hostname_ipv6(self): self.cmd.handle(addrport="test.domain.local:7000", use_ipv6=True) self.assertServerSettings('test.domain.local', '7000', ipv6=True) def test_runner_ambiguous(self): # Only 4 characters, all of which could be in an ipv6 address self.cmd.handle(addrport="beef:7654") self.assertServerSettings('beef', '7654') # Uses only characters that could be in an ipv6 address self.cmd.handle(addrport="deadbeef:7654") self.assertServerSettings('deadbeef', '7654') def test_no_database(self): """ Ensure runserver.check_migrations doesn't choke on empty DATABASES. """ tested_connections = ConnectionHandler({}) with mock.patch('django.core.management.commands.runserver.connections', new=tested_connections): self.cmd.check_migrations() def test_readonly_database(self): """ Ensure runserver.check_migrations doesn't choke when a database is read-only (with possibly no django_migrations table). """ with mock.patch.object( MigrationRecorder, 'ensure_schema', side_effect=MigrationSchemaMissing()): self.cmd.check_migrations() # Check a warning is emitted self.assertIn("Not checking migrations", self.output.getvalue()) class ManageRunserverMigrationWarning(TestCase): def setUp(self): from django.core.management.commands.runserver import Command self.stdout = StringIO() self.runserver_command = Command(stdout=self.stdout) @override_settings(INSTALLED_APPS=["admin_scripts.app_waiting_migration"]) def test_migration_warning_one_app(self): self.runserver_command.check_migrations() output = self.stdout.getvalue() self.assertIn('You have 1 unapplied migration(s)', output) self.assertIn('apply the migrations for app(s): app_waiting_migration.', output) @override_settings( INSTALLED_APPS=[ "admin_scripts.app_waiting_migration", "admin_scripts.another_app_waiting_migration", ], ) def test_migration_warning_multiple_apps(self): self.runserver_command.check_migrations() output = self.stdout.getvalue() self.assertIn('You have 2 unapplied migration(s)', output) self.assertIn( 'apply the migrations for app(s): another_app_waiting_migration, ' 'app_waiting_migration.', output ) class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase): def setUp(self): self.write_settings('settings.py', sdict={ 'ALLOWED_HOSTS': [], 'DEBUG': False, }) def tearDown(self): self.remove_settings('settings.py') def test_empty_allowed_hosts_error(self): out, err = self.run_manage(['runserver']) self.assertNoOutput(out) self.assertOutput(err, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.') class ManageTestserver(AdminScriptTestCase): from django.core.management.commands.testserver import Command as TestserverCommand @mock.patch.object(TestserverCommand, 'handle') def test_testserver_handle_params(self, mock_handle): out = StringIO() call_command('testserver', 'blah.json', stdout=out) mock_handle.assert_called_with( 'blah.json', stdout=out, settings=None, pythonpath=None, verbosity=1, traceback=False, addrport='', no_color=False, use_ipv6=False, 
skip_checks=True, interactive=True, ) ########################################################################## # COMMAND PROCESSING TESTS # Check that user-space commands are correctly handled - in particular, # that arguments to the commands are correctly parsed and processed. ########################################################################## class CommandTypes(AdminScriptTestCase): "Tests for the various types of base command that can be defined." def setUp(self): self.write_settings('settings.py') def tearDown(self): self.remove_settings('settings.py') def test_version(self): "version is handled as a special case" args = ['version'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, get_version()) def test_version_alternative(self): "--version is equivalent to version" args1, args2 = ['version'], ['--version'] # It's possible one outputs on stderr and the other on stdout, hence the set self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2))) def test_help(self): "help is handled as a special case" args = ['help'] out, err = self.run_manage(args) self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.") self.assertOutput(out, '[django]') self.assertOutput(out, 'startapp') self.assertOutput(out, 'startproject') def test_help_commands(self): "help --commands shows the list of all available commands" args = ['help', '--commands'] out, err = self.run_manage(args) self.assertNotInOutput(out, 'usage:') self.assertNotInOutput(out, 'Options:') self.assertNotInOutput(out, '[django]') self.assertOutput(out, 'startapp') self.assertOutput(out, 'startproject') self.assertNotInOutput(out, '\n\n') def test_help_alternative(self): "--help is equivalent to help" args1, args2 = ['help'], ['--help'] self.assertEqual(self.run_manage(args1), self.run_manage(args2)) def test_help_short_alternative(self): "-h is handled as a short form of --help" args1, args2 = ['--help'], ['-h'] self.assertEqual(self.run_manage(args1), self.run_manage(args2)) def test_specific_help(self): "--help can be used on a specific command" args = ['check', '--help'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "Checks the entire Django project for potential problems.") def test_color_style(self): style = color.no_style() self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!') style = color.make_style('nocolor') self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!') style = color.make_style('dark') self.assertIn('Hello, world!', style.ERROR('Hello, world!')) self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!') # Default palette has color. 
style = color.make_style('') self.assertIn('Hello, world!', style.ERROR('Hello, world!')) self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!') def test_command_color(self): class Command(BaseCommand): requires_system_checks = False def handle(self, *args, **options): self.stdout.write('Hello, world!', self.style.ERROR) self.stderr.write('Hello, world!', self.style.ERROR) out = StringIO() err = StringIO() command = Command(stdout=out, stderr=err) command.execute() if color.supports_color(): self.assertIn('Hello, world!\n', out.getvalue()) self.assertIn('Hello, world!\n', err.getvalue()) self.assertNotEqual(out.getvalue(), 'Hello, world!\n') self.assertNotEqual(err.getvalue(), 'Hello, world!\n') else: self.assertEqual(out.getvalue(), 'Hello, world!\n') self.assertEqual(err.getvalue(), 'Hello, world!\n') def test_command_no_color(self): "--no-color prevents colorization of the output" class Command(BaseCommand): requires_system_checks = False def handle(self, *args, **options): self.stdout.write('Hello, world!', self.style.ERROR) self.stderr.write('Hello, world!', self.style.ERROR) out = StringIO() err = StringIO() command = Command(stdout=out, stderr=err, no_color=True) command.execute() self.assertEqual(out.getvalue(), 'Hello, world!\n') self.assertEqual(err.getvalue(), 'Hello, world!\n') out = StringIO() err = StringIO() command = Command(stdout=out, stderr=err) command.execute(no_color=True) self.assertEqual(out.getvalue(), 'Hello, world!\n') self.assertEqual(err.getvalue(), 'Hello, world!\n') def test_custom_stdout(self): class Command(BaseCommand): requires_system_checks = False def handle(self, *args, **options): self.stdout.write("Hello, World!") out = StringIO() command = Command(stdout=out) command.execute() self.assertEqual(out.getvalue(), "Hello, World!\n") out.truncate(0) new_out = StringIO() command.execute(stdout=new_out) self.assertEqual(out.getvalue(), "") self.assertEqual(new_out.getvalue(), "Hello, World!\n") def test_custom_stderr(self): class Command(BaseCommand): requires_system_checks = False def handle(self, *args, **options): self.stderr.write("Hello, World!") err = StringIO() command = Command(stderr=err) command.execute() self.assertEqual(err.getvalue(), "Hello, World!\n") err.truncate(0) new_err = StringIO() command.execute(stderr=new_err) self.assertEqual(err.getvalue(), "") self.assertEqual(new_err.getvalue(), "Hello, World!\n") def test_base_command(self): "User BaseCommands can execute when a label is provided" args = ['base_command', 'testlabel'] expected_labels = "('testlabel',)" self._test_base_command(args, expected_labels) def test_base_command_no_label(self): "User BaseCommands can execute when no labels are provided" args = ['base_command'] expected_labels = "()" self._test_base_command(args, expected_labels) def test_base_command_multiple_label(self): "User BaseCommands can execute when multiple labels are provided" args = ['base_command', 'testlabel', 'anotherlabel'] expected_labels = "('testlabel', 'anotherlabel')" self._test_base_command(args, expected_labels) def test_base_command_with_option(self): "User BaseCommands can execute with options when a label is provided" args = ['base_command', 'testlabel', '--option_a=x'] expected_labels = "('testlabel',)" self._test_base_command(args, expected_labels, option_a="'x'") def test_base_command_with_options(self): "User BaseCommands can execute with multiple options when a label is provided" args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y'] expected_labels = "('testlabel',)" 
self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'") def test_base_command_with_wrong_option(self): "User BaseCommands output command usage when a wrong option is specified" args = ['base_command', '--invalid'] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "usage: manage.py base_command") self.assertOutput(err, "error: unrecognized arguments: --invalid") def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"): out, err = self.run_manage(args) expected_out = ( "EXECUTE:BaseCommand labels=%s, " "options=[('no_color', False), ('option_a', %s), ('option_b', %s), " "('option_c', '3'), ('pythonpath', None), ('settings', None), " "('traceback', False), ('verbosity', 1)]") % (labels, option_a, option_b) self.assertNoOutput(err) self.assertOutput(out, expected_out) def test_base_run_from_argv(self): """ Test that run_from_argv properly terminates even with a custom execute() (#19665). Also test proper traceback display. """ err = StringIO() command = BaseCommand(stderr=err) def raise_command_error(*args, **kwargs): raise CommandError("Custom error") command.execute = lambda args: args # This will trigger TypeError # If the Exception is not CommandError it should always # raise the original exception. with self.assertRaises(TypeError): command.run_from_argv(['', '']) # If the Exception is CommandError and --traceback is not present # this command should raise a SystemExit and not print any # traceback to stderr. command.execute = raise_command_error err.truncate(0) with self.assertRaises(SystemExit): command.run_from_argv(['', '']) err_message = err.getvalue() self.assertNotIn("Traceback", err_message) self.assertIn("CommandError", err_message) # If the Exception is CommandError and --traceback is present # this command should raise the original CommandError as if it # were not a CommandError. err.truncate(0) with self.assertRaises(CommandError): command.run_from_argv(['', '', '--traceback']) def test_run_from_argv_non_ascii_error(self): """ Test that a non-ASCII CommandError message does not raise any UnicodeDecodeError in run_from_argv. """ def raise_command_error(*args, **kwargs): raise CommandError("Erreur personnalisée") command = BaseCommand(stderr=StringIO()) command.execute = raise_command_error with self.assertRaises(SystemExit): command.run_from_argv(['', '']) def test_run_from_argv_closes_connections(self): """ A command called from the command line should close connections after being executed (#21255). 
""" command = BaseCommand(stderr=StringIO()) command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch('django.core.management.base.connections') as mock_connections: command.run_from_argv(['', '']) # Test connections have been closed self.assertTrue(mock_connections.close_all.called) def test_noargs(self): "NoArg Commands can be executed" args = ['noargs_command'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE: noargs_command options=[('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]" ) def test_noargs_with_args(self): "NoArg Commands raise an error if an argument is provided" args = ['noargs_command', 'argument'] out, err = self.run_manage(args) self.assertOutput(err, "error: unrecognized arguments: argument") def test_app_command(self): "User AppCommands can execute when a single app name is provided" args = ['app_command', 'auth'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=") self.assertOutput( out, ", options=[('no_color', False), ('pythonpath', None), " "('settings', None), ('traceback', False), ('verbosity', 1)]" ) def test_app_command_no_apps(self): "User AppCommands raise an error when no app name is provided" args = ['app_command'] out, err = self.run_manage(args) self.assertOutput(err, 'error: Enter at least one application label.') def test_app_command_multiple_apps(self): "User AppCommands raise an error when multiple app names are provided" args = ['app_command', 'auth', 'contenttypes'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=") self.assertOutput( out, ", options=[('no_color', False), ('pythonpath', None), " "('settings', None), ('traceback', False), ('verbosity', 1)]" ) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=") self.assertOutput( out, ", options=[('no_color', False), ('pythonpath', None), " "('settings', None), ('traceback', False), ('verbosity', 1)]" ) def test_app_command_invalid_app_label(self): "User AppCommands can execute when a single app name is provided" args = ['app_command', 'NOT_AN_APP'] out, err = self.run_manage(args) self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.") def test_app_command_some_invalid_app_labels(self): "User AppCommands can execute when some of the provided app names are invalid" args = ['app_command', 'auth', 'NOT_AN_APP'] out, err = self.run_manage(args) self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.") def test_label_command(self): "User LabelCommands can execute when a label is provided" args = ['label_command', 'testlabel'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]" ) def test_label_command_no_label(self): "User LabelCommands raise an error if no label is provided" args = ['label_command'] out, err = self.run_manage(args) self.assertOutput(err, 'Enter at least one label') def test_label_command_multiple_label(self): "User LabelCommands are executed multiple times if multiple labels are provided" args = ['label_command', 'testlabel', 'anotherlabel'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:LabelCommand 
label=testlabel, options=[('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]" ) self.assertOutput( out, "EXECUTE:LabelCommand label=anotherlabel, options=[('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]" ) class Discovery(SimpleTestCase): def test_precedence(self): """ Apps listed first in INSTALLED_APPS have precedence. """ with self.settings(INSTALLED_APPS=['admin_scripts.complex_app', 'admin_scripts.simple_app', 'django.contrib.auth', 'django.contrib.contenttypes']): out = StringIO() call_command('duplicate', stdout=out) self.assertEqual(out.getvalue().strip(), 'complex_app') with self.settings(INSTALLED_APPS=['admin_scripts.simple_app', 'admin_scripts.complex_app', 'django.contrib.auth', 'django.contrib.contenttypes']): out = StringIO() call_command('duplicate', stdout=out) self.assertEqual(out.getvalue().strip(), 'simple_app') class ArgumentOrder(AdminScriptTestCase): """Tests for 2-stage argument parsing scheme. django-admin command arguments are parsed in 2 parts; the core arguments (--settings, --traceback and --pythonpath) are parsed using a basic parser, ignoring any unknown options. Then the full settings are passed to the command parser, which extracts commands of interest to the individual command. """ def setUp(self): self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes']) self.write_settings('alternate_settings.py') def tearDown(self): self.remove_settings('settings.py') self.remove_settings('alternate_settings.py') def test_setting_then_option(self): """ Options passed after settings are correctly handled. """ args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x'] self._test(args) def test_setting_then_short_option(self): """ Short options passed after settings are correctly handled. """ args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x'] self._test(args) def test_option_then_setting(self): """ Options passed before settings are correctly handled. """ args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings'] self._test(args) def test_short_option_then_setting(self): """ Short options passed before settings are correctly handled. """ args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings'] self._test(args) def test_option_then_setting_then_option(self): """ Options are correctly handled when they are passed before and after a setting. 
""" args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y'] self._test(args, option_b="'y'") def _test(self, args, option_b="'2'"): out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('no_color', False), " "('option_a', 'x'), ('option_b', %s), ('option_c', '3'), " "('pythonpath', None), ('settings', 'alternate_settings'), " "('traceback', False), ('verbosity', 1)]" % option_b ) @override_settings(ROOT_URLCONF='admin_scripts.urls') class StartProject(LiveServerTestCase, AdminScriptTestCase): available_apps = [ 'admin_scripts', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', ] def test_wrong_args(self): "Make sure passing the wrong kinds of arguments outputs an error and prints usage" out, err = self.run_django_admin(['startproject']) self.assertNoOutput(out) self.assertOutput(err, "usage:") self.assertOutput(err, "You must provide a project name.") def test_simple_project(self): "Make sure the startproject management command creates a project" args = ['startproject', 'testproject'] testproject_dir = os.path.join(self.test_dir, 'testproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) # running again.. out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "already exists") def test_invalid_project_name(self): "Make sure the startproject management command validates a project name" for bad_name in ('7testproject', '../testproject'): args = ['startproject', bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) if PY2: self.assertOutput( err, "Error: '%s' is not a valid project name. Please make " "sure the name begins with a letter or underscore." % bad_name ) else: self.assertOutput( err, "Error: '%s' is not a valid project name. Please make " "sure the name is a valid identifier." % bad_name ) self.assertFalse(os.path.exists(testproject_dir)) def test_simple_project_different_directory(self): "Make sure the startproject management command creates a project in a specific directory" args = ['startproject', 'testproject', 'othertestproject'] testproject_dir = os.path.join(self.test_dir, 'othertestproject') os.mkdir(testproject_dir) self.addCleanup(shutil.rmtree, testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py'))) # running again.. 
out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "already exists") def test_custom_project_template(self): "Make sure the startproject management command is able to use a different project template" template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'customtestproject'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir'))) def test_template_dir_with_trailing_slash(self): "Ticket 17475: Template dir passed has a trailing path separator" template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep) args = ['startproject', '--template', template_path, 'customtestproject'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir'))) def test_custom_project_template_from_tarball_by_path(self): "Make sure the startproject management command is able to use a different project template from a tarball" template_path = os.path.join(custom_templates_dir, 'project_template.tgz') args = ['startproject', '--template', template_path, 'tarballtestproject'] testproject_dir = os.path.join(self.test_dir, 'tarballtestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_custom_project_template_from_tarball_to_alternative_location(self): "Startproject can use a project template from a tarball and create it in a specified location" template_path = os.path.join(custom_templates_dir, 'project_template.tgz') args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation'] testproject_dir = os.path.join(self.test_dir, 'altlocation') os.mkdir(testproject_dir) self.addCleanup(shutil.rmtree, testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_custom_project_template_from_tarball_by_url(self): """ The startproject management command is able to use a different project template from a tarball via a URL. 
""" template_url = '%s/custom_templates/project_template.tgz' % self.live_server_url args = ['startproject', '--template', template_url, 'urltestproject'] testproject_dir = os.path.join(self.test_dir, 'urltestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_project_template_tarball_url(self): "Startproject management command handles project template tar/zip balls from non-canonical urls" template_url = '%s/custom_templates/project_template.tgz/' % self.live_server_url args = ['startproject', '--template', template_url, 'urltestproject'] testproject_dir = os.path.join(self.test_dir, 'urltestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py'))) def test_file_without_extension(self): "Make sure the startproject management command is able to render custom files" template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir'))) base_path = os.path.join(testproject_dir, 'additional_dir') for f in ('Procfile', 'additional_file.py', 'requirements.txt'): self.assertTrue(os.path.exists(os.path.join(base_path, f))) with open(os.path.join(base_path, f)) as fh: self.assertEqual(fh.read().strip(), '# some file for customtestproject test project') def test_custom_project_template_context_variables(self): "Make sure template context variables are rendered with proper values" template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'another_project', 'project_dir'] testproject_dir = os.path.join(self.test_dir, 'project_dir') os.mkdir(testproject_dir) self.addCleanup(shutil.rmtree, testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) test_manage_py = os.path.join(testproject_dir, 'manage.py') with open(test_manage_py, 'r') as fp: content = force_text(fp.read()) self.assertIn("project_name = 'another_project'", content) self.assertIn("project_directory = '%s'" % testproject_dir, content) def test_no_escaping_of_project_variables(self): "Make sure template context variables are not html escaped" # We're using a custom command so we need the alternate settings self.write_settings('alternate_settings.py') self.addCleanup(self.remove_settings, 'alternate_settings.py') template_path = os.path.join(custom_templates_dir, 'project_template') args = [ 'custom_startproject', '--template', template_path, 'another_project', 'project_dir', '--extra', '<&>', '--settings=alternate_settings', ] testproject_dir = os.path.join(self.test_dir, 'project_dir') os.mkdir(testproject_dir) self.addCleanup(shutil.rmtree, testproject_dir) out, err = self.run_manage(args) self.assertNoOutput(err) test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py') with open(test_manage_py, 'r') as fp: content = fp.read() 
self.assertIn("<&>", content) def test_custom_project_destination_missing(self): """ Make sure an exception is raised when the provided destination directory doesn't exist """ template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2'] testproject_dir = os.path.join(self.test_dir, 'project_dir2') out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir) self.assertFalse(os.path.exists(testproject_dir)) def test_custom_project_template_with_non_ascii_templates(self): """ The startproject management command is able to render templates with non-ASCII content. """ template_path = os.path.join(custom_templates_dir, 'project_template') args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject'] testproject_dir = os.path.join(self.test_dir, 'customtestproject') self.addCleanup(shutil.rmtree, testproject_dir, True) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt') with codecs.open(path, 'r', encoding='utf-8') as f: self.assertEqual(f.read().splitlines(False), [ 'Some non-ASCII text for testing ticket #18091:', 'üäö €']) class DiffSettings(AdminScriptTestCase): """Tests for diffsettings management command.""" def test_basic(self): """Runs without error and emits settings diff.""" self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'}) self.addCleanup(self.remove_settings, 'settings_to_diff.py') args = ['diffsettings', '--settings=settings_to_diff'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "FOO = 'bar' ###") def test_all(self): """The all option also shows settings with the default value.""" self.write_settings('settings_to_diff.py', sdict={'STATIC_URL': 'None'}) self.addCleanup(self.remove_settings, 'settings_to_diff.py') args = ['diffsettings', '--settings=settings_to_diff', '--all'] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "### STATIC_URL = None") class Dumpdata(AdminScriptTestCase): """Tests for dumpdata management command.""" def setUp(self): self.write_settings('settings.py') def tearDown(self): self.remove_settings('settings.py') def test_pks_parsing(self): """Regression for #20509 Test would raise an exception rather than printing an error message. """ args = ['dumpdata', '--pks=1'] out, err = self.run_manage(args) self.assertOutput(err, "You can only use --pks option with one model") self.assertNoOutput(out) class MainModule(AdminScriptTestCase): """python -m django works like django-admin.""" def test_runs_django_admin(self): cmd_out, _ = self.run_django_admin(['--version']) mod_out, _ = self.run_test('-m', ['django', '--version']) self.assertEqual(mod_out, cmd_out)
{ "content_hash": "d304ce643689cf1bc961a950b0d0b63c", "timestamp": "", "source": "github", "line_count": 2176, "max_line_length": 118, "avg_line_length": 43.56112132352941, "alnum_prop": 0.6347360980704512, "repo_name": "karyon/django", "id": "60c3a68fd2d99bfade146105707fa13fb9cef71d", "size": "94859", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/admin_scripts/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "52334" }, { "name": "HTML", "bytes": "170527" }, { "name": "JavaScript", "bytes": "256023" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "11449863" }, { "name": "Shell", "bytes": "809" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
# Fetch the Spanish public-procurement portal and print every <div> whose
# class is exactly ['paddingLeft1'], along with its children (Python 2).
from __future__ import print_function

import urllib2

from bs4 import BeautifulSoup

page = urllib2.urlopen("https://contrataciondelestado.es/wps/portal/plataforma")
soup = BeautifulSoup(page, "html.parser")  # name the parser explicitly
links = soup('div')  # calling the soup is shorthand for soup.find_all('div')

print('-----------FIN----------------')

for link in links:
    # Divs without a class attribute would raise KeyError on link['class'].
    if link.get('class') is None:
        continue
    if link['class'] == ['paddingLeft1']:
        print(link)
        for child in link.children:
            print(child)
{ "content_hash": "7dbf97b96289f1ca830105ee0d2d0bd6", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 80, "avg_line_length": 22.333333333333332, "alnum_prop": 0.6247334754797441, "repo_name": "jmartinz/pyCrawler", "id": "618b6cc53d63332896f01c6302fec7fb05f11f9a", "size": "469", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "99.Test/test04bs4_PCE.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "74554" }, { "name": "OpenEdge ABL", "bytes": "3226" }, { "name": "Python", "bytes": "54571" } ], "symlink_target": "" }
from .language import language_detection
{ "content_hash": "a299a732d8ae27e7df65c2c5606b38ad", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 40, "avg_line_length": 40, "alnum_prop": 0.875, "repo_name": "salarmohtaj/language_detection", "id": "2730ee2aa4215e171c26cfb27e4deeb42927c4d2", "size": "40", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "language_detection/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3254" } ], "symlink_target": "" }
"""Production settings and globals.""" from __future__ import absolute_import from os import environ from .base import * # Normally you should not import ANYTHING from Django directly # into your settings, but ImproperlyConfigured is an exception. from django.core.exceptions import ImproperlyConfigured def get_env_setting(setting): """ Get the environment setting or return exception """ try: return environ[setting] except KeyError: error_msg = "Set the %s env variable" % setting raise ImproperlyConfigured(error_msg) ########## HOST CONFIGURATION # See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production ALLOWED_HOSTS = ['127.0.0.1', '159.90.251.20'] ########## END HOST CONFIGURATION ########## EMAIL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com') # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '') # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com') # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port EMAIL_PORT = environ.get('EMAIL_PORT', 587) # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls EMAIL_USE_TLS = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email SERVER_EMAIL = EMAIL_HOST_USER ########## END EMAIL CONFIGURATION ########## DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': normpath(join(DJANGO_ROOT, 'default.db')), 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } ########## END DATABASE CONFIGURATION ########## CACHE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#caches CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } ########## END CACHE CONFIGURATION ########## SECRET CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = get_env_setting('SECRET_KEY') ########## END SECRET CONFIGURATION
{ "content_hash": "11c3360e3a615892672574ec6bc42172", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 95, "avg_line_length": 32.39506172839506, "alnum_prop": 0.6859756097560976, "repo_name": "prengifo/GuardabosquesUSB", "id": "31af9f12062375ee6a5ce529cf60817b825896c2", "size": "2624", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "GuardabosquesUSB/GuardabosquesUSB/settings/production.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "89200" }, { "name": "JavaScript", "bytes": "45" }, { "name": "Makefile", "bytes": "5612" }, { "name": "Python", "bytes": "53780" }, { "name": "Shell", "bytes": "5120" } ], "symlink_target": "" }
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.dialects import mysql

Base = declarative_base()


class User(Base):
    __tablename__ = 'user'

    user_id = sa.Column(sa.Integer, primary_key=True)
    username = sa.Column(sa.String(15), unique=True)
    email = sa.Column(sa.String(30), unique=True)
    password = sa.Column(sa.String(40), nullable=False)
    realname = sa.Column(sa.String(20), nullable=False)
    comment = sa.Column(sa.String(30))
    deleted = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    reset_uuid = sa.Column(sa.String(40))
    salt = sa.Column(sa.String(40))
    sysadmin_flag = sa.Column(sa.Integer)
    creation_time = sa.Column(mysql.TIMESTAMP)
    update_time = sa.Column(mysql.TIMESTAMP)


class Properties(Base):
    __tablename__ = 'properties'

    k = sa.Column(sa.String(64), primary_key=True)
    v = sa.Column(sa.String(128), nullable=False)


class ProjectMember(Base):
    __tablename__ = 'project_member'

    project_id = sa.Column(sa.Integer(), primary_key=True)
    user_id = sa.Column(sa.Integer(), primary_key=True)
    role = sa.Column(sa.Integer(), nullable=False)
    creation_time = sa.Column(mysql.TIMESTAMP, nullable=True)
    update_time = sa.Column(mysql.TIMESTAMP, nullable=True)

    # The constraints must live in __table_args__; bare ForeignKeyConstraint
    # expressions in the class body are created and then silently discarded.
    __table_args__ = (
        sa.ForeignKeyConstraint(['project_id'], [u'project.project_id']),
        sa.ForeignKeyConstraint(['role'], [u'role.role_id']),
        sa.ForeignKeyConstraint(['user_id'], [u'user.user_id']),
    )


class UserProjectRole(Base):
    __tablename__ = 'user_project_role'

    upr_id = sa.Column(sa.Integer(), primary_key=True)
    user_id = sa.Column(sa.Integer(), sa.ForeignKey('user.user_id'))
    pr_id = sa.Column(sa.Integer(), sa.ForeignKey('project_role.pr_id'))
    project_role = relationship("ProjectRole")


class ProjectRole(Base):
    __tablename__ = 'project_role'

    pr_id = sa.Column(sa.Integer(), primary_key=True)
    project_id = sa.Column(sa.Integer(), nullable=False)
    role_id = sa.Column(sa.Integer(), nullable=False)

    __table_args__ = (
        sa.ForeignKeyConstraint(['role_id'], [u'role.role_id']),
        sa.ForeignKeyConstraint(['project_id'], [u'project.project_id']),
    )


class Access(Base):
    __tablename__ = 'access'

    access_id = sa.Column(sa.Integer(), primary_key=True)
    access_code = sa.Column(sa.String(1))
    comment = sa.Column(sa.String(30))


class Role(Base):
    __tablename__ = 'role'

    role_id = sa.Column(sa.Integer, primary_key=True)
    role_mask = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    role_code = sa.Column(sa.String(20))
    name = sa.Column(sa.String(20))


class Project(Base):
    __tablename__ = 'project'

    project_id = sa.Column(sa.Integer, primary_key=True)
    owner_id = sa.Column(sa.ForeignKey(u'user.user_id'), nullable=False, index=True)
    name = sa.Column(sa.String(30), nullable=False, unique=True)
    creation_time = sa.Column(mysql.TIMESTAMP)
    update_time = sa.Column(mysql.TIMESTAMP)
    deleted = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))
    public = sa.Column(sa.Integer, nullable=False, server_default=sa.text("'0'"))

    owner = relationship(u'User')


class ReplicationPolicy(Base):
    __tablename__ = "replication_policy"

    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(256))
    project_id = sa.Column(sa.Integer, nullable=False)
    target_id = sa.Column(sa.Integer, nullable=False)
    enabled = sa.Column(mysql.TINYINT(1), nullable=False, server_default=sa.text("'1'"))
    description = sa.Column(sa.Text)
    cron_str = sa.Column(sa.String(256))
    start_time = sa.Column(mysql.TIMESTAMP)
    creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
    update_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))


class ReplicationTarget(Base):
    __tablename__ = "replication_target"

    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(64))
    url = sa.Column(sa.String(64))
    username = sa.Column(sa.String(40))
    password = sa.Column(sa.String(40))
    target_type = sa.Column(mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'"))
    creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
    update_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))


class ReplicationJob(Base):
    __tablename__ = "replication_job"

    id = sa.Column(sa.Integer, primary_key=True)
    status = sa.Column(sa.String(64), nullable=False)
    policy_id = sa.Column(sa.Integer, nullable=False)
    repository = sa.Column(sa.String(256), nullable=False)
    operation = sa.Column(sa.String(64), nullable=False)
    tags = sa.Column(sa.String(16384))
    creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
    update_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))

    __table_args__ = (sa.Index('policy', "policy_id"),)


class Repository(Base):
    __tablename__ = "repository"

    repository_id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(255), nullable=False, unique=True)
    project_id = sa.Column(sa.Integer, nullable=False)
    owner_id = sa.Column(sa.Integer, nullable=False)
    description = sa.Column(sa.Text)
    pull_count = sa.Column(sa.Integer, server_default=sa.text("'0'"), nullable=False)
    star_count = sa.Column(sa.Integer, server_default=sa.text("'0'"), nullable=False)
    creation_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"))
    update_time = sa.Column(mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
{ "content_hash": "f1fb8398be0f3d23fce1ae56736eea70", "timestamp": "", "source": "github", "line_count": 139, "max_line_length": 119, "avg_line_length": 42.46762589928058, "alnum_prop": 0.684736574623073, "repo_name": "wknet123/harbor", "id": "c928edc1741782ddfc3ac69252568778cb82e07b", "size": "5950", "binary": false, "copies": "3", "ref": "refs/heads/master-latest", "path": "tools/migration/db_meta.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "15315" }, { "name": "Go", "bytes": "827320" }, { "name": "HTML", "bytes": "114700" }, { "name": "JavaScript", "bytes": "6249" }, { "name": "Makefile", "bytes": "22255" }, { "name": "Python", "bytes": "66203" }, { "name": "Shell", "bytes": "22345" }, { "name": "Smarty", "bytes": "5128" }, { "name": "TypeScript", "bytes": "647354" } ], "symlink_target": "" }
import unittest

from katas.kyu_8.sum_arrays import sum_array


class SumArrayTestCase(unittest.TestCase):
    def test_equals(self):
        self.assertEqual(sum_array([]), 0)

    def test_equals_2(self):
        self.assertEqual(sum_array([1, 2, 3]), 6)

    def test_equals_3(self):
        # Floating point addition is inexact (1.1 + 2.2 + 3.3 != 6.6 exactly),
        # so compare with a tolerance instead of strict equality.
        self.assertAlmostEqual(sum_array([1.1, 2.2, 3.3]), 6.6)

    def test_equals_4(self):
        self.assertEqual(sum_array([4, 5, 6]), 15)
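# For reference, a minimal implementation that satisfies these tests -- an
# assumption on our part, since katas/kyu_8/sum_arrays.py is not shown here:
#
#   def sum_array(arr):
#       return sum(arr)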
{ "content_hash": "c69c7c1fbff7f7f94af5ba94ab3d0a1c", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 57, "avg_line_length": 25.058823529411764, "alnum_prop": 0.6291079812206573, "repo_name": "the-zebulan/CodeWars", "id": "59f60a1b1a63763fddae2b4f120c6e671d1ec1c9", "size": "426", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/kyu_8_tests/test_sum_arrays.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1203000" } ], "symlink_target": "" }
"""Update encrypted deploy password in Travis config file """ from __future__ import print_function import base64 import json import os from getpass import getpass import yaml from cryptography.hazmat.primitives.serialization import load_pem_public_key from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 try: from urllib import urlopen except: from urllib.request import urlopen GITHUB_REPO = 'watchdogpolska/allauth_watchdog_id' TRAVIS_CONFIG_FILE = os.path.join( os.path.dirname(os.path.abspath(__file__)), '.travis.yml') def load_key(pubkey): """Load public RSA key, with work-around for keys using incorrect header/footer format. Read more about RSA encryption with cryptography: https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/ """ try: return load_pem_public_key(pubkey.encode(), default_backend()) except ValueError: # workaround for https://github.com/travis-ci/travis-api/issues/196 pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END') return load_pem_public_key(pubkey.encode(), default_backend()) def encrypt(pubkey, password): """Encrypt password using given RSA public key and encode it with base64. The encrypted password can only be decrypted by someone with the private key (in this case, only Travis). """ key = load_key(pubkey) encrypted_password = key.encrypt(password, PKCS1v15()) return base64.b64encode(encrypted_password) def fetch_public_key(repo): """Download RSA public key Travis will use for this repo. Travis API docs: http://docs.travis-ci.com/api/#repository-keys """ keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo) data = json.loads(urlopen(keyurl).read().decode()) if 'key' not in data: errmsg = "Could not find public key for repo: {}.\n".format(repo) errmsg += "Have you already added your GitHub repo to Travis?" raise ValueError(errmsg) return data['key'] def prepend_line(filepath, line): """Rewrite a file adding a line to its beginning. """ with open(filepath) as f: lines = f.readlines() lines.insert(0, line) with open(filepath, 'w') as f: f.writelines(lines) def load_yaml_config(filepath): with open(filepath) as f: return yaml.load(f) def save_yaml_config(filepath, config): with open(filepath, 'w') as f: yaml.dump(config, f, default_flow_style=False) def update_travis_deploy_password(encrypted_password): """Update the deploy section of the .travis.yml file to use the given encrypted password. """ config = load_yaml_config(TRAVIS_CONFIG_FILE) config['deploy']['password'] = dict(secure=encrypted_password) save_yaml_config(TRAVIS_CONFIG_FILE, config) line = ('# This file was autogenerated and will overwrite' ' each time you run travis_pypi_setup.py\n') prepend_line(TRAVIS_CONFIG_FILE, line) def main(args): public_key = fetch_public_key(args.repo) password = args.password or getpass('PyPI password: ') update_travis_deploy_password(encrypt(public_key, password.encode())) print("Wrote encrypted password to .travis.yml -- you're ready to deploy") if '__main__' == __name__: import argparse parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--repo', default=GITHUB_REPO, help='GitHub repo (default: %s)' % GITHUB_REPO) parser.add_argument('--password', help='PyPI password (will prompt if not provided)') args = parser.parse_args() main(args)
{ "content_hash": "3bc3cbbc202d9a11236c7b21e6080d64", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 79, "avg_line_length": 31.05, "alnum_prop": 0.6819645732689211, "repo_name": "ad-m/django-atom", "id": "b749bc69bca1a45477f0c230c6dd229837ab46af", "size": "3772", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "travis_pypi_setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1223" }, { "name": "Python", "bytes": "48546" } ], "symlink_target": "" }
from java.awt import Robot, Toolkit
from java.awt.event import InputEvent
from java.awt.MouseInfo import getPointerInfo

from .base import PyMouseMeta

r = Robot()

# Map PyMouse button numbers (1=left, 2=right, 3=middle) to AWT button masks.
BUTTON_MASKS = [None,
                InputEvent.BUTTON1_MASK,
                InputEvent.BUTTON3_MASK,
                InputEvent.BUTTON2_MASK]


class PyMouse(PyMouseMeta):

    def press(self, x, y, button=1):
        self.move(x, y)
        r.mousePress(BUTTON_MASKS[button])

    def release(self, x, y, button=1):
        self.move(x, y)
        r.mouseRelease(BUTTON_MASKS[button])

    def move(self, x, y):
        r.mouseMove(x, y)

    def position(self):
        loc = getPointerInfo().getLocation()
        # getX/getY are Java accessor methods and must be called;
        # returning them unbound would hand back method objects, not numbers.
        return loc.getX(), loc.getY()

    def screen_size(self):
        dim = Toolkit.getDefaultToolkit().getScreenSize()
        return dim.getWidth(), dim.getHeight()
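# Example (requires a Jython runtime with java.awt available; coordinates are
# illustrative):
#
#   mouse = PyMouse()
#   x, y = mouse.position()
#   mouse.press(x, y)      # press and ...
#   mouse.release(x, y)    # ... release button 1, i.e. a left click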
{ "content_hash": "ea9285964f220d04adf4250b21a71c3b", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 103, "avg_line_length": 32.464285714285715, "alnum_prop": 0.6633663366336634, "repo_name": "CamelBackNotation/hackdfw", "id": "b9775532a625c13d4dbb74d6ba462d311d03cc75", "size": "1564", "binary": false, "copies": "15", "ref": "refs/heads/master", "path": "Dependencies/build/lib.linux-x86_64-2.7/pymouse/java_.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "22372" }, { "name": "Shell", "bytes": "312" } ], "symlink_target": "" }
import pytest
from django.core.urlresolvers import reverse

from articles.views import ArticleListView
from tests.factories import ArticleFactory


@pytest.mark.django_db
class TestVirtualFields(object):
    def _request(self, admin_client):
        response = admin_client.get(reverse('articles:list'))
        assert response.status_code == 200
        return response

    def _assert_list_items_len(self, response, length):
        assert 'list_items' in response.context_data
        assert len(response.context_data['list_items']) == length

    def test_virtual_field(self, admin_client):
        """A virtual field is displayed in the ListView."""
        article = ArticleFactory()
        view = ArticleListView()
        view.fields = ['title', 'description', 'published', 'category']
        response = self._request(admin_client)

        self._assert_list_items_len(response, 1)
        item = response.context_data['list_items'][0]
        assert item[1]['value'] == article.title
        assert item[2]['value'] == article.description
        assert item[3]['value'] == article.published
        assert item[4]['value'] == article.category.name

    def test_missing_virtual_field(self, admin_client):
        """A non-existent virtual field name is silently skipped, not rendered."""
        article = ArticleFactory()  # noqa
        view = ArticleListView()
        view.fields = ['title', 'description', 'published', 'virtual_field']
        response = self._request(admin_client)

        search_virtual_field = False
        for field in response.context_data['list_items'][0]:
            if isinstance(field, int):
                continue
            if 'virtual_field' in field['field']:
                search_virtual_field = True
        assert search_virtual_field is False
{ "content_hash": "c8762696cd7ac62e7cc47f2efb21349f", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 76, "avg_line_length": 33.9811320754717, "alnum_prop": 0.6268739589117157, "repo_name": "ddaan/django-arctic", "id": "94fdf462fa618b00ef0300ad34f3e8b940806bce", "size": "1801", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "tests/test_virtual_fields.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "189003" }, { "name": "HTML", "bytes": "62483" }, { "name": "JavaScript", "bytes": "38489" }, { "name": "Python", "bytes": "109791" } ], "symlink_target": "" }
from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/component/bio/shared_bio_component_food_flavor.iff" result.attribute_template_id = -1 result.stfName("","") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
{ "content_hash": "f68d712d9b74cc26e30a2dfa9c1adcb5", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 87, "avg_line_length": 23, "alnum_prop": 0.6889632107023411, "repo_name": "obi-two/Rebelion", "id": "2f4bbf68831bde0b8a9e94c8505a06685e70da50", "size": "444", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "data/scripts/templates/object/tangible/component/bio/shared_bio_component_food_flavor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11818" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2293610" }, { "name": "CMake", "bytes": "39727" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7499185" }, { "name": "SQLPL", "bytes": "41864" } ], "symlink_target": "" }
from model.group import Group testdata = [Group(name="name1", header="header1", footer="footer1"), Group(name="name2", header="header2", footer="footer2")]
{ "content_hash": "6de3beadb68c4ca9c2deeb543d86d41a", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 68, "avg_line_length": 42.25, "alnum_prop": 0.6686390532544378, "repo_name": "tkapriyan/python_training", "id": "06347ce569ee912377e2960bae7ce314211b3789", "size": "169", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "data/group.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "29309" } ], "symlink_target": "" }
import mock
import greenlet
import threading
from rackattack.tcp import publish


class OneThreadedPublish(publish.Publish):
    """A Publish variant whose background worker is driven by a greenlet,
    letting tests step the publish loop deterministically."""

    def __init__(self, amqpURL):
        # Publish.__init__ normally spawns a daemon worker thread; mock out
        # Thread (under both module references) so no real thread starts.
        self.threadStartMock = mock.Mock()
        threading.Thread.__init__ = mock.Mock()
        threading.Thread.daemon = mock.Mock()
        threading.Thread.Event = mock.Mock()
        publish.threading.Thread.__init__ = mock.Mock()
        publish.threading.Thread.start = self.threadStartMock
        publish.threading.Thread.daemon = mock.Mock()
        publish.threading.Thread.Event = mock.Mock()

        # While the base constructor runs, Event.wait must yield control back
        # to the test's greenlet instead of blocking the process.
        def waitWrapper(self):
            return greenlet.getcurrent().parent.switch()

        origWait = threading._Event.wait
        threading._Event.wait = waitWrapper
        super(OneThreadedPublish, self).__init__(amqpURL)

        # The publish loop runs inside a greenlet that the test advances
        # explicitly through continueWithServer().
        self.testedServerContext = greenlet.greenlet(self.run)
        self.hasServerContestStarted = False

        # Queue items are handed to the server greenlet by the test rather
        # than popped by a blocking get() in a worker thread.
        def queueGetWrapper(*args, **kwargs):
            item = greenlet.getcurrent().parent.switch()
            return item

        self.originalGet = self._queue.get
        self._queue.get = queueGetWrapper
        threading._Event.wait = origWait

    def continueWithServer(self):
        # On the first call, start the server greenlet; afterwards, drain any
        # queued items into it one switch at a time.
        if self.hasServerContestStarted:
            while self._queue.qsize() > 0:
                item = self.originalGet(block=False)
                self.testedServerContext.switch(item)
        else:
            self.hasServerContestStarted = True
            self.testedServerContext.switch()
{ "content_hash": "bbf92201e6dbb0ca251e956db1d4454e", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 62, "avg_line_length": 34.02272727272727, "alnum_prop": 0.6432865731462926, "repo_name": "eliran-stratoscale/rackattack-api", "id": "392792e339952ee5c0ebe9a5c6d54db7be233e83", "size": "1497", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "py/rackattack/tests/one_threaded_publish.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "988" }, { "name": "Python", "bytes": "99070" } ], "symlink_target": "" }
import configparser
import fileinput
import os
import pwd
import shutil
import sys

swift_run_time_user = None


def _chown_to_swift(path):
    global swift_run_time_user
    uc = pwd.getpwnam(swift_run_time_user)
    os.chown(path, uc.pw_uid, uc.pw_gid)


def _unpatch_pipeline_line(orig_line, storlet_middleware):
    mds = list()
    for md in orig_line.split():
        if md == 'pipeline' or md == '=':
            continue
        mds.append(md)

    if storlet_middleware in mds:
        mds.remove(storlet_middleware)

    new_line = 'pipeline ='
    for md in mds:
        new_line += ' ' + md

    return new_line + '\n'


def _patch_proxy_pipeline_line(orig_line, storlet_middleware):
    mds = list()
    for md in orig_line.split():
        if md == 'pipeline' or md == '=':
            continue
        mds.append(md)

    if storlet_middleware in mds:
        return orig_line

    # If there is a 'copy' middleware, storlet_handler is placed
    # to the left of the 'copy' middleware.
    try:
        copy_index = mds.index('copy')
    except ValueError:
        copy_index = -1

    if copy_index != -1:
        mds.insert(copy_index, storlet_middleware)
    else:
        # If there is an slo middleware, storlet_handler is placed
        # to the left of the slo middleware.
        try:
            slo_index = mds.index('slo')
        except ValueError:
            slo_index = -1

        if slo_index != -1:
            mds.insert(slo_index, storlet_middleware)
        else:
            # Otherwise, storlet_handler is placed to the left of proxy-server.
            proxy_index = mds.index('proxy-server')
            mds.insert(proxy_index, storlet_middleware)

    new_line = 'pipeline ='
    for md in mds:
        new_line += ' ' + md

    return new_line + '\n'


def _patch_object_pipeline_line(orig_line, storlet_middleware):
    mds = list()
    for md in orig_line.split():
        if md == 'pipeline' or md == '=':
            continue
        mds.append(md)

    if storlet_middleware in mds:
        return orig_line

    object_index = mds.index('object-server')
    mds.insert(object_index, storlet_middleware)

    new_line = 'pipeline ='
    for md in mds:
        new_line += ' ' + md

    return new_line + '\n'


def unpatch_swift_config_file(conf, conf_file):
    storlet_middleware = conf.get('common-confs', 'storlet_middleware')

    for line in fileinput.input(conf_file, inplace=1):
        if line.startswith('pipeline'):
            new_line = _unpatch_pipeline_line(line, storlet_middleware)
            line = new_line
        sys.stdout.write(line)

    _chown_to_swift(conf_file)


def patch_swift_config_file(conf, conf_file, service):
    storlet_middleware = conf.get('common-confs', 'storlet_middleware')
    filter_block_first_line = '[filter:%s]\n' % storlet_middleware

    filter_in_file = False
    for line in fileinput.input(conf_file, inplace=1):
        if line.startswith('pipeline'):
            if service == 'proxy':
                new_line = _patch_proxy_pipeline_line(line, storlet_middleware)
            else:
                new_line = _patch_object_pipeline_line(line, storlet_middleware)
            line = new_line

        if filter_block_first_line in line:
            filter_in_file = True

        sys.stdout.write(line)

    if not filter_in_file:
        with open(conf_file, 'a') as f:
            f.write('\n')
            f.write(filter_block_first_line)
            f.write('use = egg:storlets#%s\n' % storlet_middleware)
            f.write('storlet_container = %s\n'
                    % conf.get('common-confs', 'storlet_container'))
            f.write('storlet_dependency = %s\n'
                    % conf.get('common-confs', 'storlet_dependency'))
            f.write('storlet_gateway_module = %s\n'
                    % conf.get('common-confs', 'storlet_gateway_module'))
            f.write('storlet_gateway_conf = %s\n'
                    % conf.get('common-confs', 'storlet_gateway_conf'))
            f.write('storlet_execute_on_proxy_only = %s\n' % conf.get(
                'common-confs', 'storlet_proxy_execution'))
            f.write('execution_server = %s\n' % service)

    _chown_to_swift(conf_file)


def unpatch_swift_storlet_proxy_file(conf):
    storlet_proxy_server_conf_file = conf.get('proxy-confs',
                                              'storlet_proxy_server_conf_file')
    if os.path.exists(storlet_proxy_server_conf_file):
        os.remove(storlet_proxy_server_conf_file)


def patch_swift_storlet_proxy_file(conf):
    storlet_proxy_server_conf_file = conf.get('proxy-confs',
                                              'storlet_proxy_server_conf_file')
    proxy_server_conf_file = conf.get('proxy-confs', 'proxy_server_conf_file')

    source_file = proxy_server_conf_file
    target_file = storlet_proxy_server_conf_file
    shutil.copyfile(source_file, target_file)

    for line in fileinput.input(storlet_proxy_server_conf_file, inplace=1):
        if line.startswith('pipeline'):
            # If there is no proxy-logging in the configuration file, we don't
            # want to add it to the pipeline. This may cause invalid internal
            # client configuration (we encountered this problem in a fuel swift
            # cluster).
            if 'proxy-logging' in line:
                line = ('pipeline = proxy-logging cache storlet_handler slo '
                        'proxy-logging proxy-server\n')
            else:
                line = 'pipeline = cache storlet_handler slo proxy-server\n'
        sys.stdout.write(line)

    _chown_to_swift(storlet_proxy_server_conf_file)


def remove_gateway_conf_file(conf):
    gateway_conf_file = conf.get('common-confs', 'storlet_gateway_conf')
    if os.path.exists(gateway_conf_file):
        os.remove(gateway_conf_file)


def remove(conf):
    object_server_conf_files = conf.get('object-confs',
                                        'object_server_conf_files').split(',')
    for f in object_server_conf_files:
        if os.path.exists(f):
            unpatch_swift_config_file(conf, f)

    proxy_server_conf_file = conf.get('proxy-confs', 'proxy_server_conf_file')
    unpatch_swift_config_file(conf, proxy_server_conf_file)
    unpatch_swift_storlet_proxy_file(conf)
    remove_gateway_conf_file(conf)


def install(conf):
    object_server_conf_files = conf.get('object-confs',
                                        'object_server_conf_files').split(',')
    for f in object_server_conf_files:
        if os.path.exists(f):
            patch_swift_config_file(conf, f, 'object')

    proxy_server_conf_file = conf.get('proxy-confs', 'proxy_server_conf_file')
    patch_swift_config_file(conf, proxy_server_conf_file, 'proxy')
    patch_swift_storlet_proxy_file(conf)


def usage(argv):
    print("Usage: %s %s %s" % (argv[0], "install/remove conf_file",
                               "swift_run_time_user"))


def main(argv):
    if len(argv) != 4:
        usage(argv)
        exit(-1)

    conf = configparser.ConfigParser()
    conf.read(argv[2])

    global swift_run_time_user
    swift_run_time_user = argv[3]

    if argv[1] == 'install':
        install(conf)
    elif argv[1] == 'remove':
        remove(conf)
    else:
        usage(argv)


if __name__ == "__main__":
    main(sys.argv)
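# Example invocation (paths are illustrative; the ini file must provide the
# [common-confs], [proxy-confs] and [object-confs] sections read above):
#
#   $ python swift_config.py install /etc/storlets/swift_config.ini swift
#   $ python swift_config.py remove /etc/storlets/swift_config.ini swift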
{ "content_hash": "76c9a0927f4935aaa6655630ecfd4b75", "timestamp": "", "source": "github", "line_count": 234, "max_line_length": 79, "avg_line_length": 31.585470085470085, "alnum_prop": 0.5831416587741848, "repo_name": "openstack/storlets", "id": "0ba743b2f75944162d8567270a654accfacda0ee", "size": "8039", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "devstack/swift_config.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "31430" }, { "name": "Java", "bytes": "184917" }, { "name": "Jupyter Notebook", "bytes": "7689" }, { "name": "Makefile", "bytes": "347" }, { "name": "Python", "bytes": "579917" }, { "name": "Shell", "bytes": "20127" } ], "symlink_target": "" }
""" DMLC submission script, MPI version """ # pylint: disable=invalid-name from __future__ import absolute_import import sys import subprocess, logging from threading import Thread from . import tracker def get_mpi_env(envs): """get the mpirun command for setting the envornment support both openmpi and mpich2 """ # windows hack: we will use msmpi if sys.platform == 'win32': for k, v in envs.items(): cmd += ' -env %s %s' % (k, str(v)) return cmd # decide MPI version. (_, err) = subprocess.Popen('mpirun', stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() cmd = '' if 'Open MPI' in err: for k, v in envs.items(): cmd += ' -x %s=%s' % (k, str(v)) elif 'mpich' in err: for k, v in envs.items(): cmd += ' -env %s %s' % (k, str(v)) else: raise RuntimeError('Unknown MPI Version') return cmd def submit(args): """Submission script with MPI.""" def mpi_submit(nworker, nserver, pass_envs): """Internal closure for job submission.""" def run(prog): """run the program""" subprocess.check_call(prog, shell=True) cmd = '' if args.host_file is not None: cmd = '--hostfile %s ' % (args.host_file) cmd += ' ' + ' '.join(args.command) pass_envs['DMLC_JOB_CLUSTER'] = 'mpi' # start workers if nworker > 0: logging.info('Start %d workers by mpirun' % nworker) pass_envs['DMLC_ROLE'] = 'worker' if sys.platform == 'win32': prog = 'mpiexec -n %d %s %s' % (nworker, get_mpi_env(pass_envs), cmd) else: prog = 'mpirun -n %d %s %s' % (nworker, get_mpi_env(pass_envs), cmd) thread = Thread(target=run, args=(prog,)) thread.setDaemon(True) thread.start() # start servers if nserver > 0: logging.info('Start %d servers by mpirun' % nserver) pass_envs['DMLC_ROLE'] = 'server' if sys.platform == 'win32': prog = 'mpiexec -n %d %s %s' % (nworker, get_mpi_env(pass_envs), cmd) else: prog = 'mpirun -n %d %s %s' % (nserver, get_mpi_env(pass_envs), cmd) thread = Thread(target=run, args=(prog,)) thread.setDaemon(True) thread.start() tracker.submit(args.num_workers, args.num_servers, fun_submit=mpi_submit, pscmd=(' '.join(args.command)))
{ "content_hash": "b9ecb4a0b8da182034d59e18b7189c60", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 85, "avg_line_length": 32, "alnum_prop": 0.522484756097561, "repo_name": "yhpeng-git/mxnet", "id": "943db95f91fd6e7828104a9e7df01d3eb38ce13e", "size": "2624", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dmlc-core/tracker/dmlc_tracker/mpi.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "11053" }, { "name": "C", "bytes": "88476" }, { "name": "C++", "bytes": "4755504" }, { "name": "CMake", "bytes": "147856" }, { "name": "Cuda", "bytes": "3403191" }, { "name": "Java", "bytes": "86766" }, { "name": "Jupyter Notebook", "bytes": "1229390" }, { "name": "Makefile", "bytes": "141324" }, { "name": "Matlab", "bytes": "30187" }, { "name": "Perl", "bytes": "575202" }, { "name": "Perl 6", "bytes": "21768" }, { "name": "Protocol Buffer", "bytes": "78574" }, { "name": "Python", "bytes": "2825418" }, { "name": "R", "bytes": "255240" }, { "name": "Scala", "bytes": "828520" }, { "name": "Shell", "bytes": "120692" } ], "symlink_target": "" }
from testfixtures import compare from service.ws_re.register.registers import Registers from service.ws_re.register.repo import DataRepo from service.ws_re.register.test_base import BaseTestRegister, copy_tst_data from service.ws_re.volumes import Volumes class TestRegisters(BaseTestRegister): def test_init(self): for volume in Volumes().all_volumes: copy_tst_data("I_1_base", volume.file_name) registers = Registers() iterator = iter(registers.volumes.values()) compare("I,1", next(iterator).volume.name) for _ in range(83): last = next(iterator) compare("R", last.volume.name) compare(84, len(registers.volumes)) compare("IV,1", registers["IV,1"].volume.name) def test_not_all_there(self): copy_tst_data("I_1_base", "I_1") Registers() def test_alphabetic(self): copy_tst_data("I_1_alpha", "I_1") copy_tst_data("III_1_alpha", "III_1") i = 0 for i, register in enumerate(Registers().alphabetic): if register.start == "a": compare(4, len(register)) continue if register.start == "b": compare(2, len(register)) continue if register.start == "ch": compare(1, len(register)) continue if register.start == "d": compare(1, len(register)) continue if register.start == "u": compare(2, len(register)) continue compare(43, i) def test_author(self): copy_tst_data("I_1_author", "I_1") copy_tst_data("III_1_author", "III_1") author_registers = iter(Registers().author) register = next(author_registers) compare("Herman Abel", register.author.name) compare(4, len(register)) register = next(author_registers) compare("Abert", register.author.name) compare(5, len(register)) register = next(author_registers) compare("William Abbott", register.author.name) compare(2, len(register)) def test_persist(self): copy_tst_data("I_1_alpha", "I_1") copy_tst_data("III_1_alpha", "III_1") registers = Registers() register_I_1 = registers["I,1"] register_I_1._lemmas[0]._chapters[0]._dict["author"] = "Siegfried" register_III_1 = registers["III,1"] register_III_1._lemmas[0]._chapters[0]._dict["author"] = "Siegfried" registers.persist() with open(DataRepo.get_data_path().joinpath("I_1.json"), mode="r", encoding="utf-8") as register_file: self.assertTrue("Siegfried" in register_file.read()) with open(DataRepo.get_data_path().joinpath("III_1.json"), mode="r", encoding="utf-8") as register_file: self.assertTrue("Siegfried" in register_file.read())
{ "content_hash": "44def6772cc58dd6524146fb207237e7", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 112, "avg_line_length": 39.472972972972975, "alnum_prop": 0.5826771653543307, "repo_name": "the-it/WS_THEbotIT", "id": "cafdbec703cfdf22c3e62ae1ab267e16bdded3ee", "size": "2969", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "service/ws_re/register/test_registers.py", "mode": "33188", "license": "mit", "language": [ { "name": "HCL", "bytes": "3121" }, { "name": "Makefile", "bytes": "3017" }, { "name": "Python", "bytes": "785189" }, { "name": "Shell", "bytes": "1199" } ], "symlink_target": "" }
import sys import time import random sys.path.insert(0, '..') import rrdmodel m = rrdmodel.RRDModel('.') year_minutes = 60*24*365 start = (int(time.time()) - year_minutes*60) * 1000 recv_total = 0 send_total = 0 one_percent = year_minutes / 100 for minute in xrange(year_minutes): recv_total += int(random.random()**8 * 20000000) send_total += int(random.random()**8 * 20000000) m.update(start + 60000*minute, (recv_total, send_total)) percent, remainder = divmod(minute, one_percent) if remainder == 0: sys.stderr.write('\r' + str(percent) + '%') sys.stderr.write('\n')
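# Usage (run from the tools/ directory so '..' resolves to the package
# containing rrdmodel; RRD files are written to the current directory):
#
#   $ python randomize-rrd.py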
{ "content_hash": "471c596f3f437c4b93bc652bb291dac4", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 60, "avg_line_length": 27.363636363636363, "alnum_prop": 0.6578073089700996, "repo_name": "welshjf/bitnomon", "id": "80cf0463bfbdac773c8e5843905ccb3c194afbe3", "size": "621", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/randomize-rrd.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "1668" }, { "name": "Makefile", "bytes": "776" }, { "name": "Python", "bytes": "66560" } ], "symlink_target": "" }
import logging import time import sys import gevent from cStringIO import StringIO from Debug import Debug from Config import config from util import helper from PeerHashfield import PeerHashfield if config.use_tempfiles: import tempfile # Communicate remote peers class Peer(object): __slots__ = ( "ip", "port", "site", "key", "connection", "connection_server", "time_found", "time_response", "time_hashfield", "time_added", "has_hashfield", "time_my_hashfield_sent", "last_ping", "last_content_json_update", "hashfield", "connection_error", "hash_failed", "download_bytes", "download_time" ) def __init__(self, ip, port, site=None, connection_server=None): self.ip = ip self.port = port self.site = site self.key = "%s:%s" % (ip, port) self.connection = None self.connection_server = connection_server self.has_hashfield = False # Lazy hashfield object not created yet self.time_hashfield = None # Last time peer's hashfiled downloaded self.time_my_hashfield_sent = None # Last time my hashfield sent to peer self.time_found = time.time() # Time of last found in the torrent tracker self.time_response = None # Time of last successful response from peer self.time_added = time.time() self.last_ping = None # Last response time for ping self.last_content_json_update = 0.0 # Modify date of last received content.json self.connection_error = 0 # Series of connection error self.hash_failed = 0 # Number of bad files from peer self.download_bytes = 0 # Bytes downloaded self.download_time = 0 # Time spent to download def __getattr__(self, key): if key == "hashfield": self.has_hashfield = True self.hashfield = PeerHashfield() return self.hashfield else: return getattr(self, key) def log(self, text): if not config.verbose: return # Only log if we are in debug mode if self.site: self.site.log.debug("%s:%s %s" % (self.ip, self.port, text)) else: logging.debug("%s:%s %s" % (self.ip, self.port, text)) # Connect to host def connect(self, connection=None): if self.connection: self.log("Getting connection (Closing %s)..." 
% self.connection) self.connection.close() else: self.log("Getting connection...") if connection: # Connection specified self.connection = connection self.connection.sites += 1 else: # Try to find from connection pool or create new connection self.connection = None try: if self.connection_server: self.connection = self.connection_server.getConnection(self.ip, self.port, site=self.site) elif self.site: self.connection = self.site.connection_server.getConnection(self.ip, self.port, site=self.site) else: self.connection = sys.modules["main"].file_server.getConnection(self.ip, self.port, site=self.site) self.connection.sites += 1 except Exception, err: self.onConnectionError() self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" % (Debug.formatException(err), self.connection_error, self.hash_failed)) self.connection = None # Check if we have connection to peer def findConnection(self): if self.connection and self.connection.connected: # We have connection to peer return self.connection else: # Try to find from other sites connections self.connection = self.site.connection_server.getConnection(self.ip, self.port, create=False, site=self.site) if self.connection: self.connection.sites += 1 return self.connection def __str__(self): return "Peer:%-12s" % self.ip def __repr__(self): return "<%s>" % self.__str__() def packMyAddress(self): if self.ip.endswith(".onion"): return helper.packOnionAddress(self.ip, self.port) else: return helper.packAddress(self.ip, self.port) # Found a peer on tracker def found(self): self.time_found = time.time() # Send a command to peer and return response value def request(self, cmd, params={}, stream_to=None): if not self.connection or self.connection.closed: self.connect() if not self.connection: self.onConnectionError() return None # Connection failed self.log("Send request: %s %s" % (params.get("site", ""), cmd)) for retry in range(1, 4): # Retry 3 times try: res = self.connection.request(cmd, params, stream_to) if not res: raise Exception("Send error") if "error" in res: self.log("%s error: %s" % (cmd, res["error"])) self.onConnectionError() else: # Successful request, reset connection error num self.connection_error = 0 self.time_response = time.time() return res except Exception, err: if type(err).__name__ == "Notify": # Greenlet killed by worker self.log("Peer worker got killed: %s, aborting cmd: %s" % (err.message, cmd)) break else: self.onConnectionError() self.log( "%s (connection_error: %s, hash_failed: %s, retry: %s)" % (Debug.formatException(err), self.connection_error, self.hash_failed, retry) ) time.sleep(1 * retry) self.connect() return None # Failed after 4 retry # Get a file content from peer def getFile(self, site, inner_path): # Use streamFile if client supports it if config.stream_downloads and self.connection and self.connection.handshake and self.connection.handshake["rev"] > 310: return self.streamFile(site, inner_path) location = 0 if config.use_tempfiles: buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b') else: buff = StringIO() s = time.time() while True: # Read in 512k parts res = self.request("getFile", {"site": site, "inner_path": inner_path, "location": location}) if not res or "body" not in res: # Error return False buff.write(res["body"]) res["body"] = None # Save memory if res["location"] == res["size"]: # End of file break else: location = res["location"] self.download_bytes += res["location"] self.download_time += (time.time() - s) if self.site: self.site.settings["bytes_recv"] = 
self.site.settings.get("bytes_recv", 0) + res["location"] buff.seek(0) return buff # Download file out of msgpack context to save memory and cpu def streamFile(self, site, inner_path): location = 0 if config.use_tempfiles: buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b') else: buff = StringIO() s = time.time() while True: # Read in 512k parts res = self.request("streamFile", {"site": site, "inner_path": inner_path, "location": location}, stream_to=buff) if not res or "location" not in res: # Error self.log("Invalid response: %s" % res) return False if res["location"] == res["size"]: # End of file break else: location = res["location"] self.download_bytes += res["location"] self.download_time += (time.time() - s) self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + res["location"] buff.seek(0) return buff # Send a ping request def ping(self): response_time = None for retry in range(1, 3): # Retry 3 times s = time.time() with gevent.Timeout(10.0, False): # 10 sec timeout, don't raise exception res = self.request("ping") if res and "body" in res and res["body"] == "Pong!": response_time = time.time() - s break # All fine, exit from for loop # Timeout reached or bad response self.onConnectionError() self.connect() time.sleep(1) if response_time: self.log("Ping: %.3f" % response_time) else: self.log("Ping failed") self.last_ping = response_time return response_time # Request peer exchange from peer def pex(self, site=None, need_num=5): if not site: site = self.site # If no site defined request peers for this site # give back 5 connectible peers packed_peers = helper.packPeers(self.site.getConnectablePeers(5)) request = {"site": site.address, "peers": packed_peers["ip4"], "need": need_num} if packed_peers["onion"]: request["peers_onion"] = packed_peers["onion"] res = self.request("pex", request) if not res or "error" in res: return False added = 0 # Ip4 for peer in res.get("peers", []): address = helper.unpackAddress(peer) if site.addPeer(*address): added += 1 # Onion for peer in res.get("peers_onion", []): address = helper.unpackOnionAddress(peer) if site.addPeer(*address): added += 1 if added: self.log("Added peers using pex: %s" % added) return added # List modified files since the date # Return: {inner_path: modification date,...} def listModified(self, since): return self.request("listModified", {"since": since, "site": self.site.address}) def updateHashfield(self, force=False): # Don't update hashfield again in 15 min if self.time_hashfield and time.time() - self.time_hashfield > 60 * 15 and not force: return False self.time_hashfield = time.time() res = self.request("getHashfield", {"site": self.site.address}) if not res or "error" in res: return False self.hashfield.replaceFromString(res["hashfield_raw"]) return self.hashfield # Find peers for hashids # Return: {hash1: ["ip:port", "ip:port",...],...} def findHashIds(self, hash_ids): res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids}) if not res or "error" in res: return False # Unpack IP4 back = {key: map(helper.unpackAddress, val) for key, val in res["peers"].items()[0:30]} # Unpack onion for hash, onion_peers in res.get("peers_onion", {}).items()[0:30]: if not hash in back: back[hash] = [] back[hash] += map(helper.unpackOnionAddress, onion_peers) return back # Send my hashfield to peer # Return: True if sent def sendMyHashfield(self): if self.connection and self.connection.handshake.get("rev", 0) < 510: return False # Not supported if 
self.time_my_hashfield_sent and self.site.content_manager.hashfield.time_changed <= self.time_my_hashfield_sent: return False # Peer already has the latest hashfield res = self.request("setHashfield", {"site": self.site.address, "hashfield_raw": self.site.content_manager.hashfield.tostring()}) if not res or "error" in res: return False else: self.time_my_hashfield_sent = time.time() return True # Stop and remove from site def remove(self): self.log("Removing peer...Connection error: %s, Hash failed: %s" % (self.connection_error, self.hash_failed)) if self.site and self.key in self.site.peers: del(self.site.peers[self.key]) if self.connection: self.connection.close() # - EVENTS - # On connection error def onConnectionError(self): self.connection_error += 1 if self.connection_error >= 3: # Dead peer self.remove() # Done working with peer def onWorkerDone(self): pass
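# Example (illustrative only -- the site object and connection wiring are
# normally provided by the surrounding ZeroNet runtime):
#
#   peer = Peer("192.168.1.10", 15441, site=site)
#   if peer.ping():                                   # reachable?
#       buff = peer.getFile(site.address, "content.json")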
{ "content_hash": "068e6f90d62aebea7750282c3d7a3ca6", "timestamp": "", "source": "github", "line_count": 333, "max_line_length": 156, "avg_line_length": 38.447447447447445, "alnum_prop": 0.5694759040849801, "repo_name": "kustomzone/Fuzium", "id": "ad5cf8dd82a176d277e5f4493ecc0e26524b8ad6", "size": "12803", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "core/src/Peer/Peer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1204" }, { "name": "C", "bytes": "34092" }, { "name": "CSS", "bytes": "373182" }, { "name": "CoffeeScript", "bytes": "88917" }, { "name": "HTML", "bytes": "123191" }, { "name": "JavaScript", "bytes": "2133526" }, { "name": "Python", "bytes": "2843920" }, { "name": "Shell", "bytes": "898" } ], "symlink_target": "" }
"""The tests for the uptime sensor platform.""" import unittest from unittest.mock import patch from datetime import timedelta from homeassistant.util.async_ import run_coroutine_threadsafe from homeassistant.setup import setup_component from homeassistant.components.sensor.uptime import UptimeSensor from tests.common import get_test_home_assistant class TestUptimeSensor(unittest.TestCase): """Test the uptime sensor.""" def setUp(self): """Set up things to run when tests begin.""" self.hass = get_test_home_assistant() def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_uptime_min_config(self): """Test minimum uptime configuration.""" config = { 'sensor': { 'platform': 'uptime', } } assert setup_component(self.hass, 'sensor', config) def test_uptime_sensor_name_change(self): """Test uptime sensor with different name.""" config = { 'sensor': { 'platform': 'uptime', 'name': 'foobar', } } assert setup_component(self.hass, 'sensor', config) def test_uptime_sensor_config_hours(self): """Test uptime sensor with hours defined in config.""" config = { 'sensor': { 'platform': 'uptime', 'unit_of_measurement': 'hours', } } assert setup_component(self.hass, 'sensor', config) def test_uptime_sensor_config_minutes(self): """Test uptime sensor with minutes defined in config.""" config = { 'sensor': { 'platform': 'uptime', 'unit_of_measurement': 'minutes', } } assert setup_component(self.hass, 'sensor', config) def test_uptime_sensor_days_output(self): """Test uptime sensor output data.""" sensor = UptimeSensor('test', 'days') assert sensor.unit_of_measurement == 'days' new_time = sensor.initial + timedelta(days=1) with patch('homeassistant.util.dt.now', return_value=new_time): run_coroutine_threadsafe( sensor.async_update(), self.hass.loop ).result() assert sensor.state == 1.00 new_time = sensor.initial + timedelta(days=111.499) with patch('homeassistant.util.dt.now', return_value=new_time): run_coroutine_threadsafe( sensor.async_update(), self.hass.loop ).result() assert sensor.state == 111.50 def test_uptime_sensor_hours_output(self): """Test uptime sensor output data.""" sensor = UptimeSensor('test', 'hours') assert sensor.unit_of_measurement == 'hours' new_time = sensor.initial + timedelta(hours=16) with patch('homeassistant.util.dt.now', return_value=new_time): run_coroutine_threadsafe( sensor.async_update(), self.hass.loop ).result() assert sensor.state == 16.00 new_time = sensor.initial + timedelta(hours=72.499) with patch('homeassistant.util.dt.now', return_value=new_time): run_coroutine_threadsafe( sensor.async_update(), self.hass.loop ).result() assert sensor.state == 72.50 def test_uptime_sensor_minutes_output(self): """Test uptime sensor output data.""" sensor = UptimeSensor('test', 'minutes') assert sensor.unit_of_measurement == 'minutes' new_time = sensor.initial + timedelta(minutes=16) with patch('homeassistant.util.dt.now', return_value=new_time): run_coroutine_threadsafe( sensor.async_update(), self.hass.loop ).result() assert sensor.state == 16.00 new_time = sensor.initial + timedelta(minutes=12.499) with patch('homeassistant.util.dt.now', return_value=new_time): run_coroutine_threadsafe( sensor.async_update(), self.hass.loop ).result() assert sensor.state == 12.50
{ "content_hash": "5ac542ebf0598495bcba89bf965e1b7a", "timestamp": "", "source": "github", "line_count": 117, "max_line_length": 71, "avg_line_length": 36.376068376068375, "alnum_prop": 0.5700187969924813, "repo_name": "HydrelioxGitHub/home-assistant", "id": "00552dd9e49883e9d88e116dc395521353616d57", "size": "4256", "binary": false, "copies": "4", "ref": "refs/heads/dev", "path": "tests/components/sensor/test_uptime.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1175" }, { "name": "Dockerfile", "bytes": "1081" }, { "name": "Python", "bytes": "14330009" }, { "name": "Ruby", "bytes": "745" }, { "name": "Shell", "bytes": "17364" } ], "symlink_target": "" }
"""Utilities for unit-testing Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import tensor_shape from tensorflow.python.training.rmsprop import RMSPropOptimizer from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_inspect def get_test_data(train_samples, test_samples, input_shape, num_classes, random_seed=None): """Generates test data to train a model on. Arguments: train_samples: Integer, how many training samples to generate. test_samples: Integer, how many test samples to generate. input_shape: Tuple of integers, shape of the inputs. num_classes: Integer, number of classes for the data and targets. random_seed: Integer, random seed used by numpy to generate data. Returns: A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. """ if random_seed is not None: np.random.seed(random_seed) num_sample = train_samples + test_samples templates = 2 * num_classes * np.random.random((num_classes,) + input_shape) y = np.random.randint(0, num_classes, size=(num_sample,)) x = np.zeros((num_sample,) + input_shape, dtype=np.float32) for i in range(num_sample): x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape) return ((x[:train_samples], y[:train_samples]), (x[train_samples:], y[train_samples:])) def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None, input_data=None, expected_output=None, expected_output_dtype=None): """Test routine for a layer with a single input and single output. Arguments: layer_cls: Layer class object. kwargs: Optional dictionary of keyword arguments for instantiating the layer. input_shape: Input shape tuple. input_dtype: Data type of the input data. input_data: Numpy array of input data. expected_output: Shape tuple for the expected shape of the output. expected_output_dtype: Data type expected for the output. Returns: The output data (Numpy array) returned by the layer, for additional checks to be done by the calling code. Raises: ValueError: if `input_shape is None`. 
""" if input_data is None: if input_shape is None: raise ValueError('input_shape is None') if not input_dtype: input_dtype = 'float32' input_data_shape = list(input_shape) for i, e in enumerate(input_data_shape): if e is None: input_data_shape[i] = np.random.randint(1, 4) input_data = 10 * np.random.random(input_data_shape) if input_dtype[:5] == 'float': input_data -= 0.5 input_data = input_data.astype(input_dtype) elif input_shape is None: input_shape = input_data.shape if input_dtype is None: input_dtype = input_data.dtype if expected_output_dtype is None: expected_output_dtype = input_dtype # instantiation kwargs = kwargs or {} layer = layer_cls(**kwargs) # test get_weights , set_weights at layer level weights = layer.get_weights() layer.set_weights(weights) # test and instantiation from weights if 'weights' in tf_inspect.getargspec(layer_cls.__init__): kwargs['weights'] = weights layer = layer_cls(**kwargs) # test in functional API x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype) y = layer(x) if keras.backend.dtype(y) != expected_output_dtype: raise AssertionError('When testing layer %s, for input %s, found output ' 'dtype=%s but expected to find %s.\nFull kwargs: %s' % (layer_cls.__name__, x, keras.backend.dtype(y), expected_output_dtype, kwargs)) # check shape inference model = keras.models.Model(x, y) expected_output_shape = tuple( layer.compute_output_shape( tensor_shape.TensorShape(input_shape)).as_list()) actual_output = model.predict(input_data) actual_output_shape = actual_output.shape for expected_dim, actual_dim in zip(expected_output_shape, actual_output_shape): if expected_dim is not None: if expected_dim != actual_dim: raise AssertionError( 'When testing layer %s, for input %s, found output_shape=' '%s but expected to find %s.\nFull kwargs: %s' % (layer_cls.__name__, x, actual_output_shape, expected_output_shape, kwargs)) if expected_output is not None: np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3) # test serialization, weight setting at model level model_config = model.get_config() recovered_model = keras.models.Model.from_config(model_config) if model.weights: weights = model.get_weights() recovered_model.set_weights(weights) output = recovered_model.predict(input_data) np.testing.assert_allclose(output, actual_output, rtol=1e-3) # test training mode (e.g. useful for dropout tests) # Rebuild the model to avoid the graph being reused between predict() and # train(). This was causing some error for layer with Defun as it body. # See b/120160788 for more details. This should be mitigated after 2.0. 
model = keras.models.Model(x, layer(x)) if _thread_local_data.run_eagerly is not None: model.compile(RMSPropOptimizer(0.01), 'mse', weighted_metrics=['acc'], run_eagerly=should_run_eagerly()) else: model.compile(RMSPropOptimizer(0.01), 'mse', weighted_metrics=['acc']) model.train_on_batch(input_data, actual_output) # test as first layer in Sequential API layer_config = layer.get_config() layer_config['batch_input_shape'] = input_shape layer = layer.__class__.from_config(layer_config) model = keras.models.Sequential() model.add(layer) actual_output = model.predict(input_data) actual_output_shape = actual_output.shape for expected_dim, actual_dim in zip(expected_output_shape, actual_output_shape): if expected_dim is not None: if expected_dim != actual_dim: raise AssertionError( 'When testing layer %s **after deserialization**, ' 'for input %s, found output_shape=' '%s but expected to find inferred shape %s.\nFull kwargs: %s' % (layer_cls.__name__, x, actual_output_shape, expected_output_shape, kwargs)) if expected_output is not None: np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3) # test serialization, weight setting at model level model_config = model.get_config() recovered_model = keras.models.Sequential.from_config(model_config) if model.weights: weights = model.get_weights() recovered_model.set_weights(weights) output = recovered_model.predict(input_data) np.testing.assert_allclose(output, actual_output, rtol=1e-3) # for further checks in the caller function return actual_output _thread_local_data = threading.local() _thread_local_data.model_type = None _thread_local_data.run_eagerly = None @tf_contextlib.contextmanager def model_type_scope(value): """Provides a scope within which the model type to test is equal to `value`. The model type gets restored to its original value upon exiting the scope. Arguments: value: model type value Yields: The provided value. """ previous_value = _thread_local_data.model_type try: _thread_local_data.model_type = value yield value finally: # Restore model type to initial value. _thread_local_data.model_type = previous_value @tf_contextlib.contextmanager def run_eagerly_scope(value): """Provides a scope within which we compile models to run eagerly or not. The boolean gets restored to its original value upon exiting the scope. Arguments: value: Bool specifying if we should run models eagerly in the active test. Should be True or False. Yields: The provided value. """ previous_value = _thread_local_data.run_eagerly try: _thread_local_data.run_eagerly = value yield value finally: # Restore model type to initial value. 
_thread_local_data.run_eagerly = previous_value def should_run_eagerly(): """Returns whether the models we are testing should be run eagerly.""" if _thread_local_data.run_eagerly is None: raise ValueError('Cannot call `should_run_eagerly()` outside of a ' '`run_eagerly_scope()` or `run_all_keras_modes` ' 'decorator.') return _thread_local_data.run_eagerly and context.executing_eagerly() def get_model_type(): """Gets the model type that should be tested.""" if _thread_local_data.model_type is None: raise ValueError('Cannot call `get_model_type()` outside of a ' '`model_type_scope()` or `run_with_all_model_types` ' 'decorator.') return _thread_local_data.model_type def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None): model = keras.models.Sequential() if input_dim: model.add(keras.layers.Dense(num_hidden, activation='relu', input_dim=input_dim)) else: model.add(keras.layers.Dense(num_hidden, activation='relu')) activation = 'sigmoid' if num_classes == 1 else 'softmax' model.add(keras.layers.Dense(num_classes, activation=activation)) return model def get_small_functional_mlp(num_hidden, num_classes, input_dim): inputs = keras.Input(shape=(input_dim,)) outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs) activation = 'sigmoid' if num_classes == 1 else 'softmax' outputs = keras.layers.Dense(num_classes, activation=activation)(outputs) return keras.Model(inputs, outputs) class _SmallSubclassMLP(keras.Model): """A subclass model based small MLP.""" def __init__(self, num_hidden, num_classes): super(_SmallSubclassMLP, self).__init__() self.layer_a = keras.layers.Dense(num_hidden, activation='relu') activation = 'sigmoid' if num_classes == 1 else 'softmax' self.layer_b = keras.layers.Dense(num_classes, activation=activation) def call(self, inputs, **kwargs): x = self.layer_a(inputs) return self.layer_b(x) class _SmallSubclassMLPCustomBuild(keras.Model): """A subclass model small MLP that uses a custom build method.""" def __init__(self, num_hidden, num_classes): super(_SmallSubclassMLPCustomBuild, self).__init__() self.layer_a = None self.layer_b = None self.num_hidden = num_hidden self.num_classes = num_classes def build(self, input_shape): self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu') activation = 'sigmoid' if self.num_classes == 1 else 'softmax' self.layer_b = keras.layers.Dense(self.num_classes, activation=activation) def call(self, inputs, **kwargs): x = self.layer_a(inputs) return self.layer_b(x) def get_small_subclass_mlp(num_hidden, num_classes): return _SmallSubclassMLP(num_hidden, num_classes) def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes): return _SmallSubclassMLPCustomBuild(num_hidden, num_classes) def get_small_mlp(num_hidden, num_classes, input_dim): """Get a small mlp of the model type specified by `get_model_type`.""" model_type = get_model_type() if model_type == 'subclass': return get_small_subclass_mlp(num_hidden, num_classes) if model_type == 'subclass_custom_build': return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes) if model_type == 'sequential': return get_small_sequential_mlp(num_hidden, num_classes, input_dim) if model_type == 'functional': return get_small_functional_mlp(num_hidden, num_classes, input_dim) raise ValueError('Unknown model type {}'.format(model_type)) class _SubclassModel(keras.Model): """A Keras subclass model.""" def __init__(self, layers): super(_SubclassModel, self).__init__() self.all_layers = layers def call(self, inputs, **kwargs): x = inputs for layer 
in self.all_layers: x = layer(x) return x class _SubclassModelCustomBuild(keras.Model): """A Keras subclass model that uses a custom build method.""" def __init__(self, layer_generating_func): super(_SubclassModelCustomBuild, self).__init__() self.all_layers = None self._layer_generating_func = layer_generating_func def build(self, input_shape): layers = [] for layer in self._layer_generating_func(): layers.append(layer) self.all_layers = layers def call(self, inputs, **kwargs): x = inputs for layer in self.all_layers: x = layer(x) return x def get_model_from_layers(layers, input_shape=None): """Builds a model from a sequence of layers.""" model_type = get_model_type() if model_type == 'subclass': return _SubclassModel(layers) if model_type == 'subclass_custom_build': layer_generating_func = lambda: layers return _SubclassModelCustomBuild(layer_generating_func) if model_type == 'sequential': model = keras.models.Sequential() if input_shape: model.add(keras.layers.InputLayer(input_shape=input_shape)) for layer in layers: model.add(layer) return model if model_type == 'functional': if not input_shape: raise ValueError('Cannot create a functional model from layers with no ' 'input shape.') inputs = keras.Input(shape=input_shape) outputs = inputs for layer in layers: outputs = layer(outputs) return keras.Model(inputs, outputs) raise ValueError('Unknown model type {}'.format(model_type)) class _MultiIOSubclassModel(keras.Model): """Multi IO Keras subclass model.""" def __init__(self, branch_a, branch_b, shared_input_branch=None, shared_output_branch=None): super(_MultiIOSubclassModel, self).__init__() self._shared_input_branch = shared_input_branch self._branch_a = branch_a self._branch_b = branch_b self._shared_output_branch = shared_output_branch def call(self, inputs, **kwargs): if self._shared_input_branch: for layer in self._shared_input_branch: inputs = layer(inputs) a = inputs b = inputs else: a, b = inputs for layer in self._branch_a: a = layer(a) for layer in self._branch_b: b = layer(b) outs = [a, b] if self._shared_output_branch: for layer in self._shared_output_branch: outs = layer(outs) return outs class _MultiIOSubclassModelCustomBuild(keras.Model): """Multi IO Keras subclass model that uses a custom build method.""" def __init__(self, branch_a_func, branch_b_func, shared_input_branch_func=None, shared_output_branch_func=None): super(_MultiIOSubclassModelCustomBuild, self).__init__() self._shared_input_branch_func = shared_input_branch_func self._branch_a_func = branch_a_func self._branch_b_func = branch_b_func self._shared_output_branch_func = shared_output_branch_func self._shared_input_branch = None self._branch_a = None self._branch_b = None self._shared_output_branch = None def build(self, input_shape): if self._shared_input_branch_func(): self._shared_input_branch = self._shared_input_branch_func() self._branch_a = self._branch_a_func() self._branch_b = self._branch_b_func() if self._shared_output_branch_func(): self._shared_output_branch = self._shared_output_branch_func() def call(self, inputs, **kwargs): if self._shared_input_branch: for layer in self._shared_input_branch: inputs = layer(inputs) a = inputs b = inputs else: a, b = inputs for layer in self._branch_a: a = layer(a) for layer in self._branch_b: b = layer(b) outs = a, b if self._shared_output_branch: for layer in self._shared_output_branch: outs = layer(outs) return outs def get_multi_io_model( branch_a, branch_b, shared_input_branch=None, shared_output_branch=None): """Builds a multi-io model that contains two 
branches.

  The produced model will be of the type specified by `get_model_type`.

  To build a two-input, two-output model:
    Specify a list of layers for branch a and branch b, but do not specify any
    shared input branch or shared output branch. The resulting model will apply
    each branch to a different input, to produce two outputs.

    The first value in branch_a must be the Keras 'Input' layer for branch a,
    and the first value in branch_b must be the Keras 'Input' layer for
    branch b.

    example usage:
    ```
    branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
    branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]

    model = get_multi_io_model(branch_a, branch_b)
    ```

  To build a two-input, one-output model:
    Specify a list of layers for branch a and branch b, and specify a shared
    output branch. The resulting model will apply each branch to a different
    input. It will then apply the shared output branch to a tuple containing
    the intermediate outputs of each branch, to produce a single output. The
    first layer in the shared_output_branch must be able to merge a tuple of
    two tensors.

    The first value in branch_a must be the Keras 'Input' layer for branch a,
    and the first value in branch_b must be the Keras 'Input' layer for
    branch b.

    example usage:
    ```
    input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
    input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
    shared_output_branch = [Concatenate(), Dense(), Dense()]

    model = get_multi_io_model(input_branch_a, input_branch_b,
                               shared_output_branch=shared_output_branch)
    ```

  To build a one-input, two-output model:
    Specify a list of layers for branch a and branch b, and specify a shared
    input branch. The resulting model will take one input, and apply the
    shared input branch to it. It will then respectively apply each branch to
    that intermediate result in parallel, to produce two outputs.

    The first value in the shared_input_branch must be the Keras 'Input' layer
    for the whole model. Branch a and branch b should not contain any Input
    layers.

    example usage:
    ```
    shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
    output_branch_a = [Dense(), Dense()]
    output_branch_b = [Dense(), Dense()]

    model = get_multi_io_model(output_branch_a, output_branch_b,
                               shared_input_branch=shared_input_branch)
    ```

  Args:
    branch_a: A sequence of layers for branch a of the model.
    branch_b: A sequence of layers for branch b of the model.
    shared_input_branch: An optional sequence of layers to apply to a single
      input, before applying both branches to that intermediate result. If set,
      the model will take only one input instead of two. Defaults to None.
    shared_output_branch: An optional sequence of layers to merge the
      intermediate results produced by branch a and branch b. If set, the model
      will produce only one output instead of two. Defaults to None.

  Returns:
    A multi-io model of the type specified by `get_model_type`, built from the
    different branches.
""" # Extract the functional inputs from the layer lists if shared_input_branch: inputs = shared_input_branch[0] shared_input_branch = shared_input_branch[1:] else: inputs = branch_a[0], branch_b[0] branch_a = branch_a[1:] branch_b = branch_b[1:] model_type = get_model_type() if model_type == 'subclass': return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch, shared_output_branch) if model_type == 'subclass_custom_build': return _MultiIOSubclassModelCustomBuild((lambda: branch_a), (lambda: branch_b), (lambda: shared_input_branch), (lambda: shared_output_branch)) if model_type == 'sequential': raise ValueError('Cannot use `get_multi_io_model` to construct ' 'sequential models') if model_type == 'functional': if shared_input_branch: a_and_b = inputs for layer in shared_input_branch: a_and_b = layer(a_and_b) a = a_and_b b = a_and_b else: a, b = inputs for layer in branch_a: a = layer(a) for layer in branch_b: b = layer(b) outputs = a, b if shared_output_branch: for layer in shared_output_branch: outputs = layer(outputs) return keras.Model(inputs, outputs) raise ValueError('Unknown model type {}'.format(model_type))
{ "content_hash": "1cee43de146b24f453339798c82151e7", "timestamp": "", "source": "github", "line_count": 614, "max_line_length": 80, "avg_line_length": 34.55048859934853, "alnum_prop": 0.6603186574903366, "repo_name": "asimshankar/tensorflow", "id": "fd062b0ab337aa6fa62a7603a36749cde315c3da", "size": "21903", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tensorflow/python/keras/testing_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "4882" }, { "name": "Batchfile", "bytes": "10132" }, { "name": "C", "bytes": "490070" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "52677142" }, { "name": "CMake", "bytes": "207176" }, { "name": "Dockerfile", "bytes": "39454" }, { "name": "Go", "bytes": "1290930" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "890529" }, { "name": "Jupyter Notebook", "bytes": "2618412" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "68402" }, { "name": "Objective-C", "bytes": "16140" }, { "name": "Objective-C++", "bytes": "102518" }, { "name": "PHP", "bytes": "5172" }, { "name": "Pascal", "bytes": "221" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "43038983" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "838" }, { "name": "Shell", "bytes": "497659" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
from __future__ import print_function from optparse import OptionParser from os.path import exists from sys import stderr from util import check_output opts = OptionParser() opts.add_option('-a', help='action (valid actions are: install,deploy)') opts.add_option('-v', help='gerrit version') opts.add_option('-d', help='dependencies (jars artifacts)') args, ctx = opts.parse_args() action = args.a if action not in ['deploy', 'install']: print("unknown action : %s" % action, file=stderr) exit(1) deps = args.d.split() if not deps: print('dependencies are empty') exit(1) extension_jar = [x for x in deps if "extension-api.jar" in x][0] extension_src = [x for x in deps if "extension-api-src.jar" in x][0] plugin_jar = [x for x in deps if "plugin-api.jar" in x][0] plugin_src = [x for x in deps if "plugin-api-src.jar" in x][0] version = args.v if not version: print('version is empty') exit(1) REPO_TYPE = 'snapshot' if version.endswith("SNAPSHOT") else 'release' URL = 's3://gerrit-api@commondatastorage.googleapis.com/%s' % REPO_TYPE plugin = ['-DartifactId=gerrit-plugin-api'] extension = ['-DartifactId=gerrit-extension-api'] common = [ '-DgroupId=com.google.gerrit', '-Dversion=%s' % version, ] jar = ['-Dpackaging=jar'] src = ['-Dpackaging=java-source'] cmd = { 'deploy': ['mvn', 'deploy:deploy-file', '-DrepositoryId=gerrit-api-repository', '-Durl=%s' % URL], 'install': ['mvn', 'install:install-file'], } try: check_output(cmd[action] + plugin + common + jar + ['-Dfile=%s' % plugin_jar]) check_output(cmd[action] + plugin + common + src + ['-Dfile=%s' % plugin_src]) check_output(cmd[action] + extension + common + jar + ['-Dfile=%s' % extension_jar]) check_output(cmd[action] + extension + common + src + ['-Dfile=%s' % extension_src]) except Exception as e: print('%s command failed: %s' % (action, e), file=stderr) exit(1)
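
# Editor's note: an illustrative invocation, not part of the original script.
# The artifact paths below are placeholders; the real ones come from the build
# and must contain the substrings matched above (plugin-api.jar,
# plugin-api-src.jar, extension-api.jar, extension-api-src.jar):
#
#   python maven_deploy.py -a install -v 2.5 \
#       -d "out/gerrit-plugin-api/plugin-api.jar \
#           out/gerrit-plugin-api/plugin-api-src.jar \
#           out/gerrit-extension-api/extension-api.jar \
#           out/gerrit-extension-api/extension-api-src.jar"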
{ "content_hash": "476037c16374a2afca0609c58a779592", "timestamp": "", "source": "github", "line_count": 77, "max_line_length": 72, "avg_line_length": 28.207792207792206, "alnum_prop": 0.5824125230202578, "repo_name": "zommarin/gerrit", "id": "4779abe577a0d84e04d59f03b987e1475cc23011", "size": "2786", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/maven_deploy.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "5925270" }, { "name": "JavaScript", "bytes": "1590" }, { "name": "Perl", "bytes": "9943" }, { "name": "Prolog", "bytes": "17421" }, { "name": "Python", "bytes": "19659" }, { "name": "Shell", "bytes": "36343" } ], "symlink_target": "" }
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import jn  # moved up from mid-file for clarity


class CurveHighLighter(object):
    def __init__(self, ax, alpha=0.3, linewidth=3):
        self.ax = ax
        self.alpha = alpha
        # Store the argument; the original hard-coded 3 here, silently
        # ignoring the linewidth parameter.
        self.linewidth = linewidth
        ax.figure.canvas.mpl_connect('motion_notify_event', self.on_move)

    def highlight(self, target):
        need_redraw = False
        if target is None:
            # No curve under the cursor: restore every line to normal width.
            for line in self.ax.lines:
                line.set_alpha(1.0)
                if line.get_linewidth() != 1.0:
                    line.set_linewidth(1.0)
                    need_redraw = True
        else:
            # Emphasize the hovered curve and fade the others.
            for line in self.ax.lines:
                lw = self.linewidth if line is target else 1
                if line.get_linewidth() != lw:
                    line.set_linewidth(lw)
                    need_redraw = True
                alpha = 1.0 if lw == self.linewidth else self.alpha
                line.set_alpha(alpha)
        if need_redraw:
            self.ax.figure.canvas.draw_idle()

    def on_move(self, evt):
        ax = self.ax
        for line in ax.lines:
            if line.contains(evt)[0]:
                self.highlight(line)
                break
        else:
            self.highlight(None)


fig, ax = plt.subplots()
x = np.linspace(0, 50, 300)
for i in range(1, 10):
    ax.plot(x, jn(i, x))

ch = CurveHighLighter(ax)
plt.show()
{ "content_hash": "8a61b5014c040d544cf68eeb0e515d9a", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 74, "avg_line_length": 33.07142857142857, "alnum_prop": 0.5291576673866091, "repo_name": "UpSea/midProjects", "id": "38ae95994c73166f96312a32b54ff6975b6d3df4", "size": "1389", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "BasicOperations/04_Matplotlib/06_mouseEvent_LineSelection.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "13285" }, { "name": "Batchfile", "bytes": "2076" }, { "name": "C", "bytes": "195139" }, { "name": "C++", "bytes": "56306" }, { "name": "Makefile", "bytes": "13644" }, { "name": "Objective-C", "bytes": "1051" }, { "name": "Python", "bytes": "625753" }, { "name": "R", "bytes": "15618" } ], "symlink_target": "" }
"""Support for monitoring the state of Digital Ocean droplets.""" import logging import voluptuous as vol from homeassistant.components.binary_sensor import ( DEVICE_CLASS_MOVING, PLATFORM_SCHEMA, BinarySensorEntity, ) from homeassistant.const import ATTR_ATTRIBUTION import homeassistant.helpers.config_validation as cv from . import ( ATTR_CREATED_AT, ATTR_DROPLET_ID, ATTR_DROPLET_NAME, ATTR_FEATURES, ATTR_IPV4_ADDRESS, ATTR_IPV6_ADDRESS, ATTR_MEMORY, ATTR_REGION, ATTR_VCPUS, ATTRIBUTION, CONF_DROPLETS, DATA_DIGITAL_OCEAN, ) _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Droplet" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_DROPLETS): vol.All(cv.ensure_list, [cv.string])} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Digital Ocean droplet sensor.""" if not (digital := hass.data.get(DATA_DIGITAL_OCEAN)): return False droplets = config[CONF_DROPLETS] dev = [] for droplet in droplets: droplet_id = digital.get_droplet_id(droplet) if droplet_id is None: _LOGGER.error("Droplet %s is not available", droplet) return False dev.append(DigitalOceanBinarySensor(digital, droplet_id)) add_entities(dev, True) class DigitalOceanBinarySensor(BinarySensorEntity): """Representation of a Digital Ocean droplet sensor.""" def __init__(self, do, droplet_id): """Initialize a new Digital Ocean sensor.""" self._digital_ocean = do self._droplet_id = droplet_id self._state = None self.data = None @property def name(self): """Return the name of the sensor.""" return self.data.name @property def is_on(self): """Return true if the binary sensor is on.""" return self.data.status == "active" @property def device_class(self): """Return the class of this sensor.""" return DEVICE_CLASS_MOVING @property def extra_state_attributes(self): """Return the state attributes of the Digital Ocean droplet.""" return { ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_CREATED_AT: self.data.created_at, ATTR_DROPLET_ID: self.data.id, ATTR_DROPLET_NAME: self.data.name, ATTR_FEATURES: self.data.features, ATTR_IPV4_ADDRESS: self.data.ip_address, ATTR_IPV6_ADDRESS: self.data.ip_v6_address, ATTR_MEMORY: self.data.memory, ATTR_REGION: self.data.region["name"], ATTR_VCPUS: self.data.vcpus, } def update(self): """Update state of sensor.""" self._digital_ocean.update() for droplet in self._digital_ocean.data: if droplet.id == self._droplet_id: self.data = droplet
{ "content_hash": "bc066733a2ed4afcb591352214a4e606", "timestamp": "", "source": "github", "line_count": 102, "max_line_length": 71, "avg_line_length": 28.294117647058822, "alnum_prop": 0.6302841302841303, "repo_name": "lukas-hetzenecker/home-assistant", "id": "b518809286288d81216ce32dc01490d36b457178", "size": "2886", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/digital_ocean/binary_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2443" }, { "name": "Python", "bytes": "38023745" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
import os
import json

from flask import abort


class Config():

    def __init__(self, config_file=None):
        self.config_file = os.path.dirname(__file__) + '/../cfg/config.json'
        if config_file is not None:
            self.config_file = os.path.dirname(__file__) + '/../cfg/' + config_file

    def load(self):
        if os.path.isfile(self.config_file):
            # Read the whole file at once; the original accumulated it
            # line by line into a string.
            with open(self.config_file) as fin:
                txt = fin.read()
            try:
                Config.settings = json.loads(txt)
                return True
            except ValueError:
                # Invalid JSON. The original used a bare `except:`, which
                # would also have hidden unrelated errors.
                abort(500)
        return False
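
# Editor's note: a minimal usage sketch, not part of the original module.
# It assumes a cfg/config.json (or cfg/<name>.json) exists one directory
# above this file, as the constructor's default path implies.
if __name__ == '__main__':
    cfg = Config()               # or Config('other.json')
    if cfg.load():
        print(Config.settings)   # parsed JSON is shared on the class
    else:
        print('config file not found')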
{ "content_hash": "58eb5b55201398986741eed28d3a945c", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 83, "avg_line_length": 28.458333333333332, "alnum_prop": 0.49633967789165445, "repo_name": "mcchin/data-collector", "id": "e991ba27549ff4c2bc614d653abea5112d812eae", "size": "683", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "classes/config.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "4338" } ], "symlink_target": "" }
import logging
import typing as t

from globus_sdk import exc, utils
from globus_sdk.authorizers import BasicAuthorizer
from globus_sdk.response import GlobusHTTPResponse

from ..flow_managers import GlobusAuthorizationCodeFlowManager
from ..oauth2_constants import DEFAULT_REQUESTED_SCOPES
from ..response import OAuthDependentTokenResponse, OAuthTokenResponse
from .base import AuthClient

log = logging.getLogger(__name__)


class ConfidentialAppAuthClient(AuthClient):
    """
    This is a specialized type of ``AuthClient`` used to represent an App with
    a Client ID and Client Secret wishing to communicate with Globus Auth.
    It must be given a Client ID and a Client Secret, and furthermore, these
    will be used to establish a
    :class:`BasicAuthorizer <globus_sdk.BasicAuthorizer>` for authorization
    purposes. Additionally, the Client ID is stored for use in various calls.

    Confidential Applications (i.e. Applications which are not Native Apps)
    are those like the
    `Sample Data Portal <https://github.com/globus/globus-sample-data-portal>`_,
    which have their own credentials for authenticating against Globus Auth.

    Any keyword arguments given are passed through to the ``AuthClient``
    constructor.

    .. automethodlist:: globus_sdk.ConfidentialAppAuthClient
    """

    def __init__(self, client_id: str, client_secret: str, **kwargs: t.Any):
        if "authorizer" in kwargs:
            log.error("ArgumentError(ConfidentialAppClient.authorizer)")
            raise exc.GlobusSDKUsageError(
                "Cannot give a ConfidentialAppAuthClient an authorizer"
            )
        super().__init__(
            client_id=client_id,
            authorizer=BasicAuthorizer(client_id, client_secret),
            **kwargs,
        )
        log.info(f"Finished initializing client, client_id={client_id}")

    def oauth2_client_credentials_tokens(
        self, requested_scopes: t.Optional[t.Union[str, t.Iterable[str]]] = None
    ) -> OAuthTokenResponse:
        r"""
        Perform an OAuth2 Client Credentials Grant to get access tokens which
        directly represent your client and allow it to act on its own
        (independent of any user authorization).
        This method does not use a ``GlobusOAuthFlowManager`` because it is
        not at all necessary to do so.

        :param requested_scopes: Space-separated scope names being requested for
            the access token(s). Defaults to a set of commonly desired scopes for
            Globus.
        :type requested_scopes: str or iterable of str, optional
        :rtype: :class:`OAuthTokenResponse <.OAuthTokenResponse>`

        For example, with a Client ID of "CID1001" and a Client Secret of
        "RAND2002", you could use this grant type like so:

        >>> client = ConfidentialAppAuthClient("CID1001", "RAND2002")
        >>> tokens = client.oauth2_client_credentials_tokens()
        >>> transfer_token_info = (
        ...     tokens.by_resource_server["transfer.api.globus.org"])
        >>> transfer_token = transfer_token_info["access_token"]
        """
        log.info("Fetching token(s) using client credentials")
        requested_scopes = requested_scopes or DEFAULT_REQUESTED_SCOPES
        # convert scopes iterable to string immediately on load
        if not isinstance(requested_scopes, str):
            requested_scopes = " ".join(requested_scopes)

        return self.oauth2_token(
            {"grant_type": "client_credentials", "scope": requested_scopes}
        )

    @utils.doc_api_method(
        "in the Globus Auth Specification",
        "auth/developer-guide/#obtaining-authorization",
        external_format_str=(
            "The Authorization Code Grant flow is described "
            "`{message} <{base_url}/{link}>`_."
        ),
    )
    def oauth2_start_flow(
        self,
        redirect_uri: str,
        requested_scopes: t.Optional[t.Union[str, t.Iterable[str]]] = None,
        *,
        state: str = "_default",
        refresh_tokens: bool = False,
    ) -> GlobusAuthorizationCodeFlowManager:
        """
        Starts or resumes an Authorization Code OAuth2 flow.

        Under the hood, this is done by instantiating a
        :class:`GlobusAuthorizationCodeFlowManager
        <.GlobusAuthorizationCodeFlowManager>`

        :param redirect_uri: The page that users should be directed to after
            authenticating at the authorize URL.
        :type redirect_uri: str
        :param requested_scopes: The scopes on the token(s) being requested, as
            a space-separated string or an iterable of strings. Defaults to
            ``openid profile email
            urn:globus:auth:scope:transfer.api.globus.org:all``
        :type requested_scopes: str or iterable of str, optional
        :param state: This string allows an application to pass information back
            to itself in the course of the OAuth flow. Because the user will
            navigate away from the application to complete the flow, this
            parameter lets the app pass an arbitrary string from the starting
            page to the ``redirect_uri``
        :type state: str, optional
        :param refresh_tokens: When True, request refresh tokens in addition to
            access tokens. [Default: ``False``]
        :type refresh_tokens: bool, optional

        **Examples**

        You can see an example of this flow
        :ref:`in the usage examples <examples_three_legged_oauth_login>`
        """
        log.info("Starting OAuth2 Authorization Code Grant Flow")
        self.current_oauth2_flow_manager = GlobusAuthorizationCodeFlowManager(
            self,
            redirect_uri,
            requested_scopes=requested_scopes,
            state=state,
            refresh_tokens=refresh_tokens,
        )
        return self.current_oauth2_flow_manager

    def oauth2_get_dependent_tokens(
        self, token: str, *, additional_params: t.Optional[t.Dict[str, t.Any]] = None
    ) -> OAuthDependentTokenResponse:
        """
        Does a
        `Dependent Token Grant
        <https://docs.globus.org/api/auth/reference/#dependent_token_grant_post_v2_oauth2_token>`_
        against Globus Auth.
        This exchanges a token given to this client for a new set of tokens
        which give it access to resource servers on which it depends.

        This grant type is intended for use by Resource Servers playing out the
        following scenario:

        1. User has tokens for Service A, but Service A requires access to
           Service B on behalf of the user
        2. Service B should not see tokens scoped for Service A
        3. Service A therefore requests tokens scoped only for Service B, based
           on tokens which were originally scoped for Service A...

        In order to do this exchange, the tokens for Service A must have scopes
        which depend on scopes for Service B (the services' scopes must encode
        their relationship). As long as that is the case, Service A can use this
        Grant to get those "Dependent" or "Downstream" tokens for Service B.
:param token: A Globus Access Token as a string :type token: str :param additional_params: Additional parameters to include in the request body :type additional_params: dict, optional :rtype: :class:`OAuthDependentTokenResponse <.OAuthDependentTokenResponse>` """ log.info("Getting dependent tokens from access token") log.debug(f"additional_params={additional_params}") form_data = { "grant_type": "urn:globus:auth:grant_type:dependent_token", "token": token, } if additional_params: form_data.update(additional_params) return self.oauth2_token(form_data, response_class=OAuthDependentTokenResponse) @utils.doc_api_method( "Token Introspection", "auth/reference/#token_introspection_post_v2_oauth2_token_introspect", ) def oauth2_token_introspect( self, token: str, *, include: t.Optional[str] = None ) -> GlobusHTTPResponse: """ POST /v2/oauth2/token/introspect Get information about a Globus Auth token. >>> ac = globus_sdk.ConfidentialAppAuthClient( ... CLIENT_ID, CLIENT_SECRET) >>> ac.oauth2_token_introspect('<token_string>') Get information about a Globus Auth token including the full identity set of the user to whom it belongs >>> ac = globus_sdk.ConfidentialAppAuthClient( ... CLIENT_ID, CLIENT_SECRET) >>> data = ac.oauth2_token_introspect( ... '<token_string>', include='identity_set') >>> for identity in data['identity_set']: >>> print('token authenticates for "{}"'.format(identity)) :param token: An Access Token as a raw string, being evaluated :type token: str :param include: A value for the ``include`` parameter in the request body. Default is to omit the parameter. :type include: str, optional """ log.info("Checking token validity (introspect)") body = {"token": token} if include is not None: body["include"] = include return self.post("/v2/oauth2/token/introspect", data=body, encoding="form")
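
# Editor's note: an illustrative sketch of chaining the dependent-token grant
# off an incoming user token; not part of the library source. The client
# credentials, the incoming token, and the resource-server key are
# placeholders (`by_resource_server` is the accessor already shown in the
# client-credentials docstring above).
#
#   ac = ConfidentialAppAuthClient("CLIENT_ID", "CLIENT_SECRET")
#   dependent = ac.oauth2_get_dependent_tokens(incoming_access_token)
#   transfer_tokens = dependent.by_resource_server["transfer.api.globus.org"]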
{ "content_hash": "709335d9ba859fb30ddf8216ea56a49e", "timestamp": "", "source": "github", "line_count": 215, "max_line_length": 98, "avg_line_length": 43.53488372093023, "alnum_prop": 0.6547008547008547, "repo_name": "globus/globus-sdk-python", "id": "d9ba1b556524202706cc4d080b85d227b95f80d1", "size": "9360", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/globus_sdk/services/auth/client/confidential_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jinja", "bytes": "303" }, { "name": "Makefile", "bytes": "810" }, { "name": "Python", "bytes": "896256" }, { "name": "Shell", "bytes": "125" } ], "symlink_target": "" }
from django.conf.urls.defaults import patterns, url from django.views.generic.simple import direct_to_template # views from django.contrib.auth import views as auth_views from django.conf import settings from lingcod.openid import views as oid_views from registration import views as reg_views from registration.views import activate from registration.views import register from lingcod.common.registration_backend.forms import MarineMapRegistrationForm urlpatterns = patterns('', # django registration activate # url( # r'^activate/(?P<activation_key>\w+)/$', # reg_views.activate, # {'backend': 'registration.backends.default.DefaultBackend'}, # name='registration_activate' # ), # user profile url(r'^password/reset/$', auth_views.password_reset, name='auth_password_reset'), url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', auth_views.password_reset_confirm, name='auth_password_reset_confirm'), url(r'^password/reset/complete/$', auth_views.password_reset_complete, name='auth_password_reset_complete'), url(r'^password/reset/done/$', auth_views.password_reset_done, name='auth_password_reset_done'), url(r'^password/$',oid_views.password_change, name='auth_password_change'), # manage account registration url(r'^associate/complete/$', oid_views.complete_associate, name='user_complete_associate'), url(r'^associate/$', oid_views.associate, name='user_associate'), url(r'^dissociate/$', oid_views.dissociate, name='user_dissociate'), url(r'^register/$', oid_views.register, name='user_register'), url(r'^signout/$', oid_views.signout, {'next_page': settings.LOGIN_REDIRECT_URL}, name='user_signout'), url(r'^signout/$', oid_views.signout, {'next_page': settings.LOGIN_REDIRECT_URL}, name='auth_logout'), url(r'^signin/complete/$', oid_views.complete_signin, name='user_complete_signin'), url( r'^signup/$', reg_views.register, {'backend': 'registration.backends.default.DefaultBackend', 'form_class': MarineMapRegistrationForm}, name='registration_register' ), url(r'^signup/complete/$',direct_to_template, {'template': 'registration/registration_complete.html'}, name='registration_complete'), url(r'^activate/complete/$', direct_to_template, { 'template': 'registration/activation_complete.html' , 'extra_context': {'group_request_email': settings.GROUP_REQUEST_EMAIL} }, name='registration_activation_complete'), # Activation keys get matched by \w+ instead of the more specific # [a-fA-F0-9]{40} because a bad activation key should still get to the view; # that way it can return a sensible "invalid key" message instead of a # confusing 404. 
    url(r'^activate/(?P<activation_key>\w+)/$',
        activate,
        { 'backend': 'lingcod.common.registration_backend.LingcodBackend' },
        name='registration_activate'),

    # url(r'^register/$',
    #     register,
    #     { 'backend': 'lingcod.common.registration_backend.LingcodBackend' },
    #     name='registration_register'),

    # url(r'^register/complete/$',
    #     direct_to_template,
    #     { 'template': 'registration/registration_complete.html'},
    #     name='registration_complete'),

    url(r'^register/closed/$',
        direct_to_template,
        { 'template': 'registration/registration_closed.html' },
        name='registration_disallowed'),

    # yadis uri
    url(r'^yadis.xrdf$', oid_views.xrdf, name='oid_xrdf'),
)

## The openid login behavior can be 'hidden' by use of a
# template which only allows local user/pass authentication
# Note that this does not disable openid completely; user could still
# POST openid credentials if they wanted to
try:
    use_openid = settings.OPENID_ENABLED
except AttributeError:
    # OPENID_ENABLED is not defined in settings; default to local-only signin.
    # (The original bare `except:` would also have swallowed unrelated errors.)
    use_openid = False

if use_openid:
    template_name = 'authopenid/signin.html'
else:
    template_name = 'authopenid/signin_local.html'

urlpatterns += patterns('',
    url(r'^signin/$', oid_views.signin, {'template_name':template_name}, name='user_signin'),
    url(r'^signin/$', oid_views.signin, {'template_name':template_name}, name='auth_login'),
)
{ "content_hash": "5dbba7724f49c9556ccdaa590cd85b8d", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 108, "avg_line_length": 41.20192307692308, "alnum_prop": 0.6686114352392065, "repo_name": "google-code-export/marinemap", "id": "2375382b4e279e25c4a58025f795da5a154d34fa", "size": "4928", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "lingcod/openid/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "62866" }, { "name": "HTML", "bytes": "350564" }, { "name": "JavaScript", "bytes": "1435695" }, { "name": "PLpgSQL", "bytes": "3371" }, { "name": "Python", "bytes": "1152113" }, { "name": "Shell", "bytes": "12077" } ], "symlink_target": "" }
'''This module contains the different readers used to parse both the target
and the query records from files.'''

import os.path
from itertools import islice

from Bio import SeqIO
from Bio.Seq import Seq

from pyPaSWAS.Core.SWSeqRecord import SWSeqRecord
from pyPaSWAS.Core.Exceptions import InvalidOptionException
from pyPaSWAS.Core.Exceptions import ReaderException


class Reader(object):
    '''The generic reader from which other readers inherit some common
    functionalities. Each reader should implement its own read_records.
    '''

    def __init__(self, logger, path, filetype, limitlength=5000):
        '''path: the absolute path to the file that contains the records
        limitlength: the maximum length of the records that will be used for the alignment
        '''
        self.logger = logger
        self.logger.debug('Initializing reader\n\tpath = {0}\n\tlimitlength = {1}...'.format(path, limitlength))
        self.rc_string = "_RC"
        self.path = ''
        self.filetype = filetype
        self.records = None
        self.limitlength = limitlength
        self._set_path(path)
        self._set_limit_length(limitlength)
        self.logger.debug('Initializing reader finished.')

    def sort_records(self, reverse=True):
        '''sorts the records'''
        self.logger.debug('Sorting records on length...')
        self.records.sort(key=lambda seqIO: len(seqIO.seq), reverse=reverse)

    @staticmethod
    def _is_a_readable_file(path):
        '''Checks whether or not a file is readable.'''
        if os.path.isfile(path) and os.access(path, os.R_OK):
            return True
        else:
            return False

    def _set_path(self, path):
        '''Sets the path to the file which contains the records.'''
        if self._is_a_readable_file(path):
            self.path = path
        else:
            raise InvalidOptionException('{0} is not a file or the program is not allowed to access it.'.format(path))

    def _set_filetype(self):
        '''This method sets the type of the file that will be parsed.
        Should be overridden in each reader class.
        '''
        self.filetype = 'Unknown'

    def _set_limit_length(self, limit_length):
        '''Sets the maximum record length that will be used for the alignment.'''
        try:
            self.limitlength = int(limit_length)
        except ValueError:
            raise InvalidOptionException('Limitlength should be an int but is {0}'.format(limit_length))

    def get_records(self):
        '''Getter for the parsed records'''
        #self.logger.debug('Returning records...')
        return self.records

    def complement_records(self):
        '''Appends the reverse complements to the parsed records'''
        #self.logger.debug('Creating complement sequences...')
        seqIO = lambda seqIO: SWSeqRecord(Seq(str(seqIO.seq.reverse_complement()), seqIO.seq.alphabet),
                                          identifier=(str(seqIO.id) + self.rc_string))
        self.records.extend([seqIO(record) for record in self.records])

    def complement_records_only(self):
        '''Replaces the parsed records with their reverse complements'''
        seqIO = lambda seqIO: SWSeqRecord(Seq(str(seqIO.seq.reverse_complement()), seqIO.seq.alphabet),
                                          identifier=(str(seqIO.id) + self.rc_string))
        self.records = [seqIO(record) for record in self.records]

    def reverse_records(self):
        '''Appends the reversed (not complemented) sequences to the parsed records'''
        #self.logger.debug('Creating reversed sequences...')
        seqIO = lambda seqIO: SWSeqRecord(Seq(str(seqIO.seq[::-1]), seqIO.seq.alphabet),
                                          identifier=(str(seqIO.id) + self.rc_string))
        self.records.extend([seqIO(record) for record in self.records])


class BioPythonReader(Reader):
    '''This class parses input files'''

    def read_records(self, start=0, end=None):
        '''Parses the records from a supported input file'''
        self.logger.debug('Reading from {} file...'.format(self.filetype))
        file_elements = open(self.path, "rU")
        self.records = list(islice(SeqIO.parse(file_elements, self.filetype), start, end))
        file_elements.close()
        if len(self.records) == 0:
            raise ReaderException('No (more) sequence data found in input file ({}), of file type {}.'.format(self.path, self.filetype))
        if self.limitlength > 0:
            nrecords = len(self.records)
            #self.logger.debug('Checking sequences length..')
            self.records = [SWSeqRecord(Seq(str(record.seq), record.seq.alphabet), identifier=record.id)
                            for record in self.records
                            if len(record.seq) <= self.limitlength and len(record.seq) > 0]
            diff = nrecords - len(self.records)
            if diff > 0:
                self.logger.info('{} sequence(s) removed with length > limit_length ({}bp)'.format(diff, self.limitlength))
            if end is None and len(self.records) == 0:
                self.logger.info('No sequences remaining after filtering on length for {}.'
                                 ' Please adjust using the limit_length parameter.'.format(self.path))
        else:
            self.records = [SWSeqRecord(Seq(str(record.seq), record.seq.alphabet), identifier=record.id)
                            for record in self.records]
        self.logger.debug('\t{} sequences read.'.format(len(self.records)))
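
# Editor's note: a minimal usage sketch, not part of the original module.
# It assumes a FASTA file exists at the given (placeholder) path.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    reader = BioPythonReader(logging.getLogger(__name__),
                             'reads.fasta', 'fasta', limitlength=5000)
    reader.read_records()
    reader.complement_records()  # appends the reverse complements in place
    print('{} records (including reverse complements)'.format(len(reader.get_records())))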
{ "content_hash": "f2892c0b6a7e3b3044822f8d8731a481", "timestamp": "", "source": "github", "line_count": 118, "max_line_length": 136, "avg_line_length": 47.00847457627118, "alnum_prop": 0.6224986479177934, "repo_name": "swarris/pyPaSWAS", "id": "5009f6e15000d747d09a4084d7b4a1582bede2e7", "size": "5547", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyPaSWAS/Core/Readers.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "55486" }, { "name": "Cuda", "bytes": "39887" }, { "name": "Dockerfile", "bytes": "4587" }, { "name": "Makefile", "bytes": "1520" }, { "name": "Python", "bytes": "235631" }, { "name": "Shell", "bytes": "14043" } ], "symlink_target": "" }
import ctypes from kowhai import * # protocol commands KOW_CMD_GET_VERSION = 0x00 KOW_CMD_GET_VERSION_ACK = 0x0F KOW_CMD_GET_TREE_LIST = 0x10 KOW_CMD_GET_TREE_LIST_ACK = 0x1F KOW_CMD_GET_TREE_LIST_ACK_END = 0x1E KOW_CMD_WRITE_DATA = 0x20 KOW_CMD_WRITE_DATA_END = 0x21 KOW_CMD_WRITE_DATA_ACK = 0x2F KOW_CMD_READ_DATA = 0x30 KOW_CMD_READ_DATA_ACK = 0x3F KOW_CMD_READ_DATA_ACK_END = 0x3E KOW_CMD_READ_DESCRIPTOR = 0x40 KOW_CMD_READ_DESCRIPTOR_ACK = 0x4F KOW_CMD_READ_DESCRIPTOR_ACK_END = 0x4E KOW_CMD_GET_FUNCTION_LIST = 0x50 KOW_CMD_GET_FUNCTION_LIST_ACK = 0x5F KOW_CMD_GET_FUNCTION_LIST_ACK_END = 0x5E KOW_CMD_GET_FUNCTION_DETAILS = 0x60 KOW_CMD_GET_FUNCTION_DETAILS_ACK = 0x6F KOW_CMD_CALL_FUNCTION = 0x70 KOW_CMD_CALL_FUNCTION_ACK = 0x7F KOW_CMD_CALL_FUNCTION_RESULT = 0x7E KOW_CMD_CALL_FUNCTION_RESULT_END = 0x7D KOW_CMD_CALL_FUNCTION_FAILED = 0x7C KOW_CMD_EVENT = 0x80 KOW_CMD_EVENT_END = 0x8F KOW_CMD_GET_SYMBOL_LIST = 0x90 KOW_CMD_GET_SYMBOL_LIST_ACK = 0x9F KOW_CMD_GET_SYMBOL_LIST_ACK_END = 0x9E # protocol error codes KOW_CMD_ERROR_INVALID_COMMAND = 0xF0 KOW_CMD_ERROR_INVALID_TREE_ID = 0xF1 KOW_CMD_ERROR_INVALID_FUNCTION_ID = 0xF2 KOW_CMD_ERROR_INVALID_SYMBOL_PATH = 0xF3 KOW_CMD_ERROR_INVALID_PAYLOAD_OFFSET = 0xF4 KOW_CMD_ERROR_INVALID_PAYLOAD_SIZE = 0xF5 KOW_CMD_ERROR_INVALID_SEQUENCE = 0xF6 KOW_CMD_ERROR_NO_DATA = 0xF7 KOW_CMD_ERROR_UNKNOWN = 0xFF class kowhai_protocol_header_t(ctypes.Structure): _pack_ = 1 _fields_ = [('command', uint8_t), ('id_', uint16_t)] class kowhai_protocol_symbol_spec_t(ctypes.Structure): _pack_ = 1 _fields_ = [('count', uint8_t), ('array_', ctypes.POINTER(kowhai_symbol_t))] class kowhai_protocol_data_payload_memory_spec_t(ctypes.Structure): _pack_ = 1 _fields_ = [('type_', uint16_t), ('offset', uint16_t), ('size', uint16_t)] class kowhai_protocol_data_payload_spec_t(ctypes.Structure): _pack_ = 1 _fields_ = [('symbols', kowhai_protocol_symbol_spec_t), ('memory', kowhai_protocol_data_payload_memory_spec_t)] class kowhai_protocol_descriptor_payload_spec_t(ctypes.Structure): _pack_ = 1 _fields_ = [('node_count', uint16_t), ('offset', uint16_t), ('size', uint16_t)] class kowhai_protocol_id_list_t(ctypes.Structure): _pack_ = 1 _fields_ = [('list_count', uint16_t), ('offset', uint16_t), ('size', uint16_t)] class kowhai_protocol_id_list_item_t(ctypes.Structure): _pack_ = 1 _fields_ = [('id', uint16_t), ('type_', uint16_t)] class kowhai_protocol_string_list_t(ctypes.Structure): _pack_ = 1 _fields_ = [('list_count', uint16_t), ('list_total_size', uint32_t), ('offset', uint16_t), ('size', uint16_t)] class kowhai_protocol_function_details_t(ctypes.Structure): _pack_ = 1 _fields_ = [('tree_in_id', uint16_t), ('tree_out_id', uint16_t)] class kowhai_protocol_function_call_t(ctypes.Structure): _pack_ = 1 _fields_ = [('offset', uint16_t), ('size', uint16_t)] class kowhai_protocol_event_t(ctypes.Structure): _pack_ = 1 _fields_ = [('offset', uint16_t), ('size', uint16_t)] class kowhai_protocol_payload_spec_t(ctypes.Union): _pack_ = 1 _fields_ = [('version', uint32_t), ('id_list', kowhai_protocol_id_list_t), ('data', kowhai_protocol_data_payload_spec_t), ('descriptor', kowhai_protocol_descriptor_payload_spec_t), ('function_details', kowhai_protocol_function_details_t), ('function_call', kowhai_protocol_function_call_t), ('event', kowhai_protocol_event_t), ('string_list', kowhai_protocol_string_list_t)] class kowhai_protocol_payload_t(ctypes.Structure): _pack_ = 1 _fields_ = [('spec', kowhai_protocol_payload_spec_t), ('buffer_', ctypes.c_void_p)] class kowhai_protocol_t(ctypes.Structure): 
_pack_ = 1 _fields_ = [('header', kowhai_protocol_header_t), ('payload', kowhai_protocol_payload_t)] #int kowhai_protocol_parse(void* proto_packet, int packet_size, struct kowhai_protocol_t* protocol); def parse(proto_packet, packet_size, protocol): return kowhai_lib.kowhai_protocol_parse(ctypes.byref(proto_packet), ctypes.c_int(packet_size), ctypes.byref(protocol)) #int kowhai_protocol_create(void* proto_packet, int packet_size, struct kowhai_protocol_t* protocol, int* bytes_required); def create(proto_packet, packet_size, protocol, bytes_required): return kowhai_lib.kowhai_protocol_create(ctypes.byref(proto_packet), ctypes.c_int(packet_size), ctypes.byref(protocol), ctypes.byref(bytes_required)) #int kowhai_protocol_get_overhead(struct kowhai_protocol_t* protocol, int* overhead); def get_overhead(protocol, overhead): return kowhai_lib.kowhai_protocol_get_overhead(ctypes.byref(protocol), ctypes.byref(overhead)) if __name__ == "__main__": print "test kowhai protocol wrapper" buf = ctypes.create_string_buffer("\x10\x01\x00") prot = kowhai_protocol_t() res = parse(buf, 3, prot) print "kowhai_protocol_parse() - res %d, prot.command: %d, prot.id: %d" % (res, prot.header.command, prot.header.id_) prot.header.command = KOW_CMD_READ_DESCRIPTOR prot.header.id_ = 65535 bytes_required = ctypes.c_int() res = create(buf, 3, prot, bytes_required) print "kowhai_protocol_create() - res %d, bytes_required: %d, buf: %s" % (res, bytes_required.value, repr(buf.raw)) overhead = ctypes.c_int() res = get_overhead(prot, overhead) print "kowhai_protocol_get_overhead() - res: %d, overhead: %d" % (res, overhead.value)
{ "content_hash": "d07aa8995cbcdda740c7a80042169475", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 153, "avg_line_length": 37.71895424836601, "alnum_prop": 0.6473748050597816, "repo_name": "djpnewton/kowhai", "id": "1626b4e19e5afb7be31bd900dae489a9c43baed7", "size": "5794", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/kowhai_protocol.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "163983" }, { "name": "C#", "bytes": "125163" }, { "name": "C++", "bytes": "10440" }, { "name": "Python", "bytes": "28010" }, { "name": "Shell", "bytes": "2224" } ], "symlink_target": "" }
import os import sys from warnings import warn from django import http from django.core import signals from django.core.handlers.base import BaseHandler from django.core.urlresolvers import set_script_prefix from django.utils import datastructures from django.utils.encoding import force_unicode, iri_to_uri from django.utils.log import getLogger logger = getLogger('django.request') # NOTE: do *not* import settings (or any module which eventually imports # settings) until after ModPythonHandler has been called; otherwise os.environ # won't be set up correctly (with respect to settings). class ModPythonRequest(http.HttpRequest): def __init__(self, req): self._req = req # FIXME: This isn't ideal. The request URI may be encoded (it's # non-normalized) slightly differently to the "real" SCRIPT_NAME # and PATH_INFO values. This causes problems when we compute path_info, # below. For now, don't use script names that will be subject to # encoding/decoding. self.path = force_unicode(req.uri) root = req.get_options().get('django.root', '') self.django_root = root # req.path_info isn't necessarily computed correctly in all # circumstances (it's out of mod_python's control a bit), so we use # req.uri and some string manipulations to get the right value. if root and req.uri.startswith(root): self.path_info = force_unicode(req.uri[len(root):]) else: self.path_info = self.path if not self.path_info: # Django prefers empty paths to be '/', rather than '', to give us # a common start character for URL patterns. So this is a little # naughty, but also pretty harmless. self.path_info = u'/' self._post_parse_error = False self._stream = self._req self._read_started = False def get_full_path(self): # RFC 3986 requires self._req.args to be in the ASCII range, but this # doesn't always happen, so rather than crash, we defensively encode it. return '%s%s' % (self.path, self._req.args and ('?' + iri_to_uri(self._req.args)) or '') def is_secure(self): try: return self._req.is_https() except AttributeError: # mod_python < 3.2.10 doesn't have req.is_https(). 
return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1') def _get_request(self): if not hasattr(self, '_request'): self._request = datastructures.MergeDict(self.POST, self.GET) return self._request def _get_get(self): if not hasattr(self, '_get'): self._get = http.QueryDict(self._req.args, encoding=self._encoding) return self._get def _set_get(self, get): self._get = get def _get_post(self): if not hasattr(self, '_post'): self._load_post_and_files() return self._post def _set_post(self, post): self._post = post def _get_cookies(self): if not hasattr(self, '_cookies'): self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', '')) return self._cookies def _set_cookies(self, cookies): self._cookies = cookies def _get_files(self): if not hasattr(self, '_files'): self._load_post_and_files() return self._files def _get_meta(self): "Lazy loader that returns self.META dictionary" if not hasattr(self, '_meta'): self._meta = { 'AUTH_TYPE': self._req.ap_auth_type, 'CONTENT_LENGTH': self._req.headers_in.get('content-length', 0), 'CONTENT_TYPE': self._req.headers_in.get('content-type'), 'GATEWAY_INTERFACE': 'CGI/1.1', 'PATH_INFO': self.path_info, 'PATH_TRANSLATED': None, # Not supported 'QUERY_STRING': self._req.args, 'REMOTE_ADDR': self._req.connection.remote_ip, 'REMOTE_HOST': None, # DNS lookups not supported 'REMOTE_IDENT': self._req.connection.remote_logname, 'REMOTE_USER': self._req.user, 'REQUEST_METHOD': self._req.method, 'SCRIPT_NAME': self.django_root, 'SERVER_NAME': self._req.server.server_hostname, 'SERVER_PORT': self._req.connection.local_addr[1], 'SERVER_PROTOCOL': self._req.protocol, 'SERVER_SOFTWARE': 'mod_python' } for key, value in self._req.headers_in.items(): key = 'HTTP_' + key.upper().replace('-', '_') self._meta[key] = value return self._meta def _get_method(self): return self.META['REQUEST_METHOD'].upper() GET = property(_get_get, _set_get) POST = property(_get_post, _set_post) COOKIES = property(_get_cookies, _set_cookies) FILES = property(_get_files) META = property(_get_meta) REQUEST = property(_get_request) method = property(_get_method) class ModPythonHandler(BaseHandler): request_class = ModPythonRequest def __call__(self, req): warn(('The mod_python handler is deprecated; use a WSGI or FastCGI server instead.'), DeprecationWarning) # mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that os.environ.update(req.subprocess_env) # now that the environ works we can see the correct settings, so imports # that use settings now can work from django.conf import settings # if we need to set up middleware, now that settings works we can do it now. if self._request_middleware is None: self.load_middleware() set_script_prefix(req.get_options().get('django.root', '')) signals.request_started.send(sender=self.__class__) try: try: request = self.request_class(req) except UnicodeDecodeError: logger.warning('Bad Request (UnicodeDecodeError)', exc_info=sys.exc_info(), extra={ 'status_code': 400, } ) response = http.HttpResponseBadRequest() else: response = self.get_response(request) finally: signals.request_finished.send(sender=self.__class__) # Convert our custom HttpResponse object back into the mod_python req. 
req.content_type = response['Content-Type'] for key, value in response.items(): if key != 'content-type': req.headers_out[str(key)] = str(value) for c in response.cookies.values(): req.headers_out.add('Set-Cookie', c.output(header='')) req.status = response.status_code try: for chunk in response: req.write(chunk) finally: response.close() return 0 # mod_python.apache.OK def handler(req): # mod_python hooks into this function. return ModPythonHandler()(req)
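
# Editor's note: illustrative Apache configuration for the handler above, not
# part of this module; the location, path, and settings module are
# placeholders. `PythonOption django.root` matches the option read in
# ModPythonRequest and ModPythonHandler.
#
#   <Location "/mysite/">
#       SetHandler python-program
#       PythonHandler django.core.handlers.modpython
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonOption django.root /mysite
#   </Location>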
{ "content_hash": "09ba853b67b62407392d773354677a98", "timestamp": "", "source": "github", "line_count": 185, "max_line_length": 96, "avg_line_length": 39.22162162162162, "alnum_prop": 0.5836549062844543, "repo_name": "mixman/djangodev", "id": "eba9dd39cac5465bba5cae9da3387ee5c22e8064", "size": "7256", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "django/core/handlers/modpython.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "88362" }, { "name": "Python", "bytes": "7834206" }, { "name": "Shell", "bytes": "9076" } ], "symlink_target": "" }
import unittest
import filecmp
import time
from threading import Event

from rx import *

from TorrentPython.DownloadManager import DownloadManager
from TorrentPython.MetaInfo import MetaInfo
from TorrentPython.RoutingTable import RoutingTable
from TorrentPython.TorrentUtils import TorrentUtils

SAMPLE_TORRENT_PATH = '../Resources/sample.torrent'
ROUTING_TABLE_PATH = '../Resources/routing_table.py'


class DownloadManagerTest(unittest.TestCase):
    def setUp(self):
        self.client_id = TorrentUtils.getPeerID()
        self.metainfo = MetaInfo.create_from_torrent(SAMPLE_TORRENT_PATH)
        self.info = self.metainfo.get_info()
        self.dest = 'D:/sandbox/'
        # self.routing_table = RoutingTable.load(ROUTING_TABLE_PATH)
        self.routing_table = None
        self.answer = 'D:/sandbox2/'

    def tearDown(self):
        pass

    @unittest.skip("clear")
    def test_new(self):
        testObj = DownloadManager.start(self.client_id, self.metainfo, self.dest, self.routing_table)
        testObj.stop()
        del testObj

    @unittest.skip("wait")
    def test_on_off(self):
        test_obj = DownloadManager.start(self.client_id, self.metainfo, self.dest, self.routing_table)
        self.assertIsNotNone(test_obj)

        test_obj.subscribe(lambda msg: print(msg))
        test_obj.on()
        time.sleep(10)
        # test_obj.off()
        test_obj.stop()
        del test_obj

    # @unittest.skip("wait")
    def test_download(self):
        testObj = DownloadManager.start(self.client_id, self.metainfo, self.dest, self.routing_table)
        self.assertIsNotNone(testObj)

        endEvent = Event()

        class DownloadManagerObserver(Observer):
            def __init__(self, event):
                self.endEvent = event

            def on_next(self, msg):
                print(msg)
                if msg.bitfield_ext.get_percent() == 100:
                    # Use the stored event rather than relying on the
                    # enclosing test method's closure.
                    self.endEvent.set()

            def on_completed(self):
                print('on_completed')

            def on_error(self, e):
                print('on_error')

        testObj.subscribe(DownloadManagerObserver(endEvent))
        testObj.on()
        endEvent.wait()

        # self.assertTrue(
        #     filecmp.cmp(self.dest + self.info.get_name().decode(),
        #                 self.answer + self.info.get_name().decode()))

        testObj.stop()
        del testObj


if __name__ == '__main__':
    unittest.main()
{ "content_hash": "421c1cb84f3aeabaa7208577ff7b3384", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 102, "avg_line_length": 29.3855421686747, "alnum_prop": 0.6178761787617876, "repo_name": "reignofmiracle/RM_Torrent", "id": "ef42b5fd10e3f183d9e120f7b6f8bd2a7330fbfa", "size": "2439", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "TorrentPython/unittest/DownloadManager_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "121128" } ], "symlink_target": "" }
import platform
import re

# Platform identification constants.
UNKNOWN = 0
RASPBERRY_PI = 1
BEAGLEBONE_BLACK = 2


def platform_detect():
    """Detect if running on the Raspberry Pi or Beaglebone Black and return the
    platform type.  Will return RASPBERRY_PI, BEAGLEBONE_BLACK, or UNKNOWN."""
    # Handle Raspberry Pi
    pi = pi_version()
    if pi is not None:
        return RASPBERRY_PI

    # Handle Beaglebone Black
    # TODO: Check the Beaglebone Black /proc/cpuinfo value instead of reading
    # the platform.
    plat = platform.platform()
    if plat.lower().find('armv7l-with-debian') > -1:
        return BEAGLEBONE_BLACK
    elif plat.lower().find('armv7l-with-ubuntu') > -1:
        return BEAGLEBONE_BLACK
    elif plat.lower().find('armv7l-with-glibc2.4') > -1:
        return BEAGLEBONE_BLACK
    elif plat.lower().find('armv7l-with-arch') > -1:
        return BEAGLEBONE_BLACK

    # Couldn't figure out the platform, just return unknown.
    return UNKNOWN


def pi_revision():
    """Detect the revision number of a Raspberry Pi, useful for changing
    functionality like default I2C bus based on revision."""
    # Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
    with open('/proc/cpuinfo', 'r') as infile:
        for line in infile:
            # Match a line of the form "Revision : 0002" while ignoring extra
            # info in front of the revision (like 1000 when the Pi was over-volted).
            match = re.match(r'Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE)
            if match and match.group(1) in ['0000', '0002', '0003']:
                # Return revision 1 if revision ends with 0000, 0002 or 0003.
                return 1
            elif match:
                # Assume revision 2 if revision ends with any other 4 chars.
                return 2
        # Couldn't find the revision, throw an exception.
        raise RuntimeError('Could not determine Raspberry Pi revision.')


def pi_version():
    """Detect the version of the Raspberry Pi.  Returns either 1, 2, 3 or
    None depending on whether it's a Raspberry Pi 1 (model A, B, A+, B+),
    Raspberry Pi 2 (model B+), Raspberry Pi 3, or not a Raspberry Pi.
    """
    # Check /proc/cpuinfo for the Hardware field value.
    # 2708 is pi 1
    # 2709 is pi 2
    # 2835 is pi 3
    # Anything else is not a pi.
    with open('/proc/cpuinfo', 'r') as infile:
        cpuinfo = infile.read()
    # Match a line like 'Hardware   : BCM2709'
    match = re.search(r'^Hardware\s+:\s+(\w+)$', cpuinfo,
                      flags=re.MULTILINE | re.IGNORECASE)
    if not match:
        # Couldn't find the hardware, assume it isn't a pi.
        return None
    if match.group(1) == 'BCM2708':
        # Pi 1
        return 1
    elif match.group(1) == 'BCM2709':
        # Pi 2
        return 2
    elif match.group(1) == 'BCM2835':
        # Pi 3
        return 3
    else:
        # Something else, not a pi.
        return None
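
# Editor's note: a minimal usage sketch, not part of the upstream module.
if __name__ == '__main__':
    board = platform_detect()
    if board == RASPBERRY_PI:
        print('Raspberry Pi version %s, revision %d' % (pi_version(), pi_revision()))
    elif board == BEAGLEBONE_BLACK:
        print('Beaglebone Black')
    else:
        print('Unknown platform')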
{ "content_hash": "09fc5bc546b8c25b8d04d14592dff23e", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 94, "avg_line_length": 36.10843373493976, "alnum_prop": 0.6139472806139473, "repo_name": "rjkunde/TempHumiditySensorProject", "id": "4aec91096a06377d1f959e244809b756eded6212", "size": "4375", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "Adafruit_DHT/platform_detect.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "50928" }, { "name": "C++", "bytes": "4627" }, { "name": "Python", "bytes": "46061" } ], "symlink_target": "" }
import django from django.conf import settings from django.utils.importlib import import_module import sys from tornado import web, ioloop from sockjs.tornado import SockJSRouter from swampdragon import discover_routes, load_field_deserializers from swampdragon.settings_provider import SettingsHandler def run_server(): if hasattr(django, 'setup'): django.setup() args = sys.argv HOST = '127.0.0.1' PORT = 9999 if len(args) > 1: host_port = args[1] HOST = host_port.split(':')[0] PORT = host_port.split(':')[1] routers = [] if hasattr(settings, 'SOCKJS_CLASSES'): raise Exception(''' -------------- The SOCKJS_CLASSES setting has been removed in favour of SWAMP_DRAGON_CONNECTION Update your settings and add SWAMP_DRAGON_CONNECTION. -------------- ''') module_name, cls_name = settings.SWAMP_DRAGON_CONNECTION[0].rsplit('.', 1) module = import_module(module_name) cls = getattr(module, cls_name) channel = settings.SWAMP_DRAGON_CONNECTION[1] routers.append(SockJSRouter(cls, channel)) print('Channel {}'.format(channel)) app_settings = { 'debug': settings.DEBUG, } urls = discover_routes() for router in routers: urls += router.urls urls.append(('/settings.js$', SettingsHandler)) load_field_deserializers() app = web.Application(urls, **app_settings) app.listen(PORT, address=HOST, no_keep_alive=False) print('Running SwampDragon on {}:{}'.format(HOST, PORT)) try: iol = ioloop.IOLoop.instance() iol.start() except KeyboardInterrupt: # so you don't think you erred when ^C'ing out pass
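
# Editor's note: an illustrative settings entry, not part of this module; the
# dotted class path and channel are placeholders. run_server() above unpacks
# the tuple as (connection class path, sockjs channel):
#
#   SWAMP_DRAGON_CONNECTION = ('myapp.connections.MyConnection', '/data')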
{ "content_hash": "ad65a528695c2985260cbed3fac1cca0", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 80, "avg_line_length": 28.2, "alnum_prop": 0.6495271867612293, "repo_name": "h-hirokawa/swampdragon", "id": "aea03b311fa855aa1c2a2d28486c8276ef34369b", "size": "1692", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "swampdragon/swampdragon_server.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "4233" }, { "name": "JavaScript", "bytes": "120634" }, { "name": "Python", "bytes": "166684" } ], "symlink_target": "" }
import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 12);
{ "content_hash": "bae6151b5c3d65403483e9846ee86ab8", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 164, "avg_line_length": 37.714285714285715, "alnum_prop": 0.7045454545454546, "repo_name": "antoinecarme/pyaf", "id": "773cdfe8845fb046de1008e4679a13ce675d6604", "size": "264", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/artificial/transf_None/trend_LinearTrend/cycle_0/ar_12/test_artificial_1024_None_LinearTrend_0_12_100.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): replaces = [ ("PartyList", "0016_auto_20180411_1734"), ("PartyList", "0017_auto_20180411_1739"), ("PartyList", "0018_auto_20180411_1740"), ] dependencies = [ ("PartyList", "0015_add_count_permission"), ] operations = [ migrations.AlterField( model_name="partyguest", name="timeFirstSignedIn", field=models.DateTimeField(), ), migrations.AlterField( model_name="partyguest", name="timeFirstSignedIn", field=models.DateTimeField(blank=True), ), migrations.AlterField( model_name="partyguest", name="timeFirstSignedIn", field=models.DateTimeField(null=True), ), ]
{ "content_hash": "11491dd9fe9a82671f3f4559778b1f2e", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 51, "avg_line_length": 26.441176470588236, "alnum_prop": 0.5728587319243604, "repo_name": "sigmapi-gammaiota/sigmapi-web", "id": "e168e959851abf5b5207578a3c3f6a88ced0cc25", "size": "972", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sigmapiweb/apps/PartyList/migrations/0016_fix_signed_in_time.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "47173" }, { "name": "HTML", "bytes": "265883" }, { "name": "JavaScript", "bytes": "1338629" }, { "name": "Python", "bytes": "335952" }, { "name": "SCSS", "bytes": "44203" }, { "name": "Shell", "bytes": "3928" } ], "symlink_target": "" }
""" Classes to represent the default SQL aggregate functions """ import copy from django.db.models.fields import IntegerField, FloatField __all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance'] # Fake fields used to identify aggregate types in data-conversion operations. ordinal_aggregate_field = IntegerField() computed_aggregate_field = FloatField() class Aggregate(object): """ Default SQL Aggregate. """ is_ordinal = False is_computed = False sql_template = '%(function)s(%(field)s)' def __init__(self, col, source=None, is_summary=False, **extra): """Instantiate an SQL aggregate * col is a column reference describing the subject field of the aggregate. It can be an alias, or a tuple describing a table and column name. * source is the underlying field or aggregate definition for the column reference. If the aggregate is not an ordinal or computed type, this reference is used to determine the coerced output type of the aggregate. * extra is a dictionary of additional data to provide for the aggregate definition Also utilizes the class variables: * sql_function, the name of the SQL function that implements the aggregate. * sql_template, a template string that is used to render the aggregate into SQL. * is_ordinal, a boolean indicating if the output of this aggregate is an integer (e.g., a count) * is_computed, a boolean indicating if this output of this aggregate is a computed float (e.g., an average), regardless of the input type. """ self.col = col self.source = source self.is_summary = is_summary self.extra = extra # Follow the chain of aggregate sources back until you find an # actual field, or an aggregate that forces a particular output # type. This type of this field will be used to coerce values # retrieved from the database. tmp = self while tmp and isinstance(tmp, Aggregate): if getattr(tmp, 'is_ordinal', False): tmp = ordinal_aggregate_field elif getattr(tmp, 'is_computed', False): tmp = computed_aggregate_field else: tmp = tmp.source self.field = tmp def relabeled_clone(self, change_map): clone = copy.copy(self) if isinstance(self.col, (list, tuple)): clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1]) return clone def as_sql(self, qn, connection): "Return the aggregate, rendered as SQL with parameters." params = [] if hasattr(self.col, 'as_sql'): field_name, params = self.col.as_sql(qn, connection) elif isinstance(self.col, (list, tuple)): field_name = '.'.join(qn(c) for c in self.col) else: field_name = qn(self.col) substitutions = { 'function': self.sql_function, 'field': field_name } substitutions.update(self.extra) return self.sql_template % substitutions, params class Avg(Aggregate): is_computed = True sql_function = 'AVG' class Count(Aggregate): is_ordinal = True sql_function = 'COUNT' sql_template = '%(function)s(%(distinct)s%(field)s)' def __init__(self, col, distinct=False, **extra): super(Count, self).__init__(col, distinct='DISTINCT ' if distinct else '', **extra) class Max(Aggregate): sql_function = 'MAX' class Min(Aggregate): sql_function = 'MIN' class StdDev(Aggregate): is_computed = True def __init__(self, col, sample=False, **extra): super(StdDev, self).__init__(col, **extra) self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP' class Sum(Aggregate): sql_function = 'SUM' class Variance(Aggregate): is_computed = True def __init__(self, col, sample=False, **extra): super(Variance, self).__init__(col, **extra) self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP'
{ "content_hash": "74840fdc3f6f82c2ff0c63c036d78573", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 91, "avg_line_length": 30.985294117647058, "alnum_prop": 0.6120075937351684, "repo_name": "AlexHill/django", "id": "8542a330c64c4aae607dc38a9bb15e13a177258c", "size": "4214", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "django/db/models/sql/aggregates.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "51177" }, { "name": "JavaScript", "bytes": "102290" }, { "name": "Python", "bytes": "9207507" }, { "name": "Shell", "bytes": "12137" } ], "symlink_target": "" }
"""Functions for setting up a Samba configuration (LDB and LDAP backends).""" from base64 import b64encode import errno import ldb import os import sys import uuid import time import shutil import subprocess import urllib from ldb import SCOPE_BASE, SCOPE_ONELEVEL, LdbError, timestring from samba import Ldb, read_and_sub_file, setup_file from samba.credentials import Credentials, DONT_USE_KERBEROS from samba.schema import Schema class SlapdAlreadyRunning(Exception): def __init__(self, uri): self.ldapi_uri = uri super(SlapdAlreadyRunning, self).__init__("Another slapd Instance " "seems already running on this host, listening to %s." % self.ldapi_uri) class BackendResult(object): def report_logger(self, logger): """Rerport this result to a particular logger. """ raise NotImplementedError(self.report_logger) class LDAPBackendResult(BackendResult): def __init__(self, credentials, slapd_command_escaped, ldapdir): self.credentials = credentials self.slapd_command_escaped = slapd_command_escaped self.ldapdir = ldapdir def report_logger(self, logger): if self.credentials.get_bind_dn() is not None: logger.info("LDAP Backend Admin DN: %s" % self.credentials.get_bind_dn()) else: logger.info("LDAP Admin User: %s" % self.credentials.get_username()) if self.slapd_command_escaped is not None: # now display slapd_command_file.txt to show how slapd must be # started next time logger.info( "Use later the following commandline to start slapd, then Samba:") logger.info(self.slapd_command_escaped) logger.info( "This slapd-Commandline is also stored under: %s/ldap_backend_startup.sh", self.ldapdir) class ProvisionBackend(object): def __init__(self, backend_type, paths=None, lp=None, credentials=None, names=None, logger=None): """Provision a backend for samba4""" self.paths = paths self.lp = lp self.credentials = credentials self.names = names self.logger = logger self.type = backend_type # Set a default - the code for "existing" below replaces this self.ldap_backend_type = backend_type def init(self): """Initialize the backend.""" raise NotImplementedError(self.init) def start(self): """Start the backend.""" raise NotImplementedError(self.start) def shutdown(self): """Shutdown the backend.""" raise NotImplementedError(self.shutdown) def post_setup(self): """Post setup. :return: A BackendResult or None """ raise NotImplementedError(self.post_setup) class LDBBackend(ProvisionBackend): def init(self): self.credentials = None self.secrets_credentials = None # Wipe the old sam.ldb databases away shutil.rmtree(self.paths.samdb + ".d", True) def start(self): pass def shutdown(self): pass def post_setup(self): pass class ExistingBackend(ProvisionBackend): def __init__(self, backend_type, paths=None, lp=None, credentials=None, names=None, logger=None, ldapi_uri=None): super(ExistingBackend, self).__init__(backend_type=backend_type, paths=paths, lp=lp, credentials=credentials, names=names, logger=logger, ldap_backend_forced_uri=ldapi_uri) def init(self): # Check to see that this 'existing' LDAP backend in fact exists ldapi_db = Ldb(self.ldapi_uri, credentials=self.credentials) ldapi_db.search(base="", scope=SCOPE_BASE, expression="(objectClass=OpenLDAProotDSE)") # If we have got here, then we must have a valid connection to the LDAP # server, with valid credentials supplied This caused them to be set # into the long-term database later in the script. 
        self.secrets_credentials = self.credentials

        # For now, assume existing backends at least emulate OpenLDAP
        self.ldap_backend_type = "openldap"


class LDAPBackend(ProvisionBackend):

    def __init__(self, backend_type, paths=None, lp=None, credentials=None,
                 names=None, logger=None, domainsid=None, schema=None,
                 hostname=None, ldapadminpass=None, slapd_path=None,
                 ldap_backend_extra_port=None, ldap_backend_forced_uri=None,
                 ldap_dryrun_mode=True):
        super(LDAPBackend, self).__init__(backend_type=backend_type,
                paths=paths, lp=lp, credentials=credentials, names=names,
                logger=logger)

        self.domainsid = domainsid
        self.schema = schema
        self.hostname = hostname

        self.ldapdir = os.path.join(paths.private_dir, "ldap")
        self.ldapadminpass = ldapadminpass

        self.slapd_path = slapd_path
        self.slapd_command = None
        self.slapd_command_escaped = None
        self.slapd_pid = os.path.join(self.ldapdir, "slapd.pid")

        self.ldap_backend_extra_port = ldap_backend_extra_port
        self.ldap_dryrun_mode = ldap_dryrun_mode

        if ldap_backend_forced_uri is not None:
            self.ldap_uri = ldap_backend_forced_uri
        else:
            self.ldap_uri = "ldapi://%s" % urllib.quote(
                os.path.join(self.ldapdir, "ldapi"), safe="")

        if not os.path.exists(self.ldapdir):
            os.mkdir(self.ldapdir)

    def init(self):
        from samba.provision import ProvisioningError

        # we will shortly start slapd with ldapi for final provisioning. first
        # check with ldapsearch -> rootDSE via self.ldap_uri if another
        # instance of slapd is already running
        try:
            ldapi_db = Ldb(self.ldap_uri)
            ldapi_db.search(base="", scope=SCOPE_BASE,
                expression="(objectClass=OpenLDAProotDSE)")
            try:
                f = open(self.slapd_pid, "r")
            except IOError, err:
                # compare the error code, not the exception object itself
                if err.errno != errno.ENOENT:
                    raise
            else:
                try:
                    p = f.read()
                finally:
                    f.close()
                self.logger.info("Check for slapd process with PID: %s and terminate it manually." % p)
            raise SlapdAlreadyRunning(self.ldap_uri)
        except LdbError:
            # XXX: We should never be catching all Ldb errors
            pass

        # Try to print helpful messages when the user has not specified the
        # path to slapd
        if self.slapd_path is None:
            raise ProvisioningError("Warning: LDAP-Backend must be setup with path to slapd, e.g. --slapd-path=\"/usr/local/libexec/slapd\"!")
        if not os.path.exists(self.slapd_path):
            self.logger.warning("Path (%s) to slapd does not exist!",
                self.slapd_path)

        if not os.path.isdir(self.ldapdir):
            os.makedirs(self.ldapdir, 0700)

        # Put the LDIF of the schema into a database so we can search on
        # it to generate schema-dependent configurations in Fedora DS and
        # OpenLDAP
        schemadb_path = os.path.join(self.ldapdir, "schema-tmp.ldb")
        try:
            os.unlink(schemadb_path)
        except OSError:
            pass

        self.schema.write_to_tmp_ldb(schemadb_path)

        self.credentials = Credentials()
        self.credentials.guess(self.lp)
        # Kerberos to an ldapi:// backend makes no sense
        self.credentials.set_kerberos_state(DONT_USE_KERBEROS)
        self.credentials.set_password(self.ldapadminpass)

        self.secrets_credentials = Credentials()
        self.secrets_credentials.guess(self.lp)
        # Kerberos to an ldapi:// backend makes no sense
        self.secrets_credentials.set_kerberos_state(DONT_USE_KERBEROS)
        self.secrets_credentials.set_username("samba-admin")
        self.secrets_credentials.set_password(self.ldapadminpass)

        self.provision()

    def provision(self):
        pass

    def start(self):
        from samba.provision import ProvisioningError
        self.slapd_command_escaped = "\'" + "\' \'".join(self.slapd_command) + "\'"
        f = open(os.path.join(self.ldapdir, "ldap_backend_startup.sh"), 'w')
        try:
            f.write("#!/bin/sh\n" + self.slapd_command_escaped + "\n")
        finally:
            f.close()

        # Now start the slapd, so we can provision onto it.  We keep the
        # subprocess context around, to kill this off at the successful
        # end of the script
        self.slapd = subprocess.Popen(self.slapd_provision_command,
            close_fds=True, shell=False)

        count = 0
        while self.slapd.poll() is None:
            # Wait until the socket appears
            try:
                ldapi_db = Ldb(self.ldap_uri, lp=self.lp,
                    credentials=self.credentials)
                ldapi_db.search(base="", scope=SCOPE_BASE,
                    expression="(objectClass=OpenLDAProotDSE)")
                # If we have got here, then we must have a valid connection to
                # the LDAP server!
                return
            except LdbError:
                time.sleep(1)
                count = count + 1

                if count > 15:
                    self.logger.error("Could not connect to slapd started with: %s" %
                        ("\'" + "\' \'".join(self.slapd_provision_command) + "\'"))
                    raise ProvisioningError("slapd never accepted a connection within 15 seconds of starting")

        self.logger.error("Could not start slapd with: %s" %
            ("\'" + "\' \'".join(self.slapd_provision_command) + "\'"))
        raise ProvisioningError("slapd died before we could make a connection to it")

    def shutdown(self):
        # if an LDAP backend is in use, terminate slapd after final provision
        # and check its proper termination
        if self.slapd.poll() is None:
            # Kill the slapd
            if getattr(self.slapd, "terminate", None) is not None:
                self.slapd.terminate()
            else:
                # Older python versions don't have .terminate()
                import signal
                os.kill(self.slapd.pid, signal.SIGTERM)

            # and now wait for it to die
            self.slapd.communicate()

    def post_setup(self):
        return LDAPBackendResult(self.credentials, self.slapd_command_escaped,
                    self.ldapdir)


class OpenLDAPBackend(LDAPBackend):

    def __init__(self, backend_type, paths=None, lp=None, credentials=None,
            names=None, logger=None, domainsid=None, schema=None,
            hostname=None, ldapadminpass=None, slapd_path=None,
            ldap_backend_extra_port=None, ldap_dryrun_mode=True,
            ol_mmr_urls=None, nosync=False, ldap_backend_forced_uri=None):
        from samba.provision import setup_path
        super(OpenLDAPBackend, self).__init__(backend_type=backend_type,
                paths=paths, lp=lp, credentials=credentials, names=names,
                logger=logger, domainsid=domainsid, schema=schema,
                hostname=hostname, ldapadminpass=ldapadminpass,
                slapd_path=slapd_path,
                ldap_backend_extra_port=ldap_backend_extra_port,
                ldap_backend_forced_uri=ldap_backend_forced_uri,
                ldap_dryrun_mode=ldap_dryrun_mode)

        self.ol_mmr_urls = ol_mmr_urls
        self.nosync = nosync

        self.slapdconf = os.path.join(self.ldapdir, "slapd.conf")
        self.modulesconf = os.path.join(self.ldapdir, "modules.conf")
        self.memberofconf = os.path.join(self.ldapdir, "memberof.conf")
        self.olmmrserveridsconf = os.path.join(self.ldapdir,
            "mmr_serverids.conf")
        self.olmmrsyncreplconf = os.path.join(self.ldapdir,
            "mmr_syncrepl.conf")
        self.olcdir = os.path.join(self.ldapdir, "slapd.d")
        self.olcseedldif = os.path.join(self.ldapdir, "olc_seed.ldif")

        self.schema = Schema(self.domainsid,
            schemadn=self.names.schemadn, files=[
                setup_path("schema_samba4.ldif")])

    def setup_db_config(self, dbdir):
        """Setup a Berkeley database.

        :param dbdir: Database directory.
""" from samba.provision import setup_path if not os.path.isdir(os.path.join(dbdir, "bdb-logs")): os.makedirs(os.path.join(dbdir, "bdb-logs"), 0700) if not os.path.isdir(os.path.join(dbdir, "tmp")): os.makedirs(os.path.join(dbdir, "tmp"), 0700) setup_file(setup_path("DB_CONFIG"), os.path.join(dbdir, "DB_CONFIG"), {"LDAPDBDIR": dbdir}) def provision(self): from samba.provision import ProvisioningError, setup_path # Wipe the directories so we can start shutil.rmtree(os.path.join(self.ldapdir, "db"), True) # Allow the test scripts to turn off fsync() for OpenLDAP as for TDB # and LDB nosync_config = "" if self.nosync: nosync_config = "dbnosync" lnkattr = self.schema.linked_attributes() refint_attributes = "" memberof_config = "# Generated from Samba4 schema\n" for att in lnkattr.keys(): if lnkattr[att] is not None: refint_attributes = refint_attributes + " " + att memberof_config += read_and_sub_file( setup_path("memberof.conf"), { "MEMBER_ATTR": att, "MEMBEROF_ATTR" : lnkattr[att] }) refint_config = read_and_sub_file(setup_path("refint.conf"), { "LINK_ATTRS" : refint_attributes}) attrs = ["linkID", "lDAPDisplayName"] res = self.schema.ldb.search(expression="(&(objectclass=attributeSchema)(searchFlags:1.2.840.113556.1.4.803:=1))", base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs) index_config = "" for i in range (0, len(res)): index_attr = res[i]["lDAPDisplayName"][0] if index_attr == "objectGUID": index_attr = "entryUUID" index_config += "index " + index_attr + " eq\n" # generate serverids, ldap-urls and syncrepl-blocks for mmr hosts mmr_on_config = "" mmr_replicator_acl = "" mmr_serverids_config = "" mmr_syncrepl_schema_config = "" mmr_syncrepl_config_config = "" mmr_syncrepl_user_config = "" if self.ol_mmr_urls is not None: # For now, make these equal mmr_pass = self.ldapadminpass url_list = filter(None,self.ol_mmr_urls.split(',')) for url in url_list: self.logger.info("Using LDAP-URL: "+url) if len(url_list) == 1: raise ProvisioningError("At least 2 LDAP-URLs needed for MMR!") mmr_on_config = "MirrorMode On" mmr_replicator_acl = " by dn=cn=replicator,cn=samba read" serverid = 0 for url in url_list: serverid = serverid + 1 mmr_serverids_config += read_and_sub_file( setup_path("mmr_serverids.conf"), { "SERVERID": str(serverid), "LDAPSERVER": url }) rid = serverid * 10 rid = rid + 1 mmr_syncrepl_schema_config += read_and_sub_file( setup_path("mmr_syncrepl.conf"), { "RID" : str(rid), "MMRDN": self.names.schemadn, "LDAPSERVER" : url, "MMR_PASSWORD": mmr_pass}) rid = rid + 1 mmr_syncrepl_config_config += read_and_sub_file( setup_path("mmr_syncrepl.conf"), { "RID" : str(rid), "MMRDN": self.names.configdn, "LDAPSERVER" : url, "MMR_PASSWORD": mmr_pass}) rid = rid + 1 mmr_syncrepl_user_config += read_and_sub_file( setup_path("mmr_syncrepl.conf"), { "RID" : str(rid), "MMRDN": self.names.domaindn, "LDAPSERVER" : url, "MMR_PASSWORD": mmr_pass }) # OpenLDAP cn=config initialisation olc_syncrepl_config = "" olc_mmr_config = "" # if mmr = yes, generate cn=config-replication directives # and olc_seed.lif for the other mmr-servers if self.ol_mmr_urls is not None: serverid = 0 olc_serverids_config = "" olc_syncrepl_seed_config = "" olc_mmr_config += read_and_sub_file( setup_path("olc_mmr.conf"), {}) rid = 500 for url in url_list: serverid = serverid + 1 olc_serverids_config += read_and_sub_file( setup_path("olc_serverid.conf"), { "SERVERID" : str(serverid), "LDAPSERVER" : url }) rid = rid + 1 olc_syncrepl_config += read_and_sub_file( setup_path("olc_syncrepl.conf"), { "RID" : str(rid), 
"LDAPSERVER" : url, "MMR_PASSWORD": mmr_pass}) olc_syncrepl_seed_config += read_and_sub_file( setup_path("olc_syncrepl_seed.conf"), { "RID" : str(rid), "LDAPSERVER" : url}) setup_file(setup_path("olc_seed.ldif"), self.olcseedldif, {"OLC_SERVER_ID_CONF": olc_serverids_config, "OLC_PW": self.ldapadminpass, "OLC_SYNCREPL_CONF": olc_syncrepl_seed_config}) # end olc setup_file(setup_path("slapd.conf"), self.slapdconf, {"DNSDOMAIN": self.names.dnsdomain, "LDAPDIR": self.ldapdir, "DOMAINDN": self.names.domaindn, "CONFIGDN": self.names.configdn, "SCHEMADN": self.names.schemadn, "MEMBEROF_CONFIG": memberof_config, "MIRRORMODE": mmr_on_config, "REPLICATOR_ACL": mmr_replicator_acl, "MMR_SERVERIDS_CONFIG": mmr_serverids_config, "MMR_SYNCREPL_SCHEMA_CONFIG": mmr_syncrepl_schema_config, "MMR_SYNCREPL_CONFIG_CONFIG": mmr_syncrepl_config_config, "MMR_SYNCREPL_USER_CONFIG": mmr_syncrepl_user_config, "OLC_SYNCREPL_CONFIG": olc_syncrepl_config, "OLC_MMR_CONFIG": olc_mmr_config, "REFINT_CONFIG": refint_config, "INDEX_CONFIG": index_config, "NOSYNC": nosync_config}) self.setup_db_config(os.path.join(self.ldapdir, "db", "user")) self.setup_db_config(os.path.join(self.ldapdir, "db", "config")) self.setup_db_config(os.path.join(self.ldapdir, "db", "schema")) if not os.path.exists(os.path.join(self.ldapdir, "db", "samba", "cn=samba")): os.makedirs(os.path.join(self.ldapdir, "db", "samba", "cn=samba"), 0700) setup_file(setup_path("cn=samba.ldif"), os.path.join(self.ldapdir, "db", "samba", "cn=samba.ldif"), { "UUID": str(uuid.uuid4()), "LDAPTIME": timestring(int(time.time()))} ) setup_file(setup_path("cn=samba-admin.ldif"), os.path.join(self.ldapdir, "db", "samba", "cn=samba", "cn=samba-admin.ldif"), {"LDAPADMINPASS_B64": b64encode(self.ldapadminpass), "UUID": str(uuid.uuid4()), "LDAPTIME": timestring(int(time.time()))} ) if self.ol_mmr_urls is not None: setup_file(setup_path("cn=replicator.ldif"), os.path.join(self.ldapdir, "db", "samba", "cn=samba", "cn=replicator.ldif"), {"MMR_PASSWORD_B64": b64encode(mmr_pass), "UUID": str(uuid.uuid4()), "LDAPTIME": timestring(int(time.time()))} ) mapping = "schema-map-openldap-2.3" backend_schema = "backend-schema.schema" f = open(setup_path(mapping), 'r') try: backend_schema_data = self.schema.convert_to_openldap( "openldap", f.read()) finally: f.close() assert backend_schema_data is not None f = open(os.path.join(self.ldapdir, backend_schema), 'w') try: f.write(backend_schema_data) finally: f.close() # now we generate the needed strings to start slapd automatically, if self.ldap_backend_extra_port is not None: # When we use MMR, we can't use 0.0.0.0 as it uses the name # specified there as part of it's clue as to it's own name, # and not to replicate to itself if self.ol_mmr_urls is None: server_port_string = "ldap://0.0.0.0:%d" % self.ldap_backend_extra_port else: server_port_string = "ldap://%s.%s:%d" (self.names.hostname, self.names.dnsdomain, self.ldap_backend_extra_port) else: server_port_string = "" # Prepare the 'result' information - the commands to return in # particular self.slapd_provision_command = [self.slapd_path, "-F" + self.olcdir, "-h"] # copy this command so we have two version, one with -d0 and only # ldapi (or the forced ldap_uri), and one with all the listen commands self.slapd_command = list(self.slapd_provision_command) self.slapd_provision_command.extend([self.ldap_uri, "-d0"]) uris = self.ldap_uri if server_port_string is not "": uris = uris + " " + server_port_string self.slapd_command.append(uris) # Set the username - done here because Fedora DS 
still uses the admin # DN and simple bind self.credentials.set_username("samba-admin") # Wipe the old sam.ldb databases away shutil.rmtree(self.olcdir, True) os.makedirs(self.olcdir, 0770) # If we were just looking for crashes up to this point, it's a # good time to exit before we realise we don't have OpenLDAP on # this system if self.ldap_dryrun_mode: sys.exit(0) slapd_cmd = [self.slapd_path, "-Ttest", "-n", "0", "-f", self.slapdconf, "-F", self.olcdir] retcode = subprocess.call(slapd_cmd, close_fds=True, shell=False) if retcode != 0: self.logger.error("conversion from slapd.conf to cn=config failed slapd started with: %s" % "\'" + "\' \'".join(slapd_cmd) + "\'") raise ProvisioningError("conversion from slapd.conf to cn=config failed") if not os.path.exists(os.path.join(self.olcdir, "cn=config.ldif")): raise ProvisioningError("conversion from slapd.conf to cn=config failed") # Don't confuse the admin by leaving the slapd.conf around os.remove(self.slapdconf) class FDSBackend(LDAPBackend): def __init__(self, backend_type, paths=None, lp=None, credentials=None, names=None, logger=None, domainsid=None, schema=None, hostname=None, ldapadminpass=None, slapd_path=None, ldap_backend_extra_port=None, ldap_dryrun_mode=True, root=None, setup_ds_path=None): from samba.provision import setup_path super(FDSBackend, self).__init__(backend_type=backend_type, paths=paths, lp=lp, credentials=credentials, names=names, logger=logger, domainsid=domainsid, schema=schema, hostname=hostname, ldapadminpass=ldapadminpass, slapd_path=slapd_path, ldap_backend_extra_port=ldap_backend_extra_port, ldap_backend_forced_uri=ldap_backend_forced_uri, ldap_dryrun_mode=ldap_dryrun_mode) self.root = root self.setup_ds_path = setup_ds_path self.ldap_instance = self.names.netbiosname.lower() self.sambadn = "CN=Samba" self.fedoradsinf = os.path.join(self.ldapdir, "fedorads.inf") self.partitions_ldif = os.path.join(self.ldapdir, "fedorads-partitions.ldif") self.sasl_ldif = os.path.join(self.ldapdir, "fedorads-sasl.ldif") self.dna_ldif = os.path.join(self.ldapdir, "fedorads-dna.ldif") self.pam_ldif = os.path.join(self.ldapdir, "fedorads-pam.ldif") self.refint_ldif = os.path.join(self.ldapdir, "fedorads-refint.ldif") self.linked_attrs_ldif = os.path.join(self.ldapdir, "fedorads-linked-attributes.ldif") self.index_ldif = os.path.join(self.ldapdir, "fedorads-index.ldif") self.samba_ldif = os.path.join(self.ldapdir, "fedorads-samba.ldif") self.samba3_schema = setup_path( "../../examples/LDAP/samba.schema") self.samba3_ldif = os.path.join(self.ldapdir, "samba3.ldif") self.retcode = subprocess.call(["bin/oLschema2ldif", "-I", self.samba3_schema, "-O", self.samba3_ldif, "-b", self.names.domaindn], close_fds=True, shell=False) if self.retcode != 0: raise Exception("Unable to convert Samba 3 schema.") self.schema = Schema( self.domainsid, schemadn=self.names.schemadn, files=[setup_path("schema_samba4.ldif"), self.samba3_ldif], additional_prefixmap=["1000:1.3.6.1.4.1.7165.2.1", "1001:1.3.6.1.4.1.7165.2.2"]) def provision(self): from samba.provision import ProvisioningError, setup_path if self.ldap_backend_extra_port is not None: serverport = "ServerPort=%d" % self.ldap_backend_extra_port else: serverport = "" setup_file(setup_path("fedorads.inf"), self.fedoradsinf, {"ROOT": self.root, "HOSTNAME": self.hostname, "DNSDOMAIN": self.names.dnsdomain, "LDAPDIR": self.ldapdir, "DOMAINDN": self.names.domaindn, "LDAP_INSTANCE": self.ldap_instance, "LDAPMANAGERDN": self.names.ldapmanagerdn, "LDAPMANAGERPASS": self.ldapadminpass, "SERVERPORT": 
serverport}) setup_file(setup_path("fedorads-partitions.ldif"), self.partitions_ldif, {"CONFIGDN": self.names.configdn, "SCHEMADN": self.names.schemadn, "SAMBADN": self.sambadn, }) setup_file(setup_path("fedorads-sasl.ldif"), self.sasl_ldif, {"SAMBADN": self.sambadn, }) setup_file(setup_path("fedorads-dna.ldif"), self.dna_ldif, {"DOMAINDN": self.names.domaindn, "SAMBADN": self.sambadn, "DOMAINSID": str(self.domainsid), }) setup_file(setup_path("fedorads-pam.ldif"), self.pam_ldif) lnkattr = self.schema.linked_attributes() f = open(setup_path("fedorads-refint-delete.ldif"), 'r') try: refint_config = f.read() finally: f.close() memberof_config = "" index_config = "" argnum = 3 for attr in lnkattr.keys(): if lnkattr[attr] is not None: refint_config += read_and_sub_file( setup_path("fedorads-refint-add.ldif"), { "ARG_NUMBER" : str(argnum), "LINK_ATTR" : attr }) memberof_config += read_and_sub_file( setup_path("fedorads-linked-attributes.ldif"), { "MEMBER_ATTR" : attr, "MEMBEROF_ATTR" : lnkattr[attr] }) index_config += read_and_sub_file( setup_path("fedorads-index.ldif"), { "ATTR" : attr }) argnum += 1 f = open(self.refint_ldif, 'w') try: f.write(refint_config) finally: f.close() f = open(self.linked_attrs_ldif, 'w') try: f.write(memberof_config) finally: f.close() attrs = ["lDAPDisplayName"] res = self.schema.ldb.search(expression="(&(objectclass=attributeSchema)(searchFlags:1.2.840.113556.1.4.803:=1))", base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs) for i in range (0, len(res)): attr = res[i]["lDAPDisplayName"][0] if attr == "objectGUID": attr = "nsUniqueId" index_config += read_and_sub_file( setup_path("fedorads-index.ldif"), { "ATTR" : attr }) f = open(self.index_ldif, 'w') try: f.write(index_config) finally: f.close() setup_file(setup_path("fedorads-samba.ldif"), self.samba_ldif, { "SAMBADN": self.sambadn, "LDAPADMINPASS": self.ldapadminpass }) mapping = "schema-map-fedora-ds-1.0" backend_schema = "99_ad.ldif" # Build a schema file in Fedora DS format f = open(setup_path(mapping), 'r') try: backend_schema_data = self.schema.convert_to_openldap("fedora-ds", f.read()) finally: f.close() assert backend_schema_data is not None f = open(os.path.join(self.ldapdir, backend_schema), 'w') try: f.write(backend_schema_data) finally: f.close() self.credentials.set_bind_dn(self.names.ldapmanagerdn) # Destory the target directory, or else setup-ds.pl will complain fedora_ds_dir = os.path.join(self.ldapdir, "slapd-" + self.ldap_instance) shutil.rmtree(fedora_ds_dir, True) self.slapd_provision_command = [self.slapd_path, "-D", fedora_ds_dir, "-i", self.slapd_pid] # In the 'provision' command line, stay in the foreground so we can # easily kill it self.slapd_provision_command.append("-d0") #the command for the final run is the normal script self.slapd_command = [os.path.join(self.ldapdir, "slapd-" + self.ldap_instance, "start-slapd")] # If we were just looking for crashes up to this point, it's a # good time to exit before we realise we don't have Fedora DS on if self.ldap_dryrun_mode: sys.exit(0) # Try to print helpful messages when the user has not specified the # path to the setup-ds tool if self.setup_ds_path is None: raise ProvisioningError("Fedora DS LDAP-Backend must be setup with path to setup-ds, e.g. 
--setup-ds-path=\"/usr/sbin/setup-ds.pl\"!") if not os.path.exists(self.setup_ds_path): self.logger.warning("Path (%s) to slapd does not exist!", self.setup_ds_path) # Run the Fedora DS setup utility retcode = subprocess.call([self.setup_ds_path, "--silent", "--file", self.fedoradsinf], close_fds=True, shell=False) if retcode != 0: raise ProvisioningError("setup-ds failed") # Load samba-admin retcode = subprocess.call([ os.path.join(self.ldapdir, "slapd-" + self.ldap_instance, "ldif2db"), "-s", self.sambadn, "-i", self.samba_ldif], close_fds=True, shell=False) if retcode != 0: raise ProvisioningError("ldif2db failed") def post_setup(self): ldapi_db = Ldb(self.ldap_uri, credentials=self.credentials) # configure in-directory access control on Fedora DS via the aci # attribute (over a direct ldapi:// socket) aci = """(targetattr = "*") (version 3.0;acl "full access to all by samba-admin";allow (all)(userdn = "ldap:///CN=samba-admin,%s");)""" % self.sambadn m = ldb.Message() m["aci"] = ldb.MessageElement([aci], ldb.FLAG_MOD_REPLACE, "aci") for dnstring in (self.names.domaindn, self.names.configdn, self.names.schemadn): m.dn = ldb.Dn(ldapi_db, dnstring) ldapi_db.modify(m) return LDAPBackendResult(self.credentials, self.slapd_command_escaped, self.ldapdir)
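# Hedged lifecycle sketch (not part of this module): every backend is meant
# to be driven through the same four calls; the constructor arguments below
# (paths, lp, names, logger) come from the caller's provisioning context and
# are assumptions here.
#
#     backend = LDBBackend("ldb", paths=paths, lp=lp, names=names,
#                          logger=logger)
#     backend.init()        # prepare databases/config (e.g. wipe sam.ldb)
#     backend.start()       # LDAP backends fork slapd and wait for ldapi
#     ...                   # provisioning happens against the backend
#     result = backend.post_setup()   # a BackendResult or None
#     backend.shutdown()    # LDAP backends terminate the slapd child
#     if result is not None:
#         result.report_logger(logger)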
{ "content_hash": "03a5ed8df28d6a44047ece52dcaf625c", "timestamp": "", "source": "github", "line_count": 815, "max_line_length": 183, "avg_line_length": 40.32515337423313, "alnum_prop": 0.5645519549672904, "repo_name": "jorik041/pth-toolkit", "id": "f88b0db89c52f3128204a259415a7b9f2e307316", "size": "33857", "binary": false, "copies": "11", "ref": "refs/heads/master", "path": "lib/python2.7/site-packages/samba/provision/backend.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Perl", "bytes": "5113" }, { "name": "Python", "bytes": "1294855" }, { "name": "Shell", "bytes": "1105" } ], "symlink_target": "" }
""" ulid/api/monotonic ~~~~~~~~~~~~~~~~~~ Contains the public API of the `ulid` package using the monotonic provider. """ from .. import consts, providers, ulid from . import api API = api.Api(providers.MONOTONIC) create = API.create from_bytes = API.from_bytes from_int = API.from_int from_randomness = API.from_randomness from_str = API.from_str from_timestamp = API.from_timestamp from_uuid = API.from_uuid new = API.new parse = API.parse MIN_TIMESTAMP = consts.MIN_TIMESTAMP MAX_TIMESTAMP = consts.MAX_TIMESTAMP MIN_RANDOMNESS = consts.MIN_RANDOMNESS MAX_RANDOMNESS = consts.MAX_RANDOMNESS MIN_ULID = consts.MIN_ULID MAX_ULID = consts.MAX_ULID Timestamp = ulid.Timestamp Randomness = ulid.Randomness ULID = ulid.ULID __all__ = api.ALL
{ "content_hash": "be78f3e92176b7343ab06a0164553d37", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 79, "avg_line_length": 22.939393939393938, "alnum_prop": 0.7239101717305152, "repo_name": "ahawker/ulid", "id": "2734933579a7c23e272627b869c090475f0761f5", "size": "757", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ulid/api/monotonic.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "3387" }, { "name": "PowerShell", "bytes": "7195" }, { "name": "Python", "bytes": "93605" } ], "symlink_target": "" }
""" Read tuples from a corpus consisting of categorized strings. For example, from the question classification corpus: NUM:dist How far is it from Denver to Aspen ? LOC:city What county is Modesto , California in ? HUM:desc Who was Galileo ? DESC:def What is an atom ? NUM:date When did Hawaii become a state ? """ # based on PPAttachmentCorpusReader import os from nltk import compat from .util import * from .api import * # [xx] Should the order of the tuple be reversed -- in most other places # in nltk, we use the form (data, tag) -- e.g., tagged words and # labeled texts for classifiers. class StringCategoryCorpusReader(CorpusReader): def __init__(self, root, fileids, delimiter=' ', encoding='utf8'): """ :param root: The root directory for this corpus. :param fileids: A list or regexp specifying the fileids in this corpus. :param delimiter: Field delimiter """ CorpusReader.__init__(self, root, fileids, encoding) self._delimiter = delimiter def tuples(self, fileids=None): if fileids is None: fileids = self._fileids elif isinstance(fileids, compat.string_types): fileids = [fileids] return concat([StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc) for (fileid, enc) in self.abspaths(fileids, True)]) def raw(self, fileids=None): """ :return: the text contents of the given fileids, as a single string. """ if fileids is None: fileids = self._fileids elif isinstance(fileids, compat.string_types): fileids = [fileids] return concat([self.open(f).read() for f in fileids]) def _read_tuple_block(self, stream): line = stream.readline().strip() if line: return [tuple(line.split(self._delimiter, 1))] else: return []
{ "content_hash": "93fcb35f31aec731ca82f7041a57999d", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 79, "avg_line_length": 36.132075471698116, "alnum_prop": 0.6407310704960836, "repo_name": "bbengfort/TextBlob", "id": "d84ec5f41ad740c38bc1a70ea96bbf8bda034668", "size": "2185", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "textblob/nltk/corpus/reader/string_category.py", "mode": "33261", "license": "mit", "language": [], "symlink_target": "" }
import pytest from pillowtalk import * from pillowtalk.schemas import add_schema @pytest.fixture def models(mybase): @add_schema class Person(mybase): items = [] FIELDS = ["id", "name"] description = fields.String(required=False) # ADDITIONAL_FIELDS = dict( # description=fields.String(required=False) # ) RELATIONSHIPS = [ One("address", "find Person.address_id <> Address.id") ] @add_schema class Address(mybase): items = [] FIELDS = ["id", "address_str"] RELATIONSHIPS = [ One("street", "find Person.street_id <> Street.id") ] @add_schema class Street(mybase): items = [] FIELDS = ["id", "name"] return Person, Address, Street def test_main(models): Person, Address, Street = models street_data = { "id": 3, "name": "Colfax" } address_data = { "id": 3, "address_str": "Denver, CO", "street": street_data } person_data = { "id" : 5, "name" : "Jeff", "address": address_data } p = Person.load(person_data) assert type(p.address.street) is Street
{ "content_hash": "0925f63aacea9077b00bc093d2a93628", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 66, "avg_line_length": 21.666666666666668, "alnum_prop": 0.5287449392712551, "repo_name": "jvrana/Pillowtalk", "id": "c217ae36438e7cf0d3a5a0c9f9fc62b019249021", "size": "1235", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_relationships/test_three_teir.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "52925" } ], "symlink_target": "" }
from wtforms import ValidationError

from uchan.lib import validation
from uchan.lib.service import board_service


class BoardValidator:
    def __call__(self, form, field):
        if not validation.check_board_name_validity(field.data):
            raise ValidationError('Board name not valid.')

        board = board_service.find_board(field.data)
        if not board:
            raise ValidationError('Board does not exist')
        field.board = board


class BoardNameValidator:
    def __call__(self, form, field):
        if not validation.check_board_name_validity(field.data):
            raise ValidationError('Board name not valid.')


class PageTitleValidator:
    def __call__(self, form, field):
        if not validation.check_page_title_validity(field.data):
            raise ValidationError('Page title not valid.')


class PageLinkValidator:
    def __call__(self, form, field):
        if not validation.check_page_link_name_validity(field.data):
            raise ValidationError('Page link not valid.')


class ModeratorUsernameValidator:
    def __call__(self, form, field):
        if not validation.check_username_validity(field.data):
            raise ValidationError('Username not valid')


class ModeratorPasswordValidator:
    def __call__(self, form, field):
        if not validation.check_password_validity(field.data):
            raise ValidationError('Password not valid')
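# Hedged usage sketch (the form and field names are assumptions): these are
# plain WTForms validators, attached through a field's validators list.
#
#     from wtforms import Form, StringField
#
#     class CreateBoardForm(Form):
#         name = StringField('Board name', [BoardNameValidator()])
#
# BoardValidator goes one step further: on success it resolves the board and
# stores it on the field (field.board) so the view can use it after
# form.validate() returns True.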
{ "content_hash": "7f5faafe8c99fc0ac35009375972655c", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 68, "avg_line_length": 31.466666666666665, "alnum_prop": 0.6772598870056498, "repo_name": "Floens/uchan", "id": "385328e89bd542581753a67f28ef8276f4b4156a", "size": "1416", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "uchan/view/form/validators.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "10791" }, { "name": "Dockerfile", "bytes": "970" }, { "name": "HTML", "bytes": "53257" }, { "name": "JavaScript", "bytes": "3766" }, { "name": "Makefile", "bytes": "589" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "295111" }, { "name": "Shell", "bytes": "1071" }, { "name": "TypeScript", "bytes": "65197" }, { "name": "VCL", "bytes": "1762" } ], "symlink_target": "" }
"""Discrete Cosine Transform ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math as _math from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops as _array_ops from tensorflow.python.ops import math_ops as _math_ops from tensorflow.python.ops.signal import fft_ops from tensorflow.python.util.tf_export import tf_export def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm): """Checks that DCT/IDCT arguments are compatible and well formed.""" if n is not None: raise NotImplementedError("The DCT length argument is not implemented.") if axis != -1: raise NotImplementedError("axis must be -1. Got: %s" % axis) if dct_type not in (1, 2, 3): raise ValueError("Only Types I, II and III (I)DCT are supported.") if dct_type == 1: if norm == "ortho": raise ValueError("Normalization is not supported for the Type-I DCT.") if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2: raise ValueError( "Type-I DCT requires the dimension to be greater than one.") if norm not in (None, "ortho"): raise ValueError( "Unknown normalization. Expected None or 'ortho', got: %s" % norm) # TODO(rjryan): Implement `n` and `axis` parameters. @tf_export("signal.dct", v1=["signal.dct", "spectral.dct"]) def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`. Currently only Types I, II and III are supported. Type I is implemented using a length `2N` padded `tf.spectral.rfft`. Type II is implemented using a length `2N` padded `tf.spectral.rfft`, as described here: https://dsp.stackexchange.com/a/10606. Type III is a fairly straightforward inverse of Type II (i.e. using a length `2N` padded `tf.spectral.irfft`). @compatibility(scipy) Equivalent to scipy.fftpack.dct for Type-I, Type-II and Type-III DCT. https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html @end_compatibility Args: input: A `[..., samples]` `float32` `Tensor` containing the signals to take the DCT of. type: The DCT type to perform. Must be 1, 2 or 3. n: For future expansion. The length of the transform. Must be `None`. axis: For future expansion. The axis to compute the DCT along. Must be `-1`. norm: The normalization to apply. `None` for no normalization or `'ortho'` for orthonormal normalization. name: An optional name for the operation. Returns: A `[..., samples]` `float32` `Tensor` containing the DCT of `input`. Raises: ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None, `axis` is not `-1`, or `norm` is not `None` or `'ortho'`. ValueError: If `type` is `1` and `norm` is `ortho`. [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform """ _validate_dct_arguments(input, type, n, axis, norm) with _ops.name_scope(name, "dct", [input]): # We use the RFFT to compute the DCT and TensorFlow only supports float32 # for FFTs at the moment. 
input = _ops.convert_to_tensor(input, dtype=_dtypes.float32) axis_dim = (tensor_shape.dimension_value(input.shape[-1]) or _array_ops.shape(input)[-1]) axis_dim_float = _math_ops.to_float(axis_dim) if type == 1: dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1) dct1 = _math_ops.real(fft_ops.rfft(dct1_input)) return dct1 if type == 2: scale = 2.0 * _math_ops.exp( _math_ops.complex( 0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 / axis_dim_float)) # TODO(rjryan): Benchmark performance and memory usage of the various # approaches to computing a DCT via the RFFT. dct2 = _math_ops.real( fft_ops.rfft( input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale) if norm == "ortho": n1 = 0.5 * _math_ops.rsqrt(axis_dim_float) n2 = n1 * _math_ops.sqrt(2.0) # Use tf.pad to make a vector of [n1, n2, n2, n2, ...]. weights = _array_ops.pad( _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]], constant_values=n2) dct2 *= weights return dct2 elif type == 3: if norm == "ortho": n1 = _math_ops.sqrt(axis_dim_float) n2 = n1 * _math_ops.sqrt(0.5) # Use tf.pad to make a vector of [n1, n2, n2, n2, ...]. weights = _array_ops.pad( _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]], constant_values=n2) input *= weights else: input *= axis_dim_float scale = 2.0 * _math_ops.exp( _math_ops.complex( 0.0, _math_ops.range(axis_dim_float) * _math.pi * 0.5 / axis_dim_float)) dct3 = _math_ops.real( fft_ops.irfft( scale * _math_ops.complex(input, 0.0), fft_length=[2 * axis_dim]))[..., :axis_dim] return dct3 # TODO(rjryan): Implement `n` and `axis` parameters. @tf_export("signal.idct", v1=["signal.idct", "spectral.idct"]) def idct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin """Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`. Currently only Types I, II and III are supported. Type III is the inverse of Type II, and vice versa. Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is not `'ortho'`. That is: `signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`. When `norm='ortho'`, we have: `signal == idct(dct(signal, norm='ortho'), norm='ortho')`. @compatibility(scipy) Equivalent to scipy.fftpack.idct for Type-I, Type-II and Type-III DCT. https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html @end_compatibility Args: input: A `[..., samples]` `float32` `Tensor` containing the signals to take the DCT of. type: The IDCT type to perform. Must be 1, 2 or 3. n: For future expansion. The length of the transform. Must be `None`. axis: For future expansion. The axis to compute the DCT along. Must be `-1`. norm: The normalization to apply. `None` for no normalization or `'ortho'` for orthonormal normalization. name: An optional name for the operation. Returns: A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`. Raises: ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None, `axis` is not `-1`, or `norm` is not `None` or `'ortho'`. [idct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms """ _validate_dct_arguments(input, type, n, axis, norm) inverse_type = {1: 1, 2: 3, 3: 2}[type] return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
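# Hedged usage sketch (not part of this module): the round-trip identities
# from the docstrings above, written out for a concrete signal.
#
#     x = tf.constant([[1.0, 2.0, 3.0, 4.0]])
#     # Orthonormal DCT-II and DCT-III are exact inverses of each other:
#     y = dct(x, type=2, norm='ortho')
#     x_inv = idct(y, type=2, norm='ortho')          # ~= x
#     # Without normalization, rescale by 0.5 / N (here N == 4):
#     x_inv2 = idct(dct(x, type=2)) * 0.5 / 4.0      # ~= x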
{ "content_hash": "cbadfebdf442c622a197da268d81eed2", "timestamp": "", "source": "github", "line_count": 178, "max_line_length": 100, "avg_line_length": 39.79775280898876, "alnum_prop": 0.6380575945793338, "repo_name": "jendap/tensorflow", "id": "d042c95c049538354836ef83f0b21d8babccedc8", "size": "7773", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "tensorflow/python/ops/signal/dct_ops.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "2867" }, { "name": "Batchfile", "bytes": "14734" }, { "name": "C", "bytes": "606044" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "55619540" }, { "name": "CMake", "bytes": "207169" }, { "name": "Dockerfile", "bytes": "78675" }, { "name": "Go", "bytes": "1383418" }, { "name": "HTML", "bytes": "4680118" }, { "name": "Java", "bytes": "900190" }, { "name": "Jupyter Notebook", "bytes": "2510235" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "77367" }, { "name": "Objective-C", "bytes": "16140" }, { "name": "Objective-C++", "bytes": "102889" }, { "name": "PHP", "bytes": "14644" }, { "name": "Pascal", "bytes": "399" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "45358371" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "838" }, { "name": "Shell", "bytes": "530065" }, { "name": "Smarty", "bytes": "25609" } ], "symlink_target": "" }
"""Nagcat single-test scheduler""" from collections import defaultdict from nagcat import errors, log from nagcat import scheduler class ObjectDummy(defaultdict): """Provide a replacement for a real ObjectParser""" def __init__(self): super(ObjectDummy, self).__init__(list) def types(self): return self.keys() class NagcatDummy(scheduler.Scheduler): """For testing""" def build_tests(self, config): return [] def nagios_status(self): return ObjectDummy() class NagcatSimple(NagcatDummy): """Run only a single test, do not report to nagios. Useful for testing a new test template. """ def _report(self, report): log.info("REPORT:\n%s" % report['text']) def new_test(self, config): new = super(NagcatSimple, self).new_test(config) new.addReportCallback(self._report) return new def build_tests(self, config, test_name=None, host=None, port=None): config = config.get(test_name, None) if not config: raise errors.InitError("Test '%s' not found in config file!" % test_name) config = config.copy() config.setdefault('host', host) config.setdefault('port', port) config.setdefault('test', test_name) config.setdefault('description', test_name) return [self.new_test(config)] def start(self): assert self._startup self._startup = False del self._group_index runnable = self._registered.pop() return runnable.start()
{ "content_hash": "dec3e13ea72d9649e43cbb874374fc99", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 72, "avg_line_length": 26.76271186440678, "alnum_prop": 0.6219126029132362, "repo_name": "marineam/nagcat", "id": "656f797ca0d6dce84014c50700ee43fcc2b98d25", "size": "2167", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/nagcat/simple.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "119709" }, { "name": "Python", "bytes": "572702" }, { "name": "Shell", "bytes": "3443" } ], "symlink_target": "" }
"""Create the assaults on officers table Revision ID: 720df9948a25 Revises: 4e98ea7e43d Create Date: 2016-06-15 15:47:59.913618 """ # revision identifiers, used by Alembic. revision = '720df9948a25' down_revision = '4e98ea7e43d' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'assaults_on_officers', sa.Column('id', sa.Integer(), nullable=False), sa.Column('department_id', sa.Integer(), nullable=False), sa.Column('opaque_id', sa.String(length=255), nullable=False), sa.Column('officer_identifier', sa.String(length=255), nullable=True), sa.Column('service_type', sa.String(length=255), nullable=True), sa.Column('force_type', sa.String(length=255), nullable=True), sa.Column('assignment', sa.String(length=255), nullable=True), sa.Column('arrest_made', sa.Boolean(), nullable=True), sa.Column('officer_injured', sa.Boolean(), nullable=True), sa.Column('officer_killed', sa.Boolean(), nullable=True), sa.Column('report_filed', sa.Boolean(), nullable=True), sa.ForeignKeyConstraint(['department_id'], ['departments.id'], ), sa.PrimaryKeyConstraint('id') ) def downgrade(): op.drop_table('assaults_on_officers')
{ "content_hash": "92a6085559bfdab06bb04acae5ba9c3f", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 78, "avg_line_length": 36.371428571428574, "alnum_prop": 0.6661429693637078, "repo_name": "codeforamerica/comport", "id": "68938036552f4a789b187b7687a4a87034409b77", "size": "1273", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "migrations/versions/720df9948a25_.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "6889" }, { "name": "HTML", "bytes": "73956" }, { "name": "JavaScript", "bytes": "228515" }, { "name": "Makefile", "bytes": "343" }, { "name": "Mako", "bytes": "412" }, { "name": "PowerShell", "bytes": "471" }, { "name": "Python", "bytes": "725626" }, { "name": "Ruby", "bytes": "1030" } ], "symlink_target": "" }
import datetime import sys from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union from .. import _serialization if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from .. import models as _models JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object class AgreementContent(_serialization.Model): """The integration account agreement content. :ivar a_s2: The AS2 agreement content. :vartype a_s2: ~azure.mgmt.logic.models.AS2AgreementContent :ivar x12: The X12 agreement content. :vartype x12: ~azure.mgmt.logic.models.X12AgreementContent :ivar edifact: The EDIFACT agreement content. :vartype edifact: ~azure.mgmt.logic.models.EdifactAgreementContent """ _attribute_map = { "a_s2": {"key": "aS2", "type": "AS2AgreementContent"}, "x12": {"key": "x12", "type": "X12AgreementContent"}, "edifact": {"key": "edifact", "type": "EdifactAgreementContent"}, } def __init__( self, *, a_s2: Optional["_models.AS2AgreementContent"] = None, x12: Optional["_models.X12AgreementContent"] = None, edifact: Optional["_models.EdifactAgreementContent"] = None, **kwargs ): """ :keyword a_s2: The AS2 agreement content. :paramtype a_s2: ~azure.mgmt.logic.models.AS2AgreementContent :keyword x12: The X12 agreement content. :paramtype x12: ~azure.mgmt.logic.models.X12AgreementContent :keyword edifact: The EDIFACT agreement content. :paramtype edifact: ~azure.mgmt.logic.models.EdifactAgreementContent """ super().__init__(**kwargs) self.a_s2 = a_s2 self.x12 = x12 self.edifact = edifact class ApiDeploymentParameterMetadata(_serialization.Model): """The API deployment parameter metadata. :ivar type: The type. :vartype type: str :ivar is_required: Indicates whether its required. :vartype is_required: bool :ivar display_name: The display name. :vartype display_name: str :ivar description: The description. :vartype description: str :ivar visibility: The visibility. Known values are: "NotSpecified", "Default", and "Internal". :vartype visibility: str or ~azure.mgmt.logic.models.ApiDeploymentParameterVisibility """ _attribute_map = { "type": {"key": "type", "type": "str"}, "is_required": {"key": "isRequired", "type": "bool"}, "display_name": {"key": "displayName", "type": "str"}, "description": {"key": "description", "type": "str"}, "visibility": {"key": "visibility", "type": "str"}, } def __init__( self, *, type: Optional[str] = None, is_required: Optional[bool] = None, display_name: Optional[str] = None, description: Optional[str] = None, visibility: Optional[Union[str, "_models.ApiDeploymentParameterVisibility"]] = None, **kwargs ): """ :keyword type: The type. :paramtype type: str :keyword is_required: Indicates whether its required. :paramtype is_required: bool :keyword display_name: The display name. :paramtype display_name: str :keyword description: The description. :paramtype description: str :keyword visibility: The visibility. Known values are: "NotSpecified", "Default", and "Internal". :paramtype visibility: str or ~azure.mgmt.logic.models.ApiDeploymentParameterVisibility """ super().__init__(**kwargs) self.type = type self.is_required = is_required self.display_name = display_name self.description = description self.visibility = visibility class ApiDeploymentParameterMetadataSet(_serialization.Model): """The API deployment parameters metadata. :ivar package_content_link: The package content link parameter. 
    :vartype package_content_link: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
    :ivar redis_cache_connection_string: The redis cache connection string parameter.
    :vartype redis_cache_connection_string:
     ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
    """

    _attribute_map = {
        "package_content_link": {"key": "packageContentLink", "type": "ApiDeploymentParameterMetadata"},
        "redis_cache_connection_string": {
            "key": "redisCacheConnectionString",
            "type": "ApiDeploymentParameterMetadata",
        },
    }

    def __init__(
        self,
        *,
        package_content_link: Optional["_models.ApiDeploymentParameterMetadata"] = None,
        redis_cache_connection_string: Optional["_models.ApiDeploymentParameterMetadata"] = None,
        **kwargs
    ):
        """
        :keyword package_content_link: The package content link parameter.
        :paramtype package_content_link: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
        :keyword redis_cache_connection_string: The redis cache connection string parameter.
        :paramtype redis_cache_connection_string:
         ~azure.mgmt.logic.models.ApiDeploymentParameterMetadata
        """
        super().__init__(**kwargs)
        self.package_content_link = package_content_link
        self.redis_cache_connection_string = redis_cache_connection_string


class Resource(_serialization.Model):
    """The base resource type.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar location: The resource location.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, **kwargs):
        """
        :keyword location: The resource location.
        :paramtype location: str
        :keyword tags: The resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags


class ApiOperation(Resource):
    """The api operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar location: The resource location.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    :ivar properties: The api operations properties.
    :vartype properties: ~azure.mgmt.logic.models.ApiOperationPropertiesDefinition
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "ApiOperationPropertiesDefinition"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        properties: Optional["_models.ApiOperationPropertiesDefinition"] = None,
        **kwargs
    ):
        """
        :keyword location: The resource location.
        :paramtype location: str
        :keyword tags: The resource tags.

class ApiOperation(Resource):
    """The api operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar location: The resource location.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    :ivar properties: The api operations properties.
    :vartype properties: ~azure.mgmt.logic.models.ApiOperationPropertiesDefinition
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "ApiOperationPropertiesDefinition"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        properties: Optional["_models.ApiOperationPropertiesDefinition"] = None,
        **kwargs
    ):
        """
        :keyword location: The resource location.
        :paramtype location: str
        :keyword tags: The resource tags.
        :paramtype tags: dict[str, str]
        :keyword properties: The api operations properties.
        :paramtype properties: ~azure.mgmt.logic.models.ApiOperationPropertiesDefinition
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.properties = properties


class ApiOperationAnnotation(_serialization.Model):
    """The Api Operation Annotation.

    :ivar status: The status annotation. Known values are: "NotSpecified", "Preview", and
     "Production".
    :vartype status: str or ~azure.mgmt.logic.models.StatusAnnotation
    :ivar family: The family.
    :vartype family: str
    :ivar revision: The revision.
    :vartype revision: int
    """

    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "family": {"key": "family", "type": "str"},
        "revision": {"key": "revision", "type": "int"},
    }

    def __init__(
        self,
        *,
        status: Optional[Union[str, "_models.StatusAnnotation"]] = None,
        family: Optional[str] = None,
        revision: Optional[int] = None,
        **kwargs
    ):
        """
        :keyword status: The status annotation. Known values are: "NotSpecified", "Preview", and
         "Production".
        :paramtype status: str or ~azure.mgmt.logic.models.StatusAnnotation
        :keyword family: The family.
        :paramtype family: str
        :keyword revision: The revision.
        :paramtype revision: int
        """
        super().__init__(**kwargs)
        self.status = status
        self.family = family
        self.revision = revision


class ApiOperationListResult(_serialization.Model):
    """The list of managed API operations.

    :ivar value: The api operation definitions for an API.
    :vartype value: list[~azure.mgmt.logic.models.ApiOperation]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ApiOperation]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self, *, value: Optional[List["_models.ApiOperation"]] = None, next_link: Optional[str] = None, **kwargs
    ):
        """
        :keyword value: The api operation definitions for an API.
        :paramtype value: list[~azure.mgmt.logic.models.ApiOperation]
        :keyword next_link: The URL to get the next set of results.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
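
# Editor's note (illustrative sketch): list models carry one page of results in
# `value` plus an opaque `next_link` URL; the operations client normally wraps
# this in an ItemPaged iterator. `deserialize` is assumed from the base class,
# and `response_json` is a hypothetical response payload.
#
#   page = ApiOperationListResult.deserialize(response_json)
#   for op in page.value or []:
#       print(op.name)
#   has_more = page.next_link is not None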

class ApiOperationPropertiesDefinition(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """The api operations properties.

    :ivar summary: The summary of the api operation.
    :vartype summary: str
    :ivar description: The description of the api operation.
    :vartype description: str
    :ivar visibility: The visibility of the api operation.
    :vartype visibility: str
    :ivar trigger: The trigger type of api operation.
    :vartype trigger: str
    :ivar trigger_hint: The trigger hint for the api operation.
    :vartype trigger_hint: str
    :ivar pageable: Indicates whether the api operation is pageable.
    :vartype pageable: bool
    :ivar annotation: The annotation of api operation.
    :vartype annotation: ~azure.mgmt.logic.models.ApiOperationAnnotation
    :ivar api: The api reference.
    :vartype api: ~azure.mgmt.logic.models.ApiReference
    :ivar inputs_definition: The operation inputs definition schema.
    :vartype inputs_definition: ~azure.mgmt.logic.models.SwaggerSchema
    :ivar responses_definition: The operation responses definition schemas.
    :vartype responses_definition: dict[str, ~azure.mgmt.logic.models.SwaggerSchema]
    :ivar is_webhook: Indicates whether the API operation is webhook or not.
    :vartype is_webhook: bool
    :ivar is_notification: Indicates whether the API operation is notification or not.
    :vartype is_notification: bool
    """

    _attribute_map = {
        "summary": {"key": "summary", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "visibility": {"key": "visibility", "type": "str"},
        "trigger": {"key": "trigger", "type": "str"},
        "trigger_hint": {"key": "triggerHint", "type": "str"},
        "pageable": {"key": "pageable", "type": "bool"},
        "annotation": {"key": "annotation", "type": "ApiOperationAnnotation"},
        "api": {"key": "api", "type": "ApiReference"},
        "inputs_definition": {"key": "inputsDefinition", "type": "SwaggerSchema"},
        "responses_definition": {"key": "responsesDefinition", "type": "{SwaggerSchema}"},
        "is_webhook": {"key": "isWebhook", "type": "bool"},
        "is_notification": {"key": "isNotification", "type": "bool"},
    }

    def __init__(
        self,
        *,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        visibility: Optional[str] = None,
        trigger: Optional[str] = None,
        trigger_hint: Optional[str] = None,
        pageable: Optional[bool] = None,
        annotation: Optional["_models.ApiOperationAnnotation"] = None,
        api: Optional["_models.ApiReference"] = None,
        inputs_definition: Optional["_models.SwaggerSchema"] = None,
        responses_definition: Optional[Dict[str, "_models.SwaggerSchema"]] = None,
        is_webhook: Optional[bool] = None,
        is_notification: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword summary: The summary of the api operation.
        :paramtype summary: str
        :keyword description: The description of the api operation.
        :paramtype description: str
        :keyword visibility: The visibility of the api operation.
        :paramtype visibility: str
        :keyword trigger: The trigger type of api operation.
        :paramtype trigger: str
        :keyword trigger_hint: The trigger hint for the api operation.
        :paramtype trigger_hint: str
        :keyword pageable: Indicates whether the api operation is pageable.
        :paramtype pageable: bool
        :keyword annotation: The annotation of api operation.
        :paramtype annotation: ~azure.mgmt.logic.models.ApiOperationAnnotation
        :keyword api: The api reference.
        :paramtype api: ~azure.mgmt.logic.models.ApiReference
        :keyword inputs_definition: The operation inputs definition schema.
        :paramtype inputs_definition: ~azure.mgmt.logic.models.SwaggerSchema
        :keyword responses_definition: The operation responses definition schemas.
        :paramtype responses_definition: dict[str, ~azure.mgmt.logic.models.SwaggerSchema]
        :keyword is_webhook: Indicates whether the API operation is webhook or not.
        :paramtype is_webhook: bool
        :keyword is_notification: Indicates whether the API operation is notification or not.
        :paramtype is_notification: bool
        """
        super().__init__(**kwargs)
        self.summary = summary
        self.description = description
        self.visibility = visibility
        self.trigger = trigger
        self.trigger_hint = trigger_hint
        self.pageable = pageable
        self.annotation = annotation
        self.api = api
        self.inputs_definition = inputs_definition
        self.responses_definition = responses_definition
        self.is_webhook = is_webhook
        self.is_notification = is_notification

class ResourceReference(_serialization.Model):
    """The resource reference.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):  # pylint: disable=redefined-builtin
        """
        :keyword id: The resource id.
        :paramtype id: str
        """
        super().__init__(**kwargs)
        self.id = id
        self.name = None
        self.type = None


class ApiReference(ResourceReference):
    """The Api reference.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar display_name: The display name of the api.
    :vartype display_name: str
    :ivar description: The description of the api.
    :vartype description: str
    :ivar icon_uri: The icon uri of the api.
    :vartype icon_uri: str
    :ivar swagger: The swagger of the api.
    :vartype swagger: JSON
    :ivar brand_color: The brand color of the api.
    :vartype brand_color: str
    :ivar category: The tier. Known values are: "NotSpecified", "Enterprise", "Standard", and
     "Premium".
    :vartype category: str or ~azure.mgmt.logic.models.ApiTier
    :ivar integration_service_environment: The integration service environment reference.
    :vartype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "icon_uri": {"key": "iconUri", "type": "str"},
        "swagger": {"key": "swagger", "type": "object"},
        "brand_color": {"key": "brandColor", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "integration_service_environment": {"key": "integrationServiceEnvironment", "type": "ResourceReference"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        icon_uri: Optional[str] = None,
        swagger: Optional[JSON] = None,
        brand_color: Optional[str] = None,
        category: Optional[Union[str, "_models.ApiTier"]] = None,
        integration_service_environment: Optional["_models.ResourceReference"] = None,
        **kwargs
    ):
        """
        :keyword id: The resource id.
        :paramtype id: str
        :keyword display_name: The display name of the api.
        :paramtype display_name: str
        :keyword description: The description of the api.
        :paramtype description: str
        :keyword icon_uri: The icon uri of the api.
        :paramtype icon_uri: str
        :keyword swagger: The swagger of the api.
        :paramtype swagger: JSON
        :keyword brand_color: The brand color of the api.
        :paramtype brand_color: str
        :keyword category: The tier. Known values are: "NotSpecified", "Enterprise", "Standard", and
         "Premium".
        :paramtype category: str or ~azure.mgmt.logic.models.ApiTier
        :keyword integration_service_environment: The integration service environment reference.
        :paramtype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
        """
        super().__init__(id=id, **kwargs)
        self.display_name = display_name
        self.description = description
        self.icon_uri = icon_uri
        self.swagger = swagger
        self.brand_color = brand_color
        self.category = category
        self.integration_service_environment = integration_service_environment
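
# Editor's note (illustrative sketch): fields mapped with type "object" accept
# arbitrary untyped JSON, matching the module-level JSON alias defined above.
#
#   api = ApiReference(
#       id="/subscriptions/.../managedApis/office365",  # hypothetical resource id
#       swagger={"swagger": "2.0", "info": {"title": "office365"}},
#   )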

class ApiResourceBackendService(_serialization.Model):
    """The API backend service.

    :ivar service_url: The service URL.
    :vartype service_url: str
    """

    _attribute_map = {
        "service_url": {"key": "serviceUrl", "type": "str"},
    }

    def __init__(self, *, service_url: Optional[str] = None, **kwargs):
        """
        :keyword service_url: The service URL.
        :paramtype service_url: str
        """
        super().__init__(**kwargs)
        self.service_url = service_url


class ApiResourceDefinitions(_serialization.Model):
    """The Api resource definition.

    :ivar original_swagger_url: The original swagger url.
    :vartype original_swagger_url: str
    :ivar modified_swagger_url: The modified swagger url.
    :vartype modified_swagger_url: str
    """

    _attribute_map = {
        "original_swagger_url": {"key": "originalSwaggerUrl", "type": "str"},
        "modified_swagger_url": {"key": "modifiedSwaggerUrl", "type": "str"},
    }

    def __init__(
        self, *, original_swagger_url: Optional[str] = None, modified_swagger_url: Optional[str] = None, **kwargs
    ):
        """
        :keyword original_swagger_url: The original swagger url.
        :paramtype original_swagger_url: str
        :keyword modified_swagger_url: The modified swagger url.
        :paramtype modified_swagger_url: str
        """
        super().__init__(**kwargs)
        self.original_swagger_url = original_swagger_url
        self.modified_swagger_url = modified_swagger_url

class ApiResourceGeneralInformation(_serialization.Model):
    """The API general information.

    :ivar icon_url: The icon url.
    :vartype icon_url: str
    :ivar display_name: The display name.
    :vartype display_name: str
    :ivar description: The description.
    :vartype description: str
    :ivar terms_of_use_url: The terms of use url.
    :vartype terms_of_use_url: str
    :ivar release_tag: The release tag.
    :vartype release_tag: str
    :ivar tier: The tier. Known values are: "NotSpecified", "Enterprise", "Standard", and
     "Premium".
    :vartype tier: str or ~azure.mgmt.logic.models.ApiTier
    """

    _attribute_map = {
        "icon_url": {"key": "iconUrl", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "terms_of_use_url": {"key": "termsOfUseUrl", "type": "str"},
        "release_tag": {"key": "releaseTag", "type": "str"},
        "tier": {"key": "tier", "type": "str"},
    }

    def __init__(
        self,
        *,
        icon_url: Optional[str] = None,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        terms_of_use_url: Optional[str] = None,
        release_tag: Optional[str] = None,
        tier: Optional[Union[str, "_models.ApiTier"]] = None,
        **kwargs
    ):
        """
        :keyword icon_url: The icon url.
        :paramtype icon_url: str
        :keyword display_name: The display name.
        :paramtype display_name: str
        :keyword description: The description.
        :paramtype description: str
        :keyword terms_of_use_url: The terms of use url.
        :paramtype terms_of_use_url: str
        :keyword release_tag: The release tag.
        :paramtype release_tag: str
        :keyword tier: The tier. Known values are: "NotSpecified", "Enterprise", "Standard", and
         "Premium".
        :paramtype tier: str or ~azure.mgmt.logic.models.ApiTier
        """
        super().__init__(**kwargs)
        self.icon_url = icon_url
        self.display_name = display_name
        self.description = description
        self.terms_of_use_url = terms_of_use_url
        self.release_tag = release_tag
        self.tier = tier


class ApiResourceMetadata(_serialization.Model):
    """The api resource metadata.

    :ivar source: The source.
    :vartype source: str
    :ivar brand_color: The brand color.
    :vartype brand_color: str
    :ivar hide_key: The hide key.
    :vartype hide_key: str
    :ivar tags: The tags.
    :vartype tags: dict[str, str]
    :ivar api_type: The api type. Known values are: "NotSpecified", "Rest", and "Soap".
    :vartype api_type: str or ~azure.mgmt.logic.models.ApiType
    :ivar wsdl_service: The WSDL service.
    :vartype wsdl_service: ~azure.mgmt.logic.models.WsdlService
    :ivar wsdl_import_method: The WSDL import method. Known values are: "NotSpecified",
     "SoapToRest", and "SoapPassThrough".
    :vartype wsdl_import_method: str or ~azure.mgmt.logic.models.WsdlImportMethod
    :ivar connection_type: The connection type.
    :vartype connection_type: str
    :ivar provisioning_state: The provisioning state. Known values are: "NotSpecified",
     "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
     "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
     "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
    :ivar deployment_parameters: The connector deployment parameters metadata.
    :vartype deployment_parameters: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadataSet
    """

    _attribute_map = {
        "source": {"key": "source", "type": "str"},
        "brand_color": {"key": "brandColor", "type": "str"},
        "hide_key": {"key": "hideKey", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "api_type": {"key": "ApiType", "type": "str"},
        "wsdl_service": {"key": "wsdlService", "type": "WsdlService"},
        "wsdl_import_method": {"key": "wsdlImportMethod", "type": "str"},
        "connection_type": {"key": "connectionType", "type": "str"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "deployment_parameters": {"key": "deploymentParameters", "type": "ApiDeploymentParameterMetadataSet"},
    }

    def __init__(
        self,
        *,
        source: Optional[str] = None,
        brand_color: Optional[str] = None,
        hide_key: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        api_type: Optional[Union[str, "_models.ApiType"]] = None,
        wsdl_service: Optional["_models.WsdlService"] = None,
        wsdl_import_method: Optional[Union[str, "_models.WsdlImportMethod"]] = None,
        connection_type: Optional[str] = None,
        provisioning_state: Optional[Union[str, "_models.WorkflowProvisioningState"]] = None,
        deployment_parameters: Optional["_models.ApiDeploymentParameterMetadataSet"] = None,
        **kwargs
    ):
        """
        :keyword source: The source.
        :paramtype source: str
        :keyword brand_color: The brand color.
        :paramtype brand_color: str
        :keyword hide_key: The hide key.
        :paramtype hide_key: str
        :keyword tags: The tags.
        :paramtype tags: dict[str, str]
        :keyword api_type: The api type. Known values are: "NotSpecified", "Rest", and "Soap".
        :paramtype api_type: str or ~azure.mgmt.logic.models.ApiType
        :keyword wsdl_service: The WSDL service.
        :paramtype wsdl_service: ~azure.mgmt.logic.models.WsdlService
        :keyword wsdl_import_method: The WSDL import method. Known values are: "NotSpecified",
         "SoapToRest", and "SoapPassThrough".
        :paramtype wsdl_import_method: str or ~azure.mgmt.logic.models.WsdlImportMethod
        :keyword connection_type: The connection type.
        :paramtype connection_type: str
        :keyword provisioning_state: The provisioning state. Known values are: "NotSpecified",
         "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
         "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
         "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress".
        :paramtype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
        :keyword deployment_parameters: The connector deployment parameters metadata.
        :paramtype deployment_parameters: ~azure.mgmt.logic.models.ApiDeploymentParameterMetadataSet
        """
        super().__init__(**kwargs)
        self.source = source
        self.brand_color = brand_color
        self.hide_key = hide_key
        self.tags = tags
        self.api_type = api_type
        self.wsdl_service = wsdl_service
        self.wsdl_import_method = wsdl_import_method
        self.connection_type = connection_type
        self.provisioning_state = provisioning_state
        self.deployment_parameters = deployment_parameters

class ApiResourcePolicies(_serialization.Model):
    """The API resource policies.

    :ivar content: The API level only policies XML as embedded content.
    :vartype content: str
    :ivar content_link: The content link to the policies.
    :vartype content_link: str
    """

    _attribute_map = {
        "content": {"key": "content", "type": "str"},
        "content_link": {"key": "contentLink", "type": "str"},
    }

    def __init__(self, *, content: Optional[str] = None, content_link: Optional[str] = None, **kwargs):
        """
        :keyword content: The API level only policies XML as embedded content.
        :paramtype content: str
        :keyword content_link: The content link to the policies.
        :paramtype content_link: str
        """
        super().__init__(**kwargs)
        self.content = content
        self.content_link = content_link
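
# Editor's note (illustrative sketch): several models follow this
# content-or-link pattern, where a payload is supplied either inline or by
# reference; the URL below is hypothetical.
#
#   inline = ApiResourcePolicies(content="<policies />")
#   linked = ApiResourcePolicies(content_link="https://example.org/policies.xml")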

class ApiResourceProperties(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """The API resource properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The name.
    :vartype name: str
    :ivar connection_parameters: The connection parameters.
    :vartype connection_parameters: dict[str, JSON]
    :ivar metadata: The metadata.
    :vartype metadata: ~azure.mgmt.logic.models.ApiResourceMetadata
    :ivar runtime_urls: The runtime urls.
    :vartype runtime_urls: list[str]
    :ivar general_information: The api general information.
    :vartype general_information: ~azure.mgmt.logic.models.ApiResourceGeneralInformation
    :ivar capabilities: The capabilities.
    :vartype capabilities: list[str]
    :ivar backend_service: The backend service.
    :vartype backend_service: ~azure.mgmt.logic.models.ApiResourceBackendService
    :ivar policies: The policies for the API.
    :vartype policies: ~azure.mgmt.logic.models.ApiResourcePolicies
    :ivar api_definition_url: The API definition.
    :vartype api_definition_url: str
    :ivar api_definitions: The api definitions.
    :vartype api_definitions: ~azure.mgmt.logic.models.ApiResourceDefinitions
    :ivar integration_service_environment: The integration service environment reference.
    :vartype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
    :ivar provisioning_state: The provisioning state.
     Known values are: "NotSpecified", "Accepted", "Running", "Ready", "Creating", "Created",
     "Deleting", "Deleted", "Canceled", "Failed", "Succeeded", "Moving", "Updating", "Registering",
     "Registered", "Unregistering", "Unregistered", "Completed", "Renewing", "Pending", "Waiting",
     and "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
    :ivar category: The category. Known values are: "NotSpecified", "Enterprise", "Standard", and
     "Premium".
    :vartype category: str or ~azure.mgmt.logic.models.ApiTier
    """

    _validation = {
        "name": {"readonly": True},
        "connection_parameters": {"readonly": True},
        "metadata": {"readonly": True},
        "runtime_urls": {"readonly": True},
        "general_information": {"readonly": True},
        "capabilities": {"readonly": True},
        "backend_service": {"readonly": True},
        "policies": {"readonly": True},
        "api_definition_url": {"readonly": True},
        "api_definitions": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "category": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "connection_parameters": {"key": "connectionParameters", "type": "{object}"},
        "metadata": {"key": "metadata", "type": "ApiResourceMetadata"},
        "runtime_urls": {"key": "runtimeUrls", "type": "[str]"},
        "general_information": {"key": "generalInformation", "type": "ApiResourceGeneralInformation"},
        "capabilities": {"key": "capabilities", "type": "[str]"},
        "backend_service": {"key": "backendService", "type": "ApiResourceBackendService"},
        "policies": {"key": "policies", "type": "ApiResourcePolicies"},
        "api_definition_url": {"key": "apiDefinitionUrl", "type": "str"},
        "api_definitions": {"key": "apiDefinitions", "type": "ApiResourceDefinitions"},
        "integration_service_environment": {"key": "integrationServiceEnvironment", "type": "ResourceReference"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "category": {"key": "category", "type": "str"},
    }

    def __init__(self, *, integration_service_environment: Optional["_models.ResourceReference"] = None, **kwargs):
        """
        :keyword integration_service_environment: The integration service environment reference.
        :paramtype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
        """
        super().__init__(**kwargs)
        self.name = None
        self.connection_parameters = None
        self.metadata = None
        self.runtime_urls = None
        self.general_information = None
        self.capabilities = None
        self.backend_service = None
        self.policies = None
        self.api_definition_url = None
        self.api_definitions = None
        self.integration_service_environment = integration_service_environment
        self.provisioning_state = None
        self.category = None

class ArtifactProperties(_serialization.Model):
    """The artifact properties definition.

    :ivar created_time: The artifact creation time.
    :vartype created_time: ~datetime.datetime
    :ivar changed_time: The artifact changed time.
    :vartype changed_time: ~datetime.datetime
    :ivar metadata: Anything.
    :vartype metadata: any
    """

    _attribute_map = {
        "created_time": {"key": "createdTime", "type": "iso-8601"},
        "changed_time": {"key": "changedTime", "type": "iso-8601"},
        "metadata": {"key": "metadata", "type": "object"},
    }

    def __init__(
        self,
        *,
        created_time: Optional[datetime.datetime] = None,
        changed_time: Optional[datetime.datetime] = None,
        metadata: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword created_time: The artifact creation time.
        :paramtype created_time: ~datetime.datetime
        :keyword changed_time: The artifact changed time.
        :paramtype changed_time: ~datetime.datetime
        :keyword metadata: Anything.
        :paramtype metadata: any
        """
        super().__init__(**kwargs)
        self.created_time = created_time
        self.changed_time = changed_time
        self.metadata = metadata


class ArtifactContentPropertiesDefinition(ArtifactProperties):
    """The artifact content properties definition.

    :ivar created_time: The artifact creation time.
    :vartype created_time: ~datetime.datetime
    :ivar changed_time: The artifact changed time.
    :vartype changed_time: ~datetime.datetime
    :ivar metadata: Anything.
    :vartype metadata: any
    :ivar content: Anything.
    :vartype content: any
    :ivar content_type: The content type.
    :vartype content_type: str
    :ivar content_link: The content link.
    :vartype content_link: ~azure.mgmt.logic.models.ContentLink
    """

    _attribute_map = {
        "created_time": {"key": "createdTime", "type": "iso-8601"},
        "changed_time": {"key": "changedTime", "type": "iso-8601"},
        "metadata": {"key": "metadata", "type": "object"},
        "content": {"key": "content", "type": "object"},
        "content_type": {"key": "contentType", "type": "str"},
        "content_link": {"key": "contentLink", "type": "ContentLink"},
    }

    def __init__(
        self,
        *,
        created_time: Optional[datetime.datetime] = None,
        changed_time: Optional[datetime.datetime] = None,
        metadata: Optional[Any] = None,
        content: Optional[Any] = None,
        content_type: Optional[str] = None,
        content_link: Optional["_models.ContentLink"] = None,
        **kwargs
    ):
        """
        :keyword created_time: The artifact creation time.
        :paramtype created_time: ~datetime.datetime
        :keyword changed_time: The artifact changed time.
        :paramtype changed_time: ~datetime.datetime
        :keyword metadata: Anything.
        :paramtype metadata: any
        :keyword content: Anything.
        :paramtype content: any
        :keyword content_type: The content type.
        :paramtype content_type: str
        :keyword content_link: The content link.
        :paramtype content_link: ~azure.mgmt.logic.models.ContentLink
        """
        super().__init__(created_time=created_time, changed_time=changed_time, metadata=metadata, **kwargs)
        self.content = content
        self.content_type = content_type
        self.content_link = content_link
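
# Editor's note (illustrative sketch): datetimes are declared with the
# "iso-8601" serializer type, and subclasses forward shared fields to
# super().__init__() instead of redefining them. The exact rendered timestamp
# format below is assumed.
#
#   props = ArtifactContentPropertiesDefinition(
#       created_time=datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc),
#       content="<xml />",
#       content_type="application/xml",
#   )
#   props.serialize()  # createdTime -> "2023-01-01T00:00:00Z" (assumed)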

class AS2AcknowledgementConnectionSettings(_serialization.Model):
    """The AS2 agreement acknowledgement connection settings.

    All required parameters must be populated in order to send to Azure.

    :ivar ignore_certificate_name_mismatch: Indicates whether to ignore mismatch in certificate
     name. Required.
    :vartype ignore_certificate_name_mismatch: bool
    :ivar support_http_status_code_continue: Indicates whether to support HTTP status code
     'CONTINUE'. Required.
    :vartype support_http_status_code_continue: bool
    :ivar keep_http_connection_alive: Indicates whether to keep the connection alive. Required.
    :vartype keep_http_connection_alive: bool
    :ivar unfold_http_headers: Indicates whether to unfold the HTTP headers. Required.
    :vartype unfold_http_headers: bool
    """

    _validation = {
        "ignore_certificate_name_mismatch": {"required": True},
        "support_http_status_code_continue": {"required": True},
        "keep_http_connection_alive": {"required": True},
        "unfold_http_headers": {"required": True},
    }

    _attribute_map = {
        "ignore_certificate_name_mismatch": {"key": "ignoreCertificateNameMismatch", "type": "bool"},
        "support_http_status_code_continue": {"key": "supportHttpStatusCodeContinue", "type": "bool"},
        "keep_http_connection_alive": {"key": "keepHttpConnectionAlive", "type": "bool"},
        "unfold_http_headers": {"key": "unfoldHttpHeaders", "type": "bool"},
    }

    def __init__(
        self,
        *,
        ignore_certificate_name_mismatch: bool,
        support_http_status_code_continue: bool,
        keep_http_connection_alive: bool,
        unfold_http_headers: bool,
        **kwargs
    ):
        """
        :keyword ignore_certificate_name_mismatch: Indicates whether to ignore mismatch in
         certificate name. Required.
        :paramtype ignore_certificate_name_mismatch: bool
        :keyword support_http_status_code_continue: Indicates whether to support HTTP status code
         'CONTINUE'. Required.
        :paramtype support_http_status_code_continue: bool
        :keyword keep_http_connection_alive: Indicates whether to keep the connection alive.
         Required.
        :paramtype keep_http_connection_alive: bool
        :keyword unfold_http_headers: Indicates whether to unfold the HTTP headers. Required.
        :paramtype unfold_http_headers: bool
        """
        super().__init__(**kwargs)
        self.ignore_certificate_name_mismatch = ignore_certificate_name_mismatch
        self.support_http_status_code_continue = support_http_status_code_continue
        self.keep_http_connection_alive = keep_http_connection_alive
        self.unfold_http_headers = unfold_http_headers


class AS2AgreementContent(_serialization.Model):
    """The integration account AS2 agreement content.

    All required parameters must be populated in order to send to Azure.

    :ivar receive_agreement: The AS2 one-way receive agreement. Required.
    :vartype receive_agreement: ~azure.mgmt.logic.models.AS2OneWayAgreement
    :ivar send_agreement: The AS2 one-way send agreement. Required.
    :vartype send_agreement: ~azure.mgmt.logic.models.AS2OneWayAgreement
    """

    _validation = {
        "receive_agreement": {"required": True},
        "send_agreement": {"required": True},
    }

    _attribute_map = {
        "receive_agreement": {"key": "receiveAgreement", "type": "AS2OneWayAgreement"},
        "send_agreement": {"key": "sendAgreement", "type": "AS2OneWayAgreement"},
    }

    def __init__(
        self,
        *,
        receive_agreement: "_models.AS2OneWayAgreement",
        send_agreement: "_models.AS2OneWayAgreement",
        **kwargs
    ):
        """
        :keyword receive_agreement: The AS2 one-way receive agreement. Required.
        :paramtype receive_agreement: ~azure.mgmt.logic.models.AS2OneWayAgreement
        :keyword send_agreement: The AS2 one-way send agreement. Required.
        :paramtype send_agreement: ~azure.mgmt.logic.models.AS2OneWayAgreement
        """
        super().__init__(**kwargs)
        self.receive_agreement = receive_agreement
        self.send_agreement = send_agreement
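
# Editor's note (illustrative sketch): required fields are keyword-only
# parameters with no default, so omitting either direction fails at
# construction time rather than at request time.
#
#   content = AS2AgreementContent(
#       receive_agreement=receive_agreement,  # AS2OneWayAgreement, defined below
#       send_agreement=send_agreement,
#   )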

class AS2EnvelopeSettings(_serialization.Model):
    """The AS2 agreement envelope settings.

    All required parameters must be populated in order to send to Azure.

    :ivar message_content_type: The message content type. Required.
    :vartype message_content_type: str
    :ivar transmit_file_name_in_mime_header: The value indicating whether to transmit file name in
     mime header. Required.
    :vartype transmit_file_name_in_mime_header: bool
    :ivar file_name_template: The template for file name. Required.
    :vartype file_name_template: str
    :ivar suspend_message_on_file_name_generation_error: The value indicating whether to suspend
     message on file name generation error. Required.
    :vartype suspend_message_on_file_name_generation_error: bool
    :ivar autogenerate_file_name: The value indicating whether to auto generate file name.
     Required.
    :vartype autogenerate_file_name: bool
    """

    _validation = {
        "message_content_type": {"required": True},
        "transmit_file_name_in_mime_header": {"required": True},
        "file_name_template": {"required": True},
        "suspend_message_on_file_name_generation_error": {"required": True},
        "autogenerate_file_name": {"required": True},
    }

    _attribute_map = {
        "message_content_type": {"key": "messageContentType", "type": "str"},
        "transmit_file_name_in_mime_header": {"key": "transmitFileNameInMimeHeader", "type": "bool"},
        "file_name_template": {"key": "fileNameTemplate", "type": "str"},
        "suspend_message_on_file_name_generation_error": {
            "key": "suspendMessageOnFileNameGenerationError",
            "type": "bool",
        },
        "autogenerate_file_name": {"key": "autogenerateFileName", "type": "bool"},
    }

    def __init__(
        self,
        *,
        message_content_type: str,
        transmit_file_name_in_mime_header: bool,
        file_name_template: str,
        suspend_message_on_file_name_generation_error: bool,
        autogenerate_file_name: bool,
        **kwargs
    ):
        """
        :keyword message_content_type: The message content type. Required.
        :paramtype message_content_type: str
        :keyword transmit_file_name_in_mime_header: The value indicating whether to transmit file
         name in mime header. Required.
        :paramtype transmit_file_name_in_mime_header: bool
        :keyword file_name_template: The template for file name. Required.
        :paramtype file_name_template: str
        :keyword suspend_message_on_file_name_generation_error: The value indicating whether to
         suspend message on file name generation error. Required.
        :paramtype suspend_message_on_file_name_generation_error: bool
        :keyword autogenerate_file_name: The value indicating whether to auto generate file name.
         Required.
        :paramtype autogenerate_file_name: bool
        """
        super().__init__(**kwargs)
        self.message_content_type = message_content_type
        self.transmit_file_name_in_mime_header = transmit_file_name_in_mime_header
        self.file_name_template = file_name_template
        self.suspend_message_on_file_name_generation_error = suspend_message_on_file_name_generation_error
        self.autogenerate_file_name = autogenerate_file_name


class AS2ErrorSettings(_serialization.Model):
    """The AS2 agreement error settings.

    All required parameters must be populated in order to send to Azure.

    :ivar suspend_duplicate_message: The value indicating whether to suspend duplicate message.
     Required.
    :vartype suspend_duplicate_message: bool
    :ivar resend_if_mdn_not_received: The value indicating whether to resend message if MDN is not
     received. Required.
    :vartype resend_if_mdn_not_received: bool
    """

    _validation = {
        "suspend_duplicate_message": {"required": True},
        "resend_if_mdn_not_received": {"required": True},
    }

    _attribute_map = {
        "suspend_duplicate_message": {"key": "suspendDuplicateMessage", "type": "bool"},
        "resend_if_mdn_not_received": {"key": "resendIfMDNNotReceived", "type": "bool"},
    }

    def __init__(self, *, suspend_duplicate_message: bool, resend_if_mdn_not_received: bool, **kwargs):
        """
        :keyword suspend_duplicate_message: The value indicating whether to suspend duplicate
         message. Required.
        :paramtype suspend_duplicate_message: bool
        :keyword resend_if_mdn_not_received: The value indicating whether to resend message if MDN
         is not received. Required.
        :paramtype resend_if_mdn_not_received: bool
        """
        super().__init__(**kwargs)
        self.suspend_duplicate_message = suspend_duplicate_message
        self.resend_if_mdn_not_received = resend_if_mdn_not_received

class AS2MdnSettings(_serialization.Model):
    """The AS2 agreement MDN settings.

    All required parameters must be populated in order to send to Azure.

    :ivar need_mdn: The value indicating whether to send or request an MDN. Required.
    :vartype need_mdn: bool
    :ivar sign_mdn: The value indicating whether the MDN needs to be signed or not. Required.
    :vartype sign_mdn: bool
    :ivar send_mdn_asynchronously: The value indicating whether to send the asynchronous MDN.
     Required.
    :vartype send_mdn_asynchronously: bool
    :ivar receipt_delivery_url: The receipt delivery URL.
    :vartype receipt_delivery_url: str
    :ivar disposition_notification_to: The disposition notification to header value.
    :vartype disposition_notification_to: str
    :ivar sign_outbound_mdn_if_optional: The value indicating whether to sign the outbound MDN if
     optional. Required.
    :vartype sign_outbound_mdn_if_optional: bool
    :ivar mdn_text: The MDN text.
    :vartype mdn_text: str
    :ivar send_inbound_mdn_to_message_box: The value indicating whether to send inbound MDN to
     message box. Required.
    :vartype send_inbound_mdn_to_message_box: bool
    :ivar mic_hashing_algorithm: The signing or hashing algorithm. Required. Known values are:
     "NotSpecified", "None", "MD5", "SHA1", "SHA2256", "SHA2384", and "SHA2512".
    :vartype mic_hashing_algorithm: str or ~azure.mgmt.logic.models.HashingAlgorithm
    """

    _validation = {
        "need_mdn": {"required": True},
        "sign_mdn": {"required": True},
        "send_mdn_asynchronously": {"required": True},
        "sign_outbound_mdn_if_optional": {"required": True},
        "send_inbound_mdn_to_message_box": {"required": True},
        "mic_hashing_algorithm": {"required": True},
    }

    _attribute_map = {
        "need_mdn": {"key": "needMDN", "type": "bool"},
        "sign_mdn": {"key": "signMDN", "type": "bool"},
        "send_mdn_asynchronously": {"key": "sendMDNAsynchronously", "type": "bool"},
        "receipt_delivery_url": {"key": "receiptDeliveryUrl", "type": "str"},
        "disposition_notification_to": {"key": "dispositionNotificationTo", "type": "str"},
        "sign_outbound_mdn_if_optional": {"key": "signOutboundMDNIfOptional", "type": "bool"},
        "mdn_text": {"key": "mdnText", "type": "str"},
        "send_inbound_mdn_to_message_box": {"key": "sendInboundMDNToMessageBox", "type": "bool"},
        "mic_hashing_algorithm": {"key": "micHashingAlgorithm", "type": "str"},
    }

    def __init__(
        self,
        *,
        need_mdn: bool,
        sign_mdn: bool,
        send_mdn_asynchronously: bool,
        sign_outbound_mdn_if_optional: bool,
        send_inbound_mdn_to_message_box: bool,
        mic_hashing_algorithm: Union[str, "_models.HashingAlgorithm"],
        receipt_delivery_url: Optional[str] = None,
        disposition_notification_to: Optional[str] = None,
        mdn_text: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword need_mdn: The value indicating whether to send or request an MDN. Required.
        :paramtype need_mdn: bool
        :keyword sign_mdn: The value indicating whether the MDN needs to be signed or not.
         Required.
        :paramtype sign_mdn: bool
        :keyword send_mdn_asynchronously: The value indicating whether to send the asynchronous
         MDN. Required.
        :paramtype send_mdn_asynchronously: bool
        :keyword receipt_delivery_url: The receipt delivery URL.
        :paramtype receipt_delivery_url: str
        :keyword disposition_notification_to: The disposition notification to header value.
        :paramtype disposition_notification_to: str
        :keyword sign_outbound_mdn_if_optional: The value indicating whether to sign the outbound
         MDN if optional. Required.
        :paramtype sign_outbound_mdn_if_optional: bool
        :keyword mdn_text: The MDN text.
        :paramtype mdn_text: str
        :keyword send_inbound_mdn_to_message_box: The value indicating whether to send inbound MDN
         to message box. Required.
        :paramtype send_inbound_mdn_to_message_box: bool
        :keyword mic_hashing_algorithm: The signing or hashing algorithm. Required. Known values
         are: "NotSpecified", "None", "MD5", "SHA1", "SHA2256", "SHA2384", and "SHA2512".
        :paramtype mic_hashing_algorithm: str or ~azure.mgmt.logic.models.HashingAlgorithm
        """
        super().__init__(**kwargs)
        self.need_mdn = need_mdn
        self.sign_mdn = sign_mdn
        self.send_mdn_asynchronously = send_mdn_asynchronously
        self.receipt_delivery_url = receipt_delivery_url
        self.disposition_notification_to = disposition_notification_to
        self.sign_outbound_mdn_if_optional = sign_outbound_mdn_if_optional
        self.mdn_text = mdn_text
        self.send_inbound_mdn_to_message_box = send_inbound_mdn_to_message_box
        self.mic_hashing_algorithm = mic_hashing_algorithm
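
# Editor's note (illustrative sketch): Union-typed fields accept either the raw
# wire string or the corresponding models enum; the enum member name below is
# assumed from the documented value.
#
#   mdn = AS2MdnSettings(
#       need_mdn=True,
#       sign_mdn=True,
#       send_mdn_asynchronously=False,
#       sign_outbound_mdn_if_optional=False,
#       send_inbound_mdn_to_message_box=True,
#       mic_hashing_algorithm="SHA2256",  # or _models.HashingAlgorithm.SHA2256 (assumed)
#   )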

class AS2MessageConnectionSettings(_serialization.Model):
    """The AS2 agreement message connection settings.

    All required parameters must be populated in order to send to Azure.

    :ivar ignore_certificate_name_mismatch: The value indicating whether to ignore mismatch in
     certificate name. Required.
    :vartype ignore_certificate_name_mismatch: bool
    :ivar support_http_status_code_continue: The value indicating whether to support HTTP status
     code 'CONTINUE'. Required.
    :vartype support_http_status_code_continue: bool
    :ivar keep_http_connection_alive: The value indicating whether to keep the connection alive.
     Required.
    :vartype keep_http_connection_alive: bool
    :ivar unfold_http_headers: The value indicating whether to unfold the HTTP headers. Required.
    :vartype unfold_http_headers: bool
    """

    _validation = {
        "ignore_certificate_name_mismatch": {"required": True},
        "support_http_status_code_continue": {"required": True},
        "keep_http_connection_alive": {"required": True},
        "unfold_http_headers": {"required": True},
    }

    _attribute_map = {
        "ignore_certificate_name_mismatch": {"key": "ignoreCertificateNameMismatch", "type": "bool"},
        "support_http_status_code_continue": {"key": "supportHttpStatusCodeContinue", "type": "bool"},
        "keep_http_connection_alive": {"key": "keepHttpConnectionAlive", "type": "bool"},
        "unfold_http_headers": {"key": "unfoldHttpHeaders", "type": "bool"},
    }

    def __init__(
        self,
        *,
        ignore_certificate_name_mismatch: bool,
        support_http_status_code_continue: bool,
        keep_http_connection_alive: bool,
        unfold_http_headers: bool,
        **kwargs
    ):
        """
        :keyword ignore_certificate_name_mismatch: The value indicating whether to ignore mismatch
         in certificate name. Required.
        :paramtype ignore_certificate_name_mismatch: bool
        :keyword support_http_status_code_continue: The value indicating whether to support HTTP
         status code 'CONTINUE'. Required.
        :paramtype support_http_status_code_continue: bool
        :keyword keep_http_connection_alive: The value indicating whether to keep the connection
         alive. Required.
        :paramtype keep_http_connection_alive: bool
        :keyword unfold_http_headers: The value indicating whether to unfold the HTTP headers.
         Required.
        :paramtype unfold_http_headers: bool
        """
        super().__init__(**kwargs)
        self.ignore_certificate_name_mismatch = ignore_certificate_name_mismatch
        self.support_http_status_code_continue = support_http_status_code_continue
        self.keep_http_connection_alive = keep_http_connection_alive
        self.unfold_http_headers = unfold_http_headers

class AS2OneWayAgreement(_serialization.Model):
    """The integration account AS2 one-way agreement.

    All required parameters must be populated in order to send to Azure.

    :ivar sender_business_identity: The sender business identity. Required.
    :vartype sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
    :ivar receiver_business_identity: The receiver business identity. Required.
    :vartype receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
    :ivar protocol_settings: The AS2 protocol settings. Required.
    :vartype protocol_settings: ~azure.mgmt.logic.models.AS2ProtocolSettings
    """

    _validation = {
        "sender_business_identity": {"required": True},
        "receiver_business_identity": {"required": True},
        "protocol_settings": {"required": True},
    }

    _attribute_map = {
        "sender_business_identity": {"key": "senderBusinessIdentity", "type": "BusinessIdentity"},
        "receiver_business_identity": {"key": "receiverBusinessIdentity", "type": "BusinessIdentity"},
        "protocol_settings": {"key": "protocolSettings", "type": "AS2ProtocolSettings"},
    }

    def __init__(
        self,
        *,
        sender_business_identity: "_models.BusinessIdentity",
        receiver_business_identity: "_models.BusinessIdentity",
        protocol_settings: "_models.AS2ProtocolSettings",
        **kwargs
    ):
        """
        :keyword sender_business_identity: The sender business identity. Required.
        :paramtype sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
        :keyword receiver_business_identity: The receiver business identity. Required.
        :paramtype receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
        :keyword protocol_settings: The AS2 protocol settings. Required.
        :paramtype protocol_settings: ~azure.mgmt.logic.models.AS2ProtocolSettings
        """
        super().__init__(**kwargs)
        self.sender_business_identity = sender_business_identity
        self.receiver_business_identity = receiver_business_identity
        self.protocol_settings = protocol_settings

class AS2ProtocolSettings(_serialization.Model):
    """The AS2 agreement protocol settings.

    All required parameters must be populated in order to send to Azure.

    :ivar message_connection_settings: The message connection settings. Required.
    :vartype message_connection_settings: ~azure.mgmt.logic.models.AS2MessageConnectionSettings
    :ivar acknowledgement_connection_settings: The acknowledgement connection settings. Required.
    :vartype acknowledgement_connection_settings:
     ~azure.mgmt.logic.models.AS2AcknowledgementConnectionSettings
    :ivar mdn_settings: The MDN settings. Required.
    :vartype mdn_settings: ~azure.mgmt.logic.models.AS2MdnSettings
    :ivar security_settings: The security settings. Required.
    :vartype security_settings: ~azure.mgmt.logic.models.AS2SecuritySettings
    :ivar validation_settings: The validation settings. Required.
    :vartype validation_settings: ~azure.mgmt.logic.models.AS2ValidationSettings
    :ivar envelope_settings: The envelope settings. Required.
    :vartype envelope_settings: ~azure.mgmt.logic.models.AS2EnvelopeSettings
    :ivar error_settings: The error settings. Required.
    :vartype error_settings: ~azure.mgmt.logic.models.AS2ErrorSettings
    """

    _validation = {
        "message_connection_settings": {"required": True},
        "acknowledgement_connection_settings": {"required": True},
        "mdn_settings": {"required": True},
        "security_settings": {"required": True},
        "validation_settings": {"required": True},
        "envelope_settings": {"required": True},
        "error_settings": {"required": True},
    }

    _attribute_map = {
        "message_connection_settings": {"key": "messageConnectionSettings", "type": "AS2MessageConnectionSettings"},
        "acknowledgement_connection_settings": {
            "key": "acknowledgementConnectionSettings",
            "type": "AS2AcknowledgementConnectionSettings",
        },
        "mdn_settings": {"key": "mdnSettings", "type": "AS2MdnSettings"},
        "security_settings": {"key": "securitySettings", "type": "AS2SecuritySettings"},
        "validation_settings": {"key": "validationSettings", "type": "AS2ValidationSettings"},
        "envelope_settings": {"key": "envelopeSettings", "type": "AS2EnvelopeSettings"},
        "error_settings": {"key": "errorSettings", "type": "AS2ErrorSettings"},
    }

    def __init__(
        self,
        *,
        message_connection_settings: "_models.AS2MessageConnectionSettings",
        acknowledgement_connection_settings: "_models.AS2AcknowledgementConnectionSettings",
        mdn_settings: "_models.AS2MdnSettings",
        security_settings: "_models.AS2SecuritySettings",
        validation_settings: "_models.AS2ValidationSettings",
        envelope_settings: "_models.AS2EnvelopeSettings",
        error_settings: "_models.AS2ErrorSettings",
        **kwargs
    ):
        """
        :keyword message_connection_settings: The message connection settings. Required.
        :paramtype message_connection_settings:
         ~azure.mgmt.logic.models.AS2MessageConnectionSettings
        :keyword acknowledgement_connection_settings: The acknowledgement connection settings.
         Required.
        :paramtype acknowledgement_connection_settings:
         ~azure.mgmt.logic.models.AS2AcknowledgementConnectionSettings
        :keyword mdn_settings: The MDN settings. Required.
        :paramtype mdn_settings: ~azure.mgmt.logic.models.AS2MdnSettings
        :keyword security_settings: The security settings. Required.
        :paramtype security_settings: ~azure.mgmt.logic.models.AS2SecuritySettings
        :keyword validation_settings: The validation settings. Required.
        :paramtype validation_settings: ~azure.mgmt.logic.models.AS2ValidationSettings
        :keyword envelope_settings: The envelope settings. Required.
        :paramtype envelope_settings: ~azure.mgmt.logic.models.AS2EnvelopeSettings
        :keyword error_settings: The error settings. Required.
        :paramtype error_settings: ~azure.mgmt.logic.models.AS2ErrorSettings
        """
        super().__init__(**kwargs)
        self.message_connection_settings = message_connection_settings
        self.acknowledgement_connection_settings = acknowledgement_connection_settings
        self.mdn_settings = mdn_settings
        self.security_settings = security_settings
        self.validation_settings = validation_settings
        self.envelope_settings = envelope_settings
        self.error_settings = error_settings
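
# Editor's note (illustrative sketch): the protocol settings aggregate every
# required sub-setting built above into a single payload; the variable names
# below are hypothetical.
#
#   protocol = AS2ProtocolSettings(
#       message_connection_settings=message_conn,
#       acknowledgement_connection_settings=ack_conn,
#       mdn_settings=mdn,
#       security_settings=security,
#       validation_settings=validation,
#       envelope_settings=envelope,
#       error_settings=errors,
#   )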

class AS2SecuritySettings(_serialization.Model):
    """The AS2 agreement security settings.

    All required parameters must be populated in order to send to Azure.

    :ivar override_group_signing_certificate: The value indicating whether to override the group
     signing certificate. Required.
    :vartype override_group_signing_certificate: bool
    :ivar signing_certificate_name: The name of the signing certificate.
    :vartype signing_certificate_name: str
    :ivar encryption_certificate_name: The name of the encryption certificate.
    :vartype encryption_certificate_name: str
    :ivar enable_nrr_for_inbound_encoded_messages: The value indicating whether to enable NRR for
     inbound encoded messages. Required.
    :vartype enable_nrr_for_inbound_encoded_messages: bool
    :ivar enable_nrr_for_inbound_decoded_messages: The value indicating whether to enable NRR for
     inbound decoded messages. Required.
    :vartype enable_nrr_for_inbound_decoded_messages: bool
    :ivar enable_nrr_for_outbound_mdn: The value indicating whether to enable NRR for outbound
     MDN. Required.
    :vartype enable_nrr_for_outbound_mdn: bool
    :ivar enable_nrr_for_outbound_encoded_messages: The value indicating whether to enable NRR for
     outbound encoded messages. Required.
    :vartype enable_nrr_for_outbound_encoded_messages: bool
    :ivar enable_nrr_for_outbound_decoded_messages: The value indicating whether to enable NRR for
     outbound decoded messages. Required.
    :vartype enable_nrr_for_outbound_decoded_messages: bool
    :ivar enable_nrr_for_inbound_mdn: The value indicating whether to enable NRR for inbound MDN.
     Required.
    :vartype enable_nrr_for_inbound_mdn: bool
    :ivar sha2_algorithm_format: The Sha2 algorithm format. Valid values are Sha2, ShaHashSize,
     ShaHyphenHashSize, Sha2UnderscoreHashSize.
    :vartype sha2_algorithm_format: str
    """

    _validation = {
        "override_group_signing_certificate": {"required": True},
        "enable_nrr_for_inbound_encoded_messages": {"required": True},
        "enable_nrr_for_inbound_decoded_messages": {"required": True},
        "enable_nrr_for_outbound_mdn": {"required": True},
        "enable_nrr_for_outbound_encoded_messages": {"required": True},
        "enable_nrr_for_outbound_decoded_messages": {"required": True},
        "enable_nrr_for_inbound_mdn": {"required": True},
    }

    _attribute_map = {
        "override_group_signing_certificate": {"key": "overrideGroupSigningCertificate", "type": "bool"},
        "signing_certificate_name": {"key": "signingCertificateName", "type": "str"},
        "encryption_certificate_name": {"key": "encryptionCertificateName", "type": "str"},
        "enable_nrr_for_inbound_encoded_messages": {"key": "enableNRRForInboundEncodedMessages", "type": "bool"},
        "enable_nrr_for_inbound_decoded_messages": {"key": "enableNRRForInboundDecodedMessages", "type": "bool"},
        "enable_nrr_for_outbound_mdn": {"key": "enableNRRForOutboundMDN", "type": "bool"},
        "enable_nrr_for_outbound_encoded_messages": {"key": "enableNRRForOutboundEncodedMessages", "type": "bool"},
        "enable_nrr_for_outbound_decoded_messages": {"key": "enableNRRForOutboundDecodedMessages", "type": "bool"},
        "enable_nrr_for_inbound_mdn": {"key": "enableNRRForInboundMDN", "type": "bool"},
        "sha2_algorithm_format": {"key": "sha2AlgorithmFormat", "type": "str"},
    }

    def __init__(
        self,
        *,
        override_group_signing_certificate: bool,
        enable_nrr_for_inbound_encoded_messages: bool,
        enable_nrr_for_inbound_decoded_messages: bool,
        enable_nrr_for_outbound_mdn: bool,
        enable_nrr_for_outbound_encoded_messages: bool,
        enable_nrr_for_outbound_decoded_messages: bool,
        enable_nrr_for_inbound_mdn: bool,
        signing_certificate_name: Optional[str] = None,
        encryption_certificate_name: Optional[str] = None,
        sha2_algorithm_format: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword override_group_signing_certificate: The value indicating whether to override the
         group signing certificate. Required.
        :paramtype override_group_signing_certificate: bool
        :keyword signing_certificate_name: The name of the signing certificate.
        :paramtype signing_certificate_name: str
        :keyword encryption_certificate_name: The name of the encryption certificate.
        :paramtype encryption_certificate_name: str
        :keyword enable_nrr_for_inbound_encoded_messages: The value indicating whether to enable
         NRR for inbound encoded messages. Required.
        :paramtype enable_nrr_for_inbound_encoded_messages: bool
        :keyword enable_nrr_for_inbound_decoded_messages: The value indicating whether to enable
         NRR for inbound decoded messages. Required.
        :paramtype enable_nrr_for_inbound_decoded_messages: bool
        :keyword enable_nrr_for_outbound_mdn: The value indicating whether to enable NRR for
         outbound MDN. Required.
        :paramtype enable_nrr_for_outbound_mdn: bool
        :keyword enable_nrr_for_outbound_encoded_messages: The value indicating whether to enable
         NRR for outbound encoded messages. Required.
        :paramtype enable_nrr_for_outbound_encoded_messages: bool
        :keyword enable_nrr_for_outbound_decoded_messages: The value indicating whether to enable
         NRR for outbound decoded messages. Required.
        :paramtype enable_nrr_for_outbound_decoded_messages: bool
        :keyword enable_nrr_for_inbound_mdn: The value indicating whether to enable NRR for
         inbound MDN. Required.
        :paramtype enable_nrr_for_inbound_mdn: bool
        :keyword sha2_algorithm_format: The Sha2 algorithm format. Valid values are Sha2,
         ShaHashSize, ShaHyphenHashSize, Sha2UnderscoreHashSize.
        :paramtype sha2_algorithm_format: str
        """
        super().__init__(**kwargs)
        self.override_group_signing_certificate = override_group_signing_certificate
        self.signing_certificate_name = signing_certificate_name
        self.encryption_certificate_name = encryption_certificate_name
        self.enable_nrr_for_inbound_encoded_messages = enable_nrr_for_inbound_encoded_messages
        self.enable_nrr_for_inbound_decoded_messages = enable_nrr_for_inbound_decoded_messages
        self.enable_nrr_for_outbound_mdn = enable_nrr_for_outbound_mdn
        self.enable_nrr_for_outbound_encoded_messages = enable_nrr_for_outbound_encoded_messages
        self.enable_nrr_for_outbound_decoded_messages = enable_nrr_for_outbound_decoded_messages
        self.enable_nrr_for_inbound_mdn = enable_nrr_for_inbound_mdn
        self.sha2_algorithm_format = sha2_algorithm_format

class AS2ValidationSettings(_serialization.Model):
    """The AS2 agreement validation settings.

    All required parameters must be populated in order to send to Azure.

    :ivar override_message_properties: The value indicating whether to override incoming message
     properties with those in agreement. Required.
    :vartype override_message_properties: bool
    :ivar encrypt_message: The value indicating whether the message has to be encrypted. Required.
    :vartype encrypt_message: bool
    :ivar sign_message: The value indicating whether the message has to be signed. Required.
    :vartype sign_message: bool
    :ivar compress_message: The value indicating whether the message has to be compressed.
     Required.
    :vartype compress_message: bool
    :ivar check_duplicate_message: The value indicating whether to check for duplicate message.
     Required.
    :vartype check_duplicate_message: bool
    :ivar interchange_duplicates_validity_days: The number of days to look back for duplicate
     interchange. Required.
    :vartype interchange_duplicates_validity_days: int
    :ivar check_certificate_revocation_list_on_send: The value indicating whether to check for
     certificate revocation list on send. Required.
    :vartype check_certificate_revocation_list_on_send: bool
    :ivar check_certificate_revocation_list_on_receive: The value indicating whether to check for
     certificate revocation list on receive. Required.
    :vartype check_certificate_revocation_list_on_receive: bool
    :ivar encryption_algorithm: The encryption algorithm. Required.
     Known values are: "NotSpecified", "None", "DES3", "RC2", "AES128", "AES192", and "AES256".
    :vartype encryption_algorithm: str or ~azure.mgmt.logic.models.EncryptionAlgorithm
    :ivar signing_algorithm: The signing algorithm. Known values are: "NotSpecified", "Default",
     "SHA1", "SHA2256", "SHA2384", and "SHA2512".
    :vartype signing_algorithm: str or ~azure.mgmt.logic.models.SigningAlgorithm
    """

    _validation = {
        "override_message_properties": {"required": True},
        "encrypt_message": {"required": True},
        "sign_message": {"required": True},
        "compress_message": {"required": True},
        "check_duplicate_message": {"required": True},
        "interchange_duplicates_validity_days": {"required": True},
        "check_certificate_revocation_list_on_send": {"required": True},
        "check_certificate_revocation_list_on_receive": {"required": True},
        "encryption_algorithm": {"required": True},
    }

    _attribute_map = {
        "override_message_properties": {"key": "overrideMessageProperties", "type": "bool"},
        "encrypt_message": {"key": "encryptMessage", "type": "bool"},
        "sign_message": {"key": "signMessage", "type": "bool"},
        "compress_message": {"key": "compressMessage", "type": "bool"},
        "check_duplicate_message": {"key": "checkDuplicateMessage", "type": "bool"},
        "interchange_duplicates_validity_days": {"key": "interchangeDuplicatesValidityDays", "type": "int"},
        "check_certificate_revocation_list_on_send": {"key": "checkCertificateRevocationListOnSend", "type": "bool"},
        "check_certificate_revocation_list_on_receive": {
            "key": "checkCertificateRevocationListOnReceive",
            "type": "bool",
        },
        "encryption_algorithm": {"key": "encryptionAlgorithm", "type": "str"},
        "signing_algorithm": {"key": "signingAlgorithm", "type": "str"},
    }

    def __init__(
        self,
        *,
        override_message_properties: bool,
        encrypt_message: bool,
        sign_message: bool,
        compress_message: bool,
        check_duplicate_message: bool,
        interchange_duplicates_validity_days: int,
        check_certificate_revocation_list_on_send: bool,
        check_certificate_revocation_list_on_receive: bool,
        encryption_algorithm: Union[str, "_models.EncryptionAlgorithm"],
        signing_algorithm: Optional[Union[str, "_models.SigningAlgorithm"]] = None,
        **kwargs
    ):
        """
        :keyword override_message_properties: The value indicating whether to override incoming
         message properties with those in agreement. Required.
        :paramtype override_message_properties: bool
        :keyword encrypt_message: The value indicating whether the message has to be encrypted.
         Required.
        :paramtype encrypt_message: bool
        :keyword sign_message: The value indicating whether the message has to be signed.
         Required.
        :paramtype sign_message: bool
        :keyword compress_message: The value indicating whether the message has to be compressed.
         Required.
        :paramtype compress_message: bool
        :keyword check_duplicate_message: The value indicating whether to check for duplicate
         message. Required.
        :paramtype check_duplicate_message: bool
        :keyword interchange_duplicates_validity_days: The number of days to look back for
         duplicate interchange. Required.
        :paramtype interchange_duplicates_validity_days: int
        :keyword check_certificate_revocation_list_on_send: The value indicating whether to check
         for certificate revocation list on send. Required.
        :paramtype check_certificate_revocation_list_on_send: bool
        :keyword check_certificate_revocation_list_on_receive: The value indicating whether to
         check for certificate revocation list on receive. Required.
        :paramtype check_certificate_revocation_list_on_receive: bool
        :keyword encryption_algorithm: The encryption algorithm. Required.
Known values are: "NotSpecified", "None", "DES3", "RC2", "AES128", "AES192", and "AES256". :paramtype encryption_algorithm: str or ~azure.mgmt.logic.models.EncryptionAlgorithm :keyword signing_algorithm: The signing algorithm. Known values are: "NotSpecified", "Default", "SHA1", "SHA2256", "SHA2384", and "SHA2512". :paramtype signing_algorithm: str or ~azure.mgmt.logic.models.SigningAlgorithm """ super().__init__(**kwargs) self.override_message_properties = override_message_properties self.encrypt_message = encrypt_message self.sign_message = sign_message self.compress_message = compress_message self.check_duplicate_message = check_duplicate_message self.interchange_duplicates_validity_days = interchange_duplicates_validity_days self.check_certificate_revocation_list_on_send = check_certificate_revocation_list_on_send self.check_certificate_revocation_list_on_receive = check_certificate_revocation_list_on_receive self.encryption_algorithm = encryption_algorithm self.signing_algorithm = signing_algorithm class AssemblyCollection(_serialization.Model): """A collection of assembly definitions. :ivar value: :vartype value: list[~azure.mgmt.logic.models.AssemblyDefinition] """ _attribute_map = { "value": {"key": "value", "type": "[AssemblyDefinition]"}, } def __init__(self, *, value: Optional[List["_models.AssemblyDefinition"]] = None, **kwargs): """ :keyword value: :paramtype value: list[~azure.mgmt.logic.models.AssemblyDefinition] """ super().__init__(**kwargs) self.value = value class AssemblyDefinition(Resource): """The assembly definition. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar properties: The assembly properties. Required. :vartype properties: ~azure.mgmt.logic.models.AssemblyProperties """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "properties": {"required": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "properties": {"key": "properties", "type": "AssemblyProperties"}, } def __init__( self, *, properties: "_models.AssemblyProperties", location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword properties: The assembly properties. Required. :paramtype properties: ~azure.mgmt.logic.models.AssemblyProperties """ super().__init__(location=location, tags=tags, **kwargs) self.properties = properties class AssemblyProperties(ArtifactContentPropertiesDefinition): """The assembly properties definition. All required parameters must be populated in order to send to Azure. :ivar created_time: The artifact creation time. :vartype created_time: ~datetime.datetime :ivar changed_time: The artifact changed time. :vartype changed_time: ~datetime.datetime :ivar metadata: Anything. :vartype metadata: any :ivar content: Anything. :vartype content: any :ivar content_type: The content type. 
:vartype content_type: str :ivar content_link: The content link. :vartype content_link: ~azure.mgmt.logic.models.ContentLink :ivar assembly_name: The assembly name. Required. :vartype assembly_name: str :ivar assembly_version: The assembly version. :vartype assembly_version: str :ivar assembly_culture: The assembly culture. :vartype assembly_culture: str :ivar assembly_public_key_token: The assembly public key token. :vartype assembly_public_key_token: str """ _validation = { "assembly_name": {"required": True}, } _attribute_map = { "created_time": {"key": "createdTime", "type": "iso-8601"}, "changed_time": {"key": "changedTime", "type": "iso-8601"}, "metadata": {"key": "metadata", "type": "object"}, "content": {"key": "content", "type": "object"}, "content_type": {"key": "contentType", "type": "str"}, "content_link": {"key": "contentLink", "type": "ContentLink"}, "assembly_name": {"key": "assemblyName", "type": "str"}, "assembly_version": {"key": "assemblyVersion", "type": "str"}, "assembly_culture": {"key": "assemblyCulture", "type": "str"}, "assembly_public_key_token": {"key": "assemblyPublicKeyToken", "type": "str"}, } def __init__( self, *, assembly_name: str, created_time: Optional[datetime.datetime] = None, changed_time: Optional[datetime.datetime] = None, metadata: Optional[Any] = None, content: Optional[Any] = None, content_type: Optional[str] = None, content_link: Optional["_models.ContentLink"] = None, assembly_version: Optional[str] = None, assembly_culture: Optional[str] = None, assembly_public_key_token: Optional[str] = None, **kwargs ): """ :keyword created_time: The artifact creation time. :paramtype created_time: ~datetime.datetime :keyword changed_time: The artifact changed time. :paramtype changed_time: ~datetime.datetime :keyword metadata: Anything. :paramtype metadata: any :keyword content: Anything. :paramtype content: any :keyword content_type: The content type. :paramtype content_type: str :keyword content_link: The content link. :paramtype content_link: ~azure.mgmt.logic.models.ContentLink :keyword assembly_name: The assembly name. Required. :paramtype assembly_name: str :keyword assembly_version: The assembly version. :paramtype assembly_version: str :keyword assembly_culture: The assembly culture. :paramtype assembly_culture: str :keyword assembly_public_key_token: The assembly public key token. :paramtype assembly_public_key_token: str """ super().__init__( created_time=created_time, changed_time=changed_time, metadata=metadata, content=content, content_type=content_type, content_link=content_link, **kwargs ) self.assembly_name = assembly_name self.assembly_version = assembly_version self.assembly_culture = assembly_culture self.assembly_public_key_token = assembly_public_key_token class ErrorInfo(_serialization.Model): """The error info. All required parameters must be populated in order to send to Azure. :ivar code: The error code. Required. :vartype code: str """ _validation = { "code": {"required": True}, } _attribute_map = { "code": {"key": "code", "type": "str"}, } def __init__(self, *, code: str, **kwargs): """ :keyword code: The error code. Required. :paramtype code: str """ super().__init__(**kwargs) self.code = code class AzureResourceErrorInfo(ErrorInfo): """The Azure resource error info. All required parameters must be populated in order to send to Azure. :ivar code: The error code. Required. :vartype code: str :ivar message: The error message. Required. :vartype message: str :ivar details: The error details.
:vartype details: list[~azure.mgmt.logic.models.AzureResourceErrorInfo] """ _validation = { "code": {"required": True}, "message": {"required": True}, } _attribute_map = { "code": {"key": "code", "type": "str"}, "message": {"key": "message", "type": "str"}, "details": {"key": "details", "type": "[AzureResourceErrorInfo]"}, } def __init__( self, *, code: str, message: str, details: Optional[List["_models.AzureResourceErrorInfo"]] = None, **kwargs ): """ :keyword code: The error code. Required. :paramtype code: str :keyword message: The error message. Required. :paramtype message: str :keyword details: The error details. :paramtype details: list[~azure.mgmt.logic.models.AzureResourceErrorInfo] """ super().__init__(code=code, **kwargs) self.message = message self.details = details class B2BPartnerContent(_serialization.Model): """The B2B partner content. :ivar business_identities: The list of partner business identities. :vartype business_identities: list[~azure.mgmt.logic.models.BusinessIdentity] """ _attribute_map = { "business_identities": {"key": "businessIdentities", "type": "[BusinessIdentity]"}, } def __init__(self, *, business_identities: Optional[List["_models.BusinessIdentity"]] = None, **kwargs): """ :keyword business_identities: The list of partner business identities. :paramtype business_identities: list[~azure.mgmt.logic.models.BusinessIdentity] """ super().__init__(**kwargs) self.business_identities = business_identities class BatchConfiguration(Resource): """The batch configuration resource definition. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar properties: The batch configuration properties. Required. :vartype properties: ~azure.mgmt.logic.models.BatchConfigurationProperties """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "properties": {"required": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "properties": {"key": "properties", "type": "BatchConfigurationProperties"}, } def __init__( self, *, properties: "_models.BatchConfigurationProperties", location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword properties: The batch configuration properties. Required. :paramtype properties: ~azure.mgmt.logic.models.BatchConfigurationProperties """ super().__init__(location=location, tags=tags, **kwargs) self.properties = properties class BatchConfigurationCollection(_serialization.Model): """A collection of batch configurations. 
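Example (an illustrative sketch, not generated reference content; the group name and release-criteria numbers below are assumed demonstration values, not service defaults):

>>> criteria = BatchReleaseCriteria(message_count=10, batch_size=1024)
>>> properties = BatchConfigurationProperties(batch_group_name="shipOrders", release_criteria=criteria)
>>> collection = BatchConfigurationCollection(value=[BatchConfiguration(properties=properties)])
>>> collection.value[0].properties.batch_group_name
'shipOrders'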
:ivar value: :vartype value: list[~azure.mgmt.logic.models.BatchConfiguration] """ _attribute_map = { "value": {"key": "value", "type": "[BatchConfiguration]"}, } def __init__(self, *, value: Optional[List["_models.BatchConfiguration"]] = None, **kwargs): """ :keyword value: :paramtype value: list[~azure.mgmt.logic.models.BatchConfiguration] """ super().__init__(**kwargs) self.value = value class BatchConfigurationProperties(ArtifactProperties): """The batch configuration properties definition. All required parameters must be populated in order to send to Azure. :ivar created_time: The artifact creation time. :vartype created_time: ~datetime.datetime :ivar changed_time: The artifact changed time. :vartype changed_time: ~datetime.datetime :ivar metadata: Anything. :vartype metadata: any :ivar batch_group_name: The name of the batch group. Required. :vartype batch_group_name: str :ivar release_criteria: The batch release criteria. Required. :vartype release_criteria: ~azure.mgmt.logic.models.BatchReleaseCriteria """ _validation = { "batch_group_name": {"required": True}, "release_criteria": {"required": True}, } _attribute_map = { "created_time": {"key": "createdTime", "type": "iso-8601"}, "changed_time": {"key": "changedTime", "type": "iso-8601"}, "metadata": {"key": "metadata", "type": "object"}, "batch_group_name": {"key": "batchGroupName", "type": "str"}, "release_criteria": {"key": "releaseCriteria", "type": "BatchReleaseCriteria"}, } def __init__( self, *, batch_group_name: str, release_criteria: "_models.BatchReleaseCriteria", created_time: Optional[datetime.datetime] = None, changed_time: Optional[datetime.datetime] = None, metadata: Optional[Any] = None, **kwargs ): """ :keyword created_time: The artifact creation time. :paramtype created_time: ~datetime.datetime :keyword changed_time: The artifact changed time. :paramtype changed_time: ~datetime.datetime :keyword metadata: Anything. :paramtype metadata: any :keyword batch_group_name: The name of the batch group. Required. :paramtype batch_group_name: str :keyword release_criteria: The batch release criteria. Required. :paramtype release_criteria: ~azure.mgmt.logic.models.BatchReleaseCriteria """ super().__init__(created_time=created_time, changed_time=changed_time, metadata=metadata, **kwargs) self.batch_group_name = batch_group_name self.release_criteria = release_criteria class BatchReleaseCriteria(_serialization.Model): """The batch release criteria. :ivar message_count: The message count. :vartype message_count: int :ivar batch_size: The batch size in bytes. :vartype batch_size: int :ivar recurrence: The recurrence. :vartype recurrence: ~azure.mgmt.logic.models.WorkflowTriggerRecurrence """ _attribute_map = { "message_count": {"key": "messageCount", "type": "int"}, "batch_size": {"key": "batchSize", "type": "int"}, "recurrence": {"key": "recurrence", "type": "WorkflowTriggerRecurrence"}, } def __init__( self, *, message_count: Optional[int] = None, batch_size: Optional[int] = None, recurrence: Optional["_models.WorkflowTriggerRecurrence"] = None, **kwargs ): """ :keyword message_count: The message count. :paramtype message_count: int :keyword batch_size: The batch size in bytes. :paramtype batch_size: int :keyword recurrence: The recurrence. 
:paramtype recurrence: ~azure.mgmt.logic.models.WorkflowTriggerRecurrence """ super().__init__(**kwargs) self.message_count = message_count self.batch_size = batch_size self.recurrence = recurrence class BusinessIdentity(_serialization.Model): """The integration account partner's business identity. All required parameters must be populated in order to send to Azure. :ivar qualifier: The business identity qualifier e.g. as2identity, ZZ, ZZZ, 31, 32. Required. :vartype qualifier: str :ivar value: The user defined business identity value. Required. :vartype value: str """ _validation = { "qualifier": {"required": True}, "value": {"required": True}, } _attribute_map = { "qualifier": {"key": "qualifier", "type": "str"}, "value": {"key": "value", "type": "str"}, } def __init__(self, *, qualifier: str, value: str, **kwargs): """ :keyword qualifier: The business identity qualifier e.g. as2identity, ZZ, ZZZ, 31, 32. Required. :paramtype qualifier: str :keyword value: The user defined business identity value. Required. :paramtype value: str """ super().__init__(**kwargs) self.qualifier = qualifier self.value = value class CallbackUrl(_serialization.Model): """The callback url. :ivar value: The URL value. :vartype value: str """ _attribute_map = { "value": {"key": "value", "type": "str"}, } def __init__(self, *, value: Optional[str] = None, **kwargs): """ :keyword value: The URL value. :paramtype value: str """ super().__init__(**kwargs) self.value = value class ContentHash(_serialization.Model): """The content hash. :ivar algorithm: The algorithm of the content hash. :vartype algorithm: str :ivar value: The value of the content hash. :vartype value: str """ _attribute_map = { "algorithm": {"key": "algorithm", "type": "str"}, "value": {"key": "value", "type": "str"}, } def __init__(self, *, algorithm: Optional[str] = None, value: Optional[str] = None, **kwargs): """ :keyword algorithm: The algorithm of the content hash. :paramtype algorithm: str :keyword value: The value of the content hash. :paramtype value: str """ super().__init__(**kwargs) self.algorithm = algorithm self.value = value class ContentLink(_serialization.Model): """The content link. Variables are only populated by the server, and will be ignored when sending a request. :ivar uri: The content link URI. :vartype uri: str :ivar content_version: The content version. :vartype content_version: str :ivar content_size: The content size. :vartype content_size: int :ivar content_hash: The content hash. :vartype content_hash: ~azure.mgmt.logic.models.ContentHash :ivar metadata: The metadata. :vartype metadata: JSON """ _validation = { "content_version": {"readonly": True}, "content_size": {"readonly": True}, "content_hash": {"readonly": True}, "metadata": {"readonly": True}, } _attribute_map = { "uri": {"key": "uri", "type": "str"}, "content_version": {"key": "contentVersion", "type": "str"}, "content_size": {"key": "contentSize", "type": "int"}, "content_hash": {"key": "contentHash", "type": "ContentHash"}, "metadata": {"key": "metadata", "type": "object"}, } def __init__(self, *, uri: Optional[str] = None, **kwargs): """ :keyword uri: The content link URI. :paramtype uri: str """ super().__init__(**kwargs) self.uri = uri self.content_version = None self.content_size = None self.content_hash = None self.metadata = None class Correlation(_serialization.Model): """The correlation property. :ivar client_tracking_id: The client tracking id. 
:vartype client_tracking_id: str """ _attribute_map = { "client_tracking_id": {"key": "clientTrackingId", "type": "str"}, } def __init__(self, *, client_tracking_id: Optional[str] = None, **kwargs): """ :keyword client_tracking_id: The client tracking id. :paramtype client_tracking_id: str """ super().__init__(**kwargs) self.client_tracking_id = client_tracking_id class EdifactAcknowledgementSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes """The Edifact agreement acknowledgement settings. All required parameters must be populated in order to send to Azure. :ivar need_technical_acknowledgement: The value indicating whether technical acknowledgement is needed. Required. :vartype need_technical_acknowledgement: bool :ivar batch_technical_acknowledgements: The value indicating whether to batch the technical acknowledgements. Required. :vartype batch_technical_acknowledgements: bool :ivar need_functional_acknowledgement: The value indicating whether functional acknowledgement is needed. Required. :vartype need_functional_acknowledgement: bool :ivar batch_functional_acknowledgements: The value indicating whether to batch functional acknowledgements. Required. :vartype batch_functional_acknowledgements: bool :ivar need_loop_for_valid_messages: The value indicating whether a loop is needed for valid messages. Required. :vartype need_loop_for_valid_messages: bool :ivar send_synchronous_acknowledgement: The value indicating whether to send synchronous acknowledgement. Required. :vartype send_synchronous_acknowledgement: bool :ivar acknowledgement_control_number_prefix: The acknowledgement control number prefix. :vartype acknowledgement_control_number_prefix: str :ivar acknowledgement_control_number_suffix: The acknowledgement control number suffix. :vartype acknowledgement_control_number_suffix: str :ivar acknowledgement_control_number_lower_bound: The acknowledgement control number lower bound. Required. :vartype acknowledgement_control_number_lower_bound: int :ivar acknowledgement_control_number_upper_bound: The acknowledgement control number upper bound. Required. :vartype acknowledgement_control_number_upper_bound: int :ivar rollover_acknowledgement_control_number: The value indicating whether to roll over the acknowledgement control number. Required.
:vartype rollover_acknowledgement_control_number: bool """ _validation = { "need_technical_acknowledgement": {"required": True}, "batch_technical_acknowledgements": {"required": True}, "need_functional_acknowledgement": {"required": True}, "batch_functional_acknowledgements": {"required": True}, "need_loop_for_valid_messages": {"required": True}, "send_synchronous_acknowledgement": {"required": True}, "acknowledgement_control_number_lower_bound": {"required": True}, "acknowledgement_control_number_upper_bound": {"required": True}, "rollover_acknowledgement_control_number": {"required": True}, } _attribute_map = { "need_technical_acknowledgement": {"key": "needTechnicalAcknowledgement", "type": "bool"}, "batch_technical_acknowledgements": {"key": "batchTechnicalAcknowledgements", "type": "bool"}, "need_functional_acknowledgement": {"key": "needFunctionalAcknowledgement", "type": "bool"}, "batch_functional_acknowledgements": {"key": "batchFunctionalAcknowledgements", "type": "bool"}, "need_loop_for_valid_messages": {"key": "needLoopForValidMessages", "type": "bool"}, "send_synchronous_acknowledgement": {"key": "sendSynchronousAcknowledgement", "type": "bool"}, "acknowledgement_control_number_prefix": {"key": "acknowledgementControlNumberPrefix", "type": "str"}, "acknowledgement_control_number_suffix": {"key": "acknowledgementControlNumberSuffix", "type": "str"}, "acknowledgement_control_number_lower_bound": {"key": "acknowledgementControlNumberLowerBound", "type": "int"}, "acknowledgement_control_number_upper_bound": {"key": "acknowledgementControlNumberUpperBound", "type": "int"}, "rollover_acknowledgement_control_number": {"key": "rolloverAcknowledgementControlNumber", "type": "bool"}, } def __init__( self, *, need_technical_acknowledgement: bool, batch_technical_acknowledgements: bool, need_functional_acknowledgement: bool, batch_functional_acknowledgements: bool, need_loop_for_valid_messages: bool, send_synchronous_acknowledgement: bool, acknowledgement_control_number_lower_bound: int, acknowledgement_control_number_upper_bound: int, rollover_acknowledgement_control_number: bool, acknowledgement_control_number_prefix: Optional[str] = None, acknowledgement_control_number_suffix: Optional[str] = None, **kwargs ): """ :keyword need_technical_acknowledgement: The value indicating whether technical acknowledgement is needed. Required. :paramtype need_technical_acknowledgement: bool :keyword batch_technical_acknowledgements: The value indicating whether to batch the technical acknowledgements. Required. :paramtype batch_technical_acknowledgements: bool :keyword need_functional_acknowledgement: The value indicating whether functional acknowledgement is needed. Required. :paramtype need_functional_acknowledgement: bool :keyword batch_functional_acknowledgements: The value indicating whether to batch functional acknowledgements. Required. :paramtype batch_functional_acknowledgements: bool :keyword need_loop_for_valid_messages: The value indicating whether a loop is needed for valid messages. Required. :paramtype need_loop_for_valid_messages: bool :keyword send_synchronous_acknowledgement: The value indicating whether to send synchronous acknowledgement. Required. :paramtype send_synchronous_acknowledgement: bool :keyword acknowledgement_control_number_prefix: The acknowledgement control number prefix. :paramtype acknowledgement_control_number_prefix: str :keyword acknowledgement_control_number_suffix: The acknowledgement control number suffix. 
:paramtype acknowledgement_control_number_suffix: str :keyword acknowledgement_control_number_lower_bound: The acknowledgement control number lower bound. Required. :paramtype acknowledgement_control_number_lower_bound: int :keyword acknowledgement_control_number_upper_bound: The acknowledgement control number upper bound. Required. :paramtype acknowledgement_control_number_upper_bound: int :keyword rollover_acknowledgement_control_number: The value indicating whether to roll over the acknowledgement control number. Required. :paramtype rollover_acknowledgement_control_number: bool """ super().__init__(**kwargs) self.need_technical_acknowledgement = need_technical_acknowledgement self.batch_technical_acknowledgements = batch_technical_acknowledgements self.need_functional_acknowledgement = need_functional_acknowledgement self.batch_functional_acknowledgements = batch_functional_acknowledgements self.need_loop_for_valid_messages = need_loop_for_valid_messages self.send_synchronous_acknowledgement = send_synchronous_acknowledgement self.acknowledgement_control_number_prefix = acknowledgement_control_number_prefix self.acknowledgement_control_number_suffix = acknowledgement_control_number_suffix self.acknowledgement_control_number_lower_bound = acknowledgement_control_number_lower_bound self.acknowledgement_control_number_upper_bound = acknowledgement_control_number_upper_bound self.rollover_acknowledgement_control_number = rollover_acknowledgement_control_number class EdifactAgreementContent(_serialization.Model): """The Edifact agreement content. All required parameters must be populated in order to send to Azure. :ivar receive_agreement: The EDIFACT one-way receive agreement. Required. :vartype receive_agreement: ~azure.mgmt.logic.models.EdifactOneWayAgreement :ivar send_agreement: The EDIFACT one-way send agreement. Required. :vartype send_agreement: ~azure.mgmt.logic.models.EdifactOneWayAgreement """ _validation = { "receive_agreement": {"required": True}, "send_agreement": {"required": True}, } _attribute_map = { "receive_agreement": {"key": "receiveAgreement", "type": "EdifactOneWayAgreement"}, "send_agreement": {"key": "sendAgreement", "type": "EdifactOneWayAgreement"}, } def __init__( self, *, receive_agreement: "_models.EdifactOneWayAgreement", send_agreement: "_models.EdifactOneWayAgreement", **kwargs ): """ :keyword receive_agreement: The EDIFACT one-way receive agreement. Required. :paramtype receive_agreement: ~azure.mgmt.logic.models.EdifactOneWayAgreement :keyword send_agreement: The EDIFACT one-way send agreement. Required. :paramtype send_agreement: ~azure.mgmt.logic.models.EdifactOneWayAgreement """ super().__init__(**kwargs) self.receive_agreement = receive_agreement self.send_agreement = send_agreement class EdifactDelimiterOverride(_serialization.Model): # pylint: disable=too-many-instance-attributes """The Edifact delimiter override settings. All required parameters must be populated in order to send to Azure. :ivar message_id: The message id. :vartype message_id: str :ivar message_version: The message version. :vartype message_version: str :ivar message_release: The message release. :vartype message_release: str :ivar data_element_separator: The data element separator. Required. :vartype data_element_separator: int :ivar component_separator: The component separator. Required. :vartype component_separator: int :ivar segment_terminator: The segment terminator. Required. :vartype segment_terminator: int :ivar repetition_separator: The repetition separator. Required.
:vartype repetition_separator: int :ivar segment_terminator_suffix: The segment terminator suffix. Required. Known values are: "NotSpecified", "None", "CR", "LF", and "CRLF". :vartype segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix :ivar decimal_point_indicator: The decimal point indicator. Required. Known values are: "NotSpecified", "Comma", and "Decimal". :vartype decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator :ivar release_indicator: The release indicator. Required. :vartype release_indicator: int :ivar message_association_assigned_code: The message association assigned code. :vartype message_association_assigned_code: str :ivar target_namespace: The target namespace on which this delimiter settings has to be applied. :vartype target_namespace: str """ _validation = { "data_element_separator": {"required": True}, "component_separator": {"required": True}, "segment_terminator": {"required": True}, "repetition_separator": {"required": True}, "segment_terminator_suffix": {"required": True}, "decimal_point_indicator": {"required": True}, "release_indicator": {"required": True}, } _attribute_map = { "message_id": {"key": "messageId", "type": "str"}, "message_version": {"key": "messageVersion", "type": "str"}, "message_release": {"key": "messageRelease", "type": "str"}, "data_element_separator": {"key": "dataElementSeparator", "type": "int"}, "component_separator": {"key": "componentSeparator", "type": "int"}, "segment_terminator": {"key": "segmentTerminator", "type": "int"}, "repetition_separator": {"key": "repetitionSeparator", "type": "int"}, "segment_terminator_suffix": {"key": "segmentTerminatorSuffix", "type": "str"}, "decimal_point_indicator": {"key": "decimalPointIndicator", "type": "str"}, "release_indicator": {"key": "releaseIndicator", "type": "int"}, "message_association_assigned_code": {"key": "messageAssociationAssignedCode", "type": "str"}, "target_namespace": {"key": "targetNamespace", "type": "str"}, } def __init__( self, *, data_element_separator: int, component_separator: int, segment_terminator: int, repetition_separator: int, segment_terminator_suffix: Union[str, "_models.SegmentTerminatorSuffix"], decimal_point_indicator: Union[str, "_models.EdifactDecimalIndicator"], release_indicator: int, message_id: Optional[str] = None, message_version: Optional[str] = None, message_release: Optional[str] = None, message_association_assigned_code: Optional[str] = None, target_namespace: Optional[str] = None, **kwargs ): """ :keyword message_id: The message id. :paramtype message_id: str :keyword message_version: The message version. :paramtype message_version: str :keyword message_release: The message release. :paramtype message_release: str :keyword data_element_separator: The data element separator. Required. :paramtype data_element_separator: int :keyword component_separator: The component separator. Required. :paramtype component_separator: int :keyword segment_terminator: The segment terminator. Required. :paramtype segment_terminator: int :keyword repetition_separator: The repetition separator. Required. :paramtype repetition_separator: int :keyword segment_terminator_suffix: The segment terminator suffix. Required. Known values are: "NotSpecified", "None", "CR", "LF", and "CRLF". :paramtype segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix :keyword decimal_point_indicator: The decimal point indicator. Required. Known values are: "NotSpecified", "Comma", and "Decimal". 
:paramtype decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator :keyword release_indicator: The release indicator. Required. :paramtype release_indicator: int :keyword message_association_assigned_code: The message association assigned code. :paramtype message_association_assigned_code: str :keyword target_namespace: The target namespace on which this delimiter settings has to be applied. :paramtype target_namespace: str """ super().__init__(**kwargs) self.message_id = message_id self.message_version = message_version self.message_release = message_release self.data_element_separator = data_element_separator self.component_separator = component_separator self.segment_terminator = segment_terminator self.repetition_separator = repetition_separator self.segment_terminator_suffix = segment_terminator_suffix self.decimal_point_indicator = decimal_point_indicator self.release_indicator = release_indicator self.message_association_assigned_code = message_association_assigned_code self.target_namespace = target_namespace class EdifactEnvelopeOverride(_serialization.Model): # pylint: disable=too-many-instance-attributes """The Edifact envelope override settings. :ivar message_id: The message id on which this envelope settings has to be applied. :vartype message_id: str :ivar message_version: The message version on which this envelope settings has to be applied. :vartype message_version: str :ivar message_release: The message release version on which this envelope settings has to be applied. :vartype message_release: str :ivar message_association_assigned_code: The message association assigned code. :vartype message_association_assigned_code: str :ivar target_namespace: The target namespace on which this envelope settings has to be applied. :vartype target_namespace: str :ivar functional_group_id: The functional group id. :vartype functional_group_id: str :ivar sender_application_qualifier: The sender application qualifier. :vartype sender_application_qualifier: str :ivar sender_application_id: The sender application id. :vartype sender_application_id: str :ivar receiver_application_qualifier: The receiver application qualifier. :vartype receiver_application_qualifier: str :ivar receiver_application_id: The receiver application id. :vartype receiver_application_id: str :ivar controlling_agency_code: The controlling agency code. :vartype controlling_agency_code: str :ivar group_header_message_version: The group header message version. :vartype group_header_message_version: str :ivar group_header_message_release: The group header message release. :vartype group_header_message_release: str :ivar association_assigned_code: The association assigned code. :vartype association_assigned_code: str :ivar application_password: The application password. 
:vartype application_password: str """ _attribute_map = { "message_id": {"key": "messageId", "type": "str"}, "message_version": {"key": "messageVersion", "type": "str"}, "message_release": {"key": "messageRelease", "type": "str"}, "message_association_assigned_code": {"key": "messageAssociationAssignedCode", "type": "str"}, "target_namespace": {"key": "targetNamespace", "type": "str"}, "functional_group_id": {"key": "functionalGroupId", "type": "str"}, "sender_application_qualifier": {"key": "senderApplicationQualifier", "type": "str"}, "sender_application_id": {"key": "senderApplicationId", "type": "str"}, "receiver_application_qualifier": {"key": "receiverApplicationQualifier", "type": "str"}, "receiver_application_id": {"key": "receiverApplicationId", "type": "str"}, "controlling_agency_code": {"key": "controllingAgencyCode", "type": "str"}, "group_header_message_version": {"key": "groupHeaderMessageVersion", "type": "str"}, "group_header_message_release": {"key": "groupHeaderMessageRelease", "type": "str"}, "association_assigned_code": {"key": "associationAssignedCode", "type": "str"}, "application_password": {"key": "applicationPassword", "type": "str"}, } def __init__( self, *, message_id: Optional[str] = None, message_version: Optional[str] = None, message_release: Optional[str] = None, message_association_assigned_code: Optional[str] = None, target_namespace: Optional[str] = None, functional_group_id: Optional[str] = None, sender_application_qualifier: Optional[str] = None, sender_application_id: Optional[str] = None, receiver_application_qualifier: Optional[str] = None, receiver_application_id: Optional[str] = None, controlling_agency_code: Optional[str] = None, group_header_message_version: Optional[str] = None, group_header_message_release: Optional[str] = None, association_assigned_code: Optional[str] = None, application_password: Optional[str] = None, **kwargs ): """ :keyword message_id: The message id on which this envelope settings has to be applied. :paramtype message_id: str :keyword message_version: The message version on which this envelope settings has to be applied. :paramtype message_version: str :keyword message_release: The message release version on which this envelope settings has to be applied. :paramtype message_release: str :keyword message_association_assigned_code: The message association assigned code. :paramtype message_association_assigned_code: str :keyword target_namespace: The target namespace on which this envelope settings has to be applied. :paramtype target_namespace: str :keyword functional_group_id: The functional group id. :paramtype functional_group_id: str :keyword sender_application_qualifier: The sender application qualifier. :paramtype sender_application_qualifier: str :keyword sender_application_id: The sender application id. :paramtype sender_application_id: str :keyword receiver_application_qualifier: The receiver application qualifier. :paramtype receiver_application_qualifier: str :keyword receiver_application_id: The receiver application id. :paramtype receiver_application_id: str :keyword controlling_agency_code: The controlling agency code. :paramtype controlling_agency_code: str :keyword group_header_message_version: The group header message version. :paramtype group_header_message_version: str :keyword group_header_message_release: The group header message release. :paramtype group_header_message_release: str :keyword association_assigned_code: The association assigned code. 
:paramtype association_assigned_code: str :keyword application_password: The application password. :paramtype application_password: str """ super().__init__(**kwargs) self.message_id = message_id self.message_version = message_version self.message_release = message_release self.message_association_assigned_code = message_association_assigned_code self.target_namespace = target_namespace self.functional_group_id = functional_group_id self.sender_application_qualifier = sender_application_qualifier self.sender_application_id = sender_application_id self.receiver_application_qualifier = receiver_application_qualifier self.receiver_application_id = receiver_application_id self.controlling_agency_code = controlling_agency_code self.group_header_message_version = group_header_message_version self.group_header_message_release = group_header_message_release self.association_assigned_code = association_assigned_code self.application_password = application_password class EdifactEnvelopeSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes """The Edifact agreement envelope settings. All required parameters must be populated in order to send to Azure. :ivar group_association_assigned_code: The group association assigned code. :vartype group_association_assigned_code: str :ivar communication_agreement_id: The communication agreement id. :vartype communication_agreement_id: str :ivar apply_delimiter_string_advice: The value indicating whether to apply delimiter string advice. Required. :vartype apply_delimiter_string_advice: bool :ivar create_grouping_segments: The value indicating whether to create grouping segments. Required. :vartype create_grouping_segments: bool :ivar enable_default_group_headers: The value indicating whether to enable default group headers. Required. :vartype enable_default_group_headers: bool :ivar recipient_reference_password_value: The recipient reference password value. :vartype recipient_reference_password_value: str :ivar recipient_reference_password_qualifier: The recipient reference password qualifier. :vartype recipient_reference_password_qualifier: str :ivar application_reference_id: The application reference id. :vartype application_reference_id: str :ivar processing_priority_code: The processing priority code. :vartype processing_priority_code: str :ivar interchange_control_number_lower_bound: The interchange control number lower bound. Required. :vartype interchange_control_number_lower_bound: int :ivar interchange_control_number_upper_bound: The interchange control number upper bound. Required. :vartype interchange_control_number_upper_bound: int :ivar rollover_interchange_control_number: The value indicating whether to rollover interchange control number. Required. :vartype rollover_interchange_control_number: bool :ivar interchange_control_number_prefix: The interchange control number prefix. :vartype interchange_control_number_prefix: str :ivar interchange_control_number_suffix: The interchange control number suffix. :vartype interchange_control_number_suffix: str :ivar sender_reverse_routing_address: The sender reverse routing address. :vartype sender_reverse_routing_address: str :ivar receiver_reverse_routing_address: The receiver reverse routing address. :vartype receiver_reverse_routing_address: str :ivar functional_group_id: The functional group id. :vartype functional_group_id: str :ivar group_controlling_agency_code: The group controlling agency code. 
:vartype group_controlling_agency_code: str :ivar group_message_version: The group message version. :vartype group_message_version: str :ivar group_message_release: The group message release. :vartype group_message_release: str :ivar group_control_number_lower_bound: The group control number lower bound. Required. :vartype group_control_number_lower_bound: int :ivar group_control_number_upper_bound: The group control number upper bound. Required. :vartype group_control_number_upper_bound: int :ivar rollover_group_control_number: The value indicating whether to rollover group control number. Required. :vartype rollover_group_control_number: bool :ivar group_control_number_prefix: The group control number prefix. :vartype group_control_number_prefix: str :ivar group_control_number_suffix: The group control number suffix. :vartype group_control_number_suffix: str :ivar group_application_receiver_qualifier: The group application receiver qualifier. :vartype group_application_receiver_qualifier: str :ivar group_application_receiver_id: The group application receiver id. :vartype group_application_receiver_id: str :ivar group_application_sender_qualifier: The group application sender qualifier. :vartype group_application_sender_qualifier: str :ivar group_application_sender_id: The group application sender id. :vartype group_application_sender_id: str :ivar group_application_password: The group application password. :vartype group_application_password: str :ivar overwrite_existing_transaction_set_control_number: The value indicating whether to overwrite existing transaction set control number. Required. :vartype overwrite_existing_transaction_set_control_number: bool :ivar transaction_set_control_number_prefix: The transaction set control number prefix. :vartype transaction_set_control_number_prefix: str :ivar transaction_set_control_number_suffix: The transaction set control number suffix. :vartype transaction_set_control_number_suffix: str :ivar transaction_set_control_number_lower_bound: The transaction set control number lower bound. Required. :vartype transaction_set_control_number_lower_bound: int :ivar transaction_set_control_number_upper_bound: The transaction set control number upper bound. Required. :vartype transaction_set_control_number_upper_bound: int :ivar rollover_transaction_set_control_number: The value indicating whether to rollover transaction set control number. Required. :vartype rollover_transaction_set_control_number: bool :ivar is_test_interchange: The value indicating whether the message is a test interchange. Required. :vartype is_test_interchange: bool :ivar sender_internal_identification: The sender internal identification. :vartype sender_internal_identification: str :ivar sender_internal_sub_identification: The sender internal sub identification. :vartype sender_internal_sub_identification: str :ivar receiver_internal_identification: The receiver internal identification. :vartype receiver_internal_identification: str :ivar receiver_internal_sub_identification: The receiver internal sub identification. 
:vartype receiver_internal_sub_identification: str """ _validation = { "apply_delimiter_string_advice": {"required": True}, "create_grouping_segments": {"required": True}, "enable_default_group_headers": {"required": True}, "interchange_control_number_lower_bound": {"required": True}, "interchange_control_number_upper_bound": {"required": True}, "rollover_interchange_control_number": {"required": True}, "group_control_number_lower_bound": {"required": True}, "group_control_number_upper_bound": {"required": True}, "rollover_group_control_number": {"required": True}, "overwrite_existing_transaction_set_control_number": {"required": True}, "transaction_set_control_number_lower_bound": {"required": True}, "transaction_set_control_number_upper_bound": {"required": True}, "rollover_transaction_set_control_number": {"required": True}, "is_test_interchange": {"required": True}, } _attribute_map = { "group_association_assigned_code": {"key": "groupAssociationAssignedCode", "type": "str"}, "communication_agreement_id": {"key": "communicationAgreementId", "type": "str"}, "apply_delimiter_string_advice": {"key": "applyDelimiterStringAdvice", "type": "bool"}, "create_grouping_segments": {"key": "createGroupingSegments", "type": "bool"}, "enable_default_group_headers": {"key": "enableDefaultGroupHeaders", "type": "bool"}, "recipient_reference_password_value": {"key": "recipientReferencePasswordValue", "type": "str"}, "recipient_reference_password_qualifier": {"key": "recipientReferencePasswordQualifier", "type": "str"}, "application_reference_id": {"key": "applicationReferenceId", "type": "str"}, "processing_priority_code": {"key": "processingPriorityCode", "type": "str"}, "interchange_control_number_lower_bound": {"key": "interchangeControlNumberLowerBound", "type": "int"}, "interchange_control_number_upper_bound": {"key": "interchangeControlNumberUpperBound", "type": "int"}, "rollover_interchange_control_number": {"key": "rolloverInterchangeControlNumber", "type": "bool"}, "interchange_control_number_prefix": {"key": "interchangeControlNumberPrefix", "type": "str"}, "interchange_control_number_suffix": {"key": "interchangeControlNumberSuffix", "type": "str"}, "sender_reverse_routing_address": {"key": "senderReverseRoutingAddress", "type": "str"}, "receiver_reverse_routing_address": {"key": "receiverReverseRoutingAddress", "type": "str"}, "functional_group_id": {"key": "functionalGroupId", "type": "str"}, "group_controlling_agency_code": {"key": "groupControllingAgencyCode", "type": "str"}, "group_message_version": {"key": "groupMessageVersion", "type": "str"}, "group_message_release": {"key": "groupMessageRelease", "type": "str"}, "group_control_number_lower_bound": {"key": "groupControlNumberLowerBound", "type": "int"}, "group_control_number_upper_bound": {"key": "groupControlNumberUpperBound", "type": "int"}, "rollover_group_control_number": {"key": "rolloverGroupControlNumber", "type": "bool"}, "group_control_number_prefix": {"key": "groupControlNumberPrefix", "type": "str"}, "group_control_number_suffix": {"key": "groupControlNumberSuffix", "type": "str"}, "group_application_receiver_qualifier": {"key": "groupApplicationReceiverQualifier", "type": "str"}, "group_application_receiver_id": {"key": "groupApplicationReceiverId", "type": "str"}, "group_application_sender_qualifier": {"key": "groupApplicationSenderQualifier", "type": "str"}, "group_application_sender_id": {"key": "groupApplicationSenderId", "type": "str"}, "group_application_password": {"key": "groupApplicationPassword", "type": "str"}, 
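# The remaining entries map the transaction-set, test-interchange, and internal-identification attributes (snake_case) to their camelCase JSON wire names and serialization types.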
"overwrite_existing_transaction_set_control_number": { "key": "overwriteExistingTransactionSetControlNumber", "type": "bool", }, "transaction_set_control_number_prefix": {"key": "transactionSetControlNumberPrefix", "type": "str"}, "transaction_set_control_number_suffix": {"key": "transactionSetControlNumberSuffix", "type": "str"}, "transaction_set_control_number_lower_bound": {"key": "transactionSetControlNumberLowerBound", "type": "int"}, "transaction_set_control_number_upper_bound": {"key": "transactionSetControlNumberUpperBound", "type": "int"}, "rollover_transaction_set_control_number": {"key": "rolloverTransactionSetControlNumber", "type": "bool"}, "is_test_interchange": {"key": "isTestInterchange", "type": "bool"}, "sender_internal_identification": {"key": "senderInternalIdentification", "type": "str"}, "sender_internal_sub_identification": {"key": "senderInternalSubIdentification", "type": "str"}, "receiver_internal_identification": {"key": "receiverInternalIdentification", "type": "str"}, "receiver_internal_sub_identification": {"key": "receiverInternalSubIdentification", "type": "str"}, } def __init__( # pylint: disable=too-many-locals self, *, apply_delimiter_string_advice: bool, create_grouping_segments: bool, enable_default_group_headers: bool, interchange_control_number_lower_bound: int, interchange_control_number_upper_bound: int, rollover_interchange_control_number: bool, group_control_number_lower_bound: int, group_control_number_upper_bound: int, rollover_group_control_number: bool, overwrite_existing_transaction_set_control_number: bool, transaction_set_control_number_lower_bound: int, transaction_set_control_number_upper_bound: int, rollover_transaction_set_control_number: bool, is_test_interchange: bool, group_association_assigned_code: Optional[str] = None, communication_agreement_id: Optional[str] = None, recipient_reference_password_value: Optional[str] = None, recipient_reference_password_qualifier: Optional[str] = None, application_reference_id: Optional[str] = None, processing_priority_code: Optional[str] = None, interchange_control_number_prefix: Optional[str] = None, interchange_control_number_suffix: Optional[str] = None, sender_reverse_routing_address: Optional[str] = None, receiver_reverse_routing_address: Optional[str] = None, functional_group_id: Optional[str] = None, group_controlling_agency_code: Optional[str] = None, group_message_version: Optional[str] = None, group_message_release: Optional[str] = None, group_control_number_prefix: Optional[str] = None, group_control_number_suffix: Optional[str] = None, group_application_receiver_qualifier: Optional[str] = None, group_application_receiver_id: Optional[str] = None, group_application_sender_qualifier: Optional[str] = None, group_application_sender_id: Optional[str] = None, group_application_password: Optional[str] = None, transaction_set_control_number_prefix: Optional[str] = None, transaction_set_control_number_suffix: Optional[str] = None, sender_internal_identification: Optional[str] = None, sender_internal_sub_identification: Optional[str] = None, receiver_internal_identification: Optional[str] = None, receiver_internal_sub_identification: Optional[str] = None, **kwargs ): """ :keyword group_association_assigned_code: The group association assigned code. :paramtype group_association_assigned_code: str :keyword communication_agreement_id: The communication agreement id. 
:paramtype communication_agreement_id: str :keyword apply_delimiter_string_advice: The value indicating whether to apply delimiter string advice. Required. :paramtype apply_delimiter_string_advice: bool :keyword create_grouping_segments: The value indicating whether to create grouping segments. Required. :paramtype create_grouping_segments: bool :keyword enable_default_group_headers: The value indicating whether to enable default group headers. Required. :paramtype enable_default_group_headers: bool :keyword recipient_reference_password_value: The recipient reference password value. :paramtype recipient_reference_password_value: str :keyword recipient_reference_password_qualifier: The recipient reference password qualifier. :paramtype recipient_reference_password_qualifier: str :keyword application_reference_id: The application reference id. :paramtype application_reference_id: str :keyword processing_priority_code: The processing priority code. :paramtype processing_priority_code: str :keyword interchange_control_number_lower_bound: The interchange control number lower bound. Required. :paramtype interchange_control_number_lower_bound: int :keyword interchange_control_number_upper_bound: The interchange control number upper bound. Required. :paramtype interchange_control_number_upper_bound: int :keyword rollover_interchange_control_number: The value indicating whether to rollover interchange control number. Required. :paramtype rollover_interchange_control_number: bool :keyword interchange_control_number_prefix: The interchange control number prefix. :paramtype interchange_control_number_prefix: str :keyword interchange_control_number_suffix: The interchange control number suffix. :paramtype interchange_control_number_suffix: str :keyword sender_reverse_routing_address: The sender reverse routing address. :paramtype sender_reverse_routing_address: str :keyword receiver_reverse_routing_address: The receiver reverse routing address. :paramtype receiver_reverse_routing_address: str :keyword functional_group_id: The functional group id. :paramtype functional_group_id: str :keyword group_controlling_agency_code: The group controlling agency code. :paramtype group_controlling_agency_code: str :keyword group_message_version: The group message version. :paramtype group_message_version: str :keyword group_message_release: The group message release. :paramtype group_message_release: str :keyword group_control_number_lower_bound: The group control number lower bound. Required. :paramtype group_control_number_lower_bound: int :keyword group_control_number_upper_bound: The group control number upper bound. Required. :paramtype group_control_number_upper_bound: int :keyword rollover_group_control_number: The value indicating whether to rollover group control number. Required. :paramtype rollover_group_control_number: bool :keyword group_control_number_prefix: The group control number prefix. :paramtype group_control_number_prefix: str :keyword group_control_number_suffix: The group control number suffix. :paramtype group_control_number_suffix: str :keyword group_application_receiver_qualifier: The group application receiver qualifier. :paramtype group_application_receiver_qualifier: str :keyword group_application_receiver_id: The group application receiver id. :paramtype group_application_receiver_id: str :keyword group_application_sender_qualifier: The group application sender qualifier. 
:paramtype group_application_sender_qualifier: str :keyword group_application_sender_id: The group application sender id. :paramtype group_application_sender_id: str :keyword group_application_password: The group application password. :paramtype group_application_password: str :keyword overwrite_existing_transaction_set_control_number: The value indicating whether to overwrite existing transaction set control number. Required. :paramtype overwrite_existing_transaction_set_control_number: bool :keyword transaction_set_control_number_prefix: The transaction set control number prefix. :paramtype transaction_set_control_number_prefix: str :keyword transaction_set_control_number_suffix: The transaction set control number suffix. :paramtype transaction_set_control_number_suffix: str :keyword transaction_set_control_number_lower_bound: The transaction set control number lower bound. Required. :paramtype transaction_set_control_number_lower_bound: int :keyword transaction_set_control_number_upper_bound: The transaction set control number upper bound. Required. :paramtype transaction_set_control_number_upper_bound: int :keyword rollover_transaction_set_control_number: The value indicating whether to rollover transaction set control number. Required. :paramtype rollover_transaction_set_control_number: bool :keyword is_test_interchange: The value indicating whether the message is a test interchange. Required. :paramtype is_test_interchange: bool :keyword sender_internal_identification: The sender internal identification. :paramtype sender_internal_identification: str :keyword sender_internal_sub_identification: The sender internal sub identification. :paramtype sender_internal_sub_identification: str :keyword receiver_internal_identification: The receiver internal identification. :paramtype receiver_internal_identification: str :keyword receiver_internal_sub_identification: The receiver internal sub identification. 
:paramtype receiver_internal_sub_identification: str """ super().__init__(**kwargs) self.group_association_assigned_code = group_association_assigned_code self.communication_agreement_id = communication_agreement_id self.apply_delimiter_string_advice = apply_delimiter_string_advice self.create_grouping_segments = create_grouping_segments self.enable_default_group_headers = enable_default_group_headers self.recipient_reference_password_value = recipient_reference_password_value self.recipient_reference_password_qualifier = recipient_reference_password_qualifier self.application_reference_id = application_reference_id self.processing_priority_code = processing_priority_code self.interchange_control_number_lower_bound = interchange_control_number_lower_bound self.interchange_control_number_upper_bound = interchange_control_number_upper_bound self.rollover_interchange_control_number = rollover_interchange_control_number self.interchange_control_number_prefix = interchange_control_number_prefix self.interchange_control_number_suffix = interchange_control_number_suffix self.sender_reverse_routing_address = sender_reverse_routing_address self.receiver_reverse_routing_address = receiver_reverse_routing_address self.functional_group_id = functional_group_id self.group_controlling_agency_code = group_controlling_agency_code self.group_message_version = group_message_version self.group_message_release = group_message_release self.group_control_number_lower_bound = group_control_number_lower_bound self.group_control_number_upper_bound = group_control_number_upper_bound self.rollover_group_control_number = rollover_group_control_number self.group_control_number_prefix = group_control_number_prefix self.group_control_number_suffix = group_control_number_suffix self.group_application_receiver_qualifier = group_application_receiver_qualifier self.group_application_receiver_id = group_application_receiver_id self.group_application_sender_qualifier = group_application_sender_qualifier self.group_application_sender_id = group_application_sender_id self.group_application_password = group_application_password self.overwrite_existing_transaction_set_control_number = overwrite_existing_transaction_set_control_number self.transaction_set_control_number_prefix = transaction_set_control_number_prefix self.transaction_set_control_number_suffix = transaction_set_control_number_suffix self.transaction_set_control_number_lower_bound = transaction_set_control_number_lower_bound self.transaction_set_control_number_upper_bound = transaction_set_control_number_upper_bound self.rollover_transaction_set_control_number = rollover_transaction_set_control_number self.is_test_interchange = is_test_interchange self.sender_internal_identification = sender_internal_identification self.sender_internal_sub_identification = sender_internal_sub_identification self.receiver_internal_identification = receiver_internal_identification self.receiver_internal_sub_identification = receiver_internal_sub_identification class EdifactFramingSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes """The Edifact agreement framing settings. All required parameters must be populated in order to send to Azure. :ivar service_code_list_directory_version: The service code list directory version. :vartype service_code_list_directory_version: str :ivar character_encoding: The character encoding. :vartype character_encoding: str :ivar protocol_version: The protocol version. Required. 
:vartype protocol_version: int :ivar data_element_separator: The data element separator. Required. :vartype data_element_separator: int :ivar component_separator: The component separator. Required. :vartype component_separator: int :ivar segment_terminator: The segment terminator. Required. :vartype segment_terminator: int :ivar release_indicator: The release indicator. Required. :vartype release_indicator: int :ivar repetition_separator: The repetition separator. Required. :vartype repetition_separator: int :ivar character_set: The EDIFACT frame setting characterSet. Required. Known values are: "NotSpecified", "UNOB", "UNOA", "UNOC", "UNOD", "UNOE", "UNOF", "UNOG", "UNOH", "UNOI", "UNOJ", "UNOK", "UNOX", "UNOY", and "KECA". :vartype character_set: str or ~azure.mgmt.logic.models.EdifactCharacterSet :ivar decimal_point_indicator: The EDIFACT frame setting decimal indicator. Required. Known values are: "NotSpecified", "Comma", and "Decimal". :vartype decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator :ivar segment_terminator_suffix: The EDIFACT frame setting segment terminator suffix. Required. Known values are: "NotSpecified", "None", "CR", "LF", and "CRLF". :vartype segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix """ _validation = { "protocol_version": {"required": True}, "data_element_separator": {"required": True}, "component_separator": {"required": True}, "segment_terminator": {"required": True}, "release_indicator": {"required": True}, "repetition_separator": {"required": True}, "character_set": {"required": True}, "decimal_point_indicator": {"required": True}, "segment_terminator_suffix": {"required": True}, } _attribute_map = { "service_code_list_directory_version": {"key": "serviceCodeListDirectoryVersion", "type": "str"}, "character_encoding": {"key": "characterEncoding", "type": "str"}, "protocol_version": {"key": "protocolVersion", "type": "int"}, "data_element_separator": {"key": "dataElementSeparator", "type": "int"}, "component_separator": {"key": "componentSeparator", "type": "int"}, "segment_terminator": {"key": "segmentTerminator", "type": "int"}, "release_indicator": {"key": "releaseIndicator", "type": "int"}, "repetition_separator": {"key": "repetitionSeparator", "type": "int"}, "character_set": {"key": "characterSet", "type": "str"}, "decimal_point_indicator": {"key": "decimalPointIndicator", "type": "str"}, "segment_terminator_suffix": {"key": "segmentTerminatorSuffix", "type": "str"}, } def __init__( self, *, protocol_version: int, data_element_separator: int, component_separator: int, segment_terminator: int, release_indicator: int, repetition_separator: int, character_set: Union[str, "_models.EdifactCharacterSet"], decimal_point_indicator: Union[str, "_models.EdifactDecimalIndicator"], segment_terminator_suffix: Union[str, "_models.SegmentTerminatorSuffix"], service_code_list_directory_version: Optional[str] = None, character_encoding: Optional[str] = None, **kwargs ): """ :keyword service_code_list_directory_version: The service code list directory version. :paramtype service_code_list_directory_version: str :keyword character_encoding: The character encoding. :paramtype character_encoding: str :keyword protocol_version: The protocol version. Required. :paramtype protocol_version: int :keyword data_element_separator: The data element separator. Required. :paramtype data_element_separator: int :keyword component_separator: The component separator. Required. 
:paramtype component_separator: int :keyword segment_terminator: The segment terminator. Required. :paramtype segment_terminator: int :keyword release_indicator: The release indicator. Required. :paramtype release_indicator: int :keyword repetition_separator: The repetition separator. Required. :paramtype repetition_separator: int :keyword character_set: The EDIFACT frame setting characterSet. Required. Known values are: "NotSpecified", "UNOB", "UNOA", "UNOC", "UNOD", "UNOE", "UNOF", "UNOG", "UNOH", "UNOI", "UNOJ", "UNOK", "UNOX", "UNOY", and "KECA". :paramtype character_set: str or ~azure.mgmt.logic.models.EdifactCharacterSet :keyword decimal_point_indicator: The EDIFACT frame setting decimal indicator. Required. Known values are: "NotSpecified", "Comma", and "Decimal". :paramtype decimal_point_indicator: str or ~azure.mgmt.logic.models.EdifactDecimalIndicator :keyword segment_terminator_suffix: The EDIFACT frame setting segment terminator suffix. Required. Known values are: "NotSpecified", "None", "CR", "LF", and "CRLF". :paramtype segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix """ super().__init__(**kwargs) self.service_code_list_directory_version = service_code_list_directory_version self.character_encoding = character_encoding self.protocol_version = protocol_version self.data_element_separator = data_element_separator self.component_separator = component_separator self.segment_terminator = segment_terminator self.release_indicator = release_indicator self.repetition_separator = repetition_separator self.character_set = character_set self.decimal_point_indicator = decimal_point_indicator self.segment_terminator_suffix = segment_terminator_suffix class EdifactMessageFilter(_serialization.Model): """The Edifact message filter for odata query. All required parameters must be populated in order to send to Azure. :ivar message_filter_type: The message filter type. Required. Known values are: "NotSpecified", "Include", and "Exclude". :vartype message_filter_type: str or ~azure.mgmt.logic.models.MessageFilterType """ _validation = { "message_filter_type": {"required": True}, } _attribute_map = { "message_filter_type": {"key": "messageFilterType", "type": "str"}, } def __init__(self, *, message_filter_type: Union[str, "_models.MessageFilterType"], **kwargs): """ :keyword message_filter_type: The message filter type. Required. Known values are: "NotSpecified", "Include", and "Exclude". :paramtype message_filter_type: str or ~azure.mgmt.logic.models.MessageFilterType """ super().__init__(**kwargs) self.message_filter_type = message_filter_type class EdifactMessageIdentifier(_serialization.Model): """The Edifact message identifier. All required parameters must be populated in order to send to Azure. :ivar message_id: The message id on which these envelope settings have to be applied. Required. :vartype message_id: str """ _validation = { "message_id": {"required": True}, } _attribute_map = { "message_id": {"key": "messageId", "type": "str"}, } def __init__(self, *, message_id: str, **kwargs): """ :keyword message_id: The message id on which these envelope settings have to be applied. Required. :paramtype message_id: str """ super().__init__(**kwargs) self.message_id = message_id class EdifactOneWayAgreement(_serialization.Model): """The Edifact one way agreement. All required parameters must be populated in order to send to Azure. :ivar sender_business_identity: The sender business identity. Required.
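# A minimal sketch of constructing EdifactFramingSettings. The framing
# separators are passed as integer character codes; the particular values
# below are assumptions (the conventional UNA service string defaults),
# not service-mandated settings.
def _example_framing_settings() -> "EdifactFramingSettings":
    return EdifactFramingSettings(
        protocol_version=4,  # assumed EDIFACT syntax version
        data_element_separator=43,  # ord("+")
        component_separator=58,  # ord(":")
        segment_terminator=39,  # ord("'")
        release_indicator=63,  # ord("?")
        repetition_separator=42,  # ord("*")
        character_set="UNOB",  # plain strings are accepted for known-values enums
        decimal_point_indicator="Comma",
        segment_terminator_suffix="CRLF",
    )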
:vartype sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity :ivar receiver_business_identity: The receiver business identity. Required. :vartype receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity :ivar protocol_settings: The EDIFACT protocol settings. Required. :vartype protocol_settings: ~azure.mgmt.logic.models.EdifactProtocolSettings """ _validation = { "sender_business_identity": {"required": True}, "receiver_business_identity": {"required": True}, "protocol_settings": {"required": True}, } _attribute_map = { "sender_business_identity": {"key": "senderBusinessIdentity", "type": "BusinessIdentity"}, "receiver_business_identity": {"key": "receiverBusinessIdentity", "type": "BusinessIdentity"}, "protocol_settings": {"key": "protocolSettings", "type": "EdifactProtocolSettings"}, } def __init__( self, *, sender_business_identity: "_models.BusinessIdentity", receiver_business_identity: "_models.BusinessIdentity", protocol_settings: "_models.EdifactProtocolSettings", **kwargs ): """ :keyword sender_business_identity: The sender business identity. Required. :paramtype sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity :keyword receiver_business_identity: The receiver business identity. Required. :paramtype receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity :keyword protocol_settings: The EDIFACT protocol settings. Required. :paramtype protocol_settings: ~azure.mgmt.logic.models.EdifactProtocolSettings """ super().__init__(**kwargs) self.sender_business_identity = sender_business_identity self.receiver_business_identity = receiver_business_identity self.protocol_settings = protocol_settings class EdifactProcessingSettings(_serialization.Model): """The Edifact agreement processing settings. All required parameters must be populated in order to send to Azure. :ivar mask_security_info: The value indicating whether to mask security information. Required. :vartype mask_security_info: bool :ivar preserve_interchange: The value indicating whether to preserve interchange. Required. :vartype preserve_interchange: bool :ivar suspend_interchange_on_error: The value indicating whether to suspend interchange on error. Required. :vartype suspend_interchange_on_error: bool :ivar create_empty_xml_tags_for_trailing_separators: The value indicating whether to create empty xml tags for trailing separators. Required. :vartype create_empty_xml_tags_for_trailing_separators: bool :ivar use_dot_as_decimal_separator: The value indicating whether to use dot as decimal separator. Required.
:vartype use_dot_as_decimal_separator: bool """ _validation = { "mask_security_info": {"required": True}, "preserve_interchange": {"required": True}, "suspend_interchange_on_error": {"required": True}, "create_empty_xml_tags_for_trailing_separators": {"required": True}, "use_dot_as_decimal_separator": {"required": True}, } _attribute_map = { "mask_security_info": {"key": "maskSecurityInfo", "type": "bool"}, "preserve_interchange": {"key": "preserveInterchange", "type": "bool"}, "suspend_interchange_on_error": {"key": "suspendInterchangeOnError", "type": "bool"}, "create_empty_xml_tags_for_trailing_separators": { "key": "createEmptyXmlTagsForTrailingSeparators", "type": "bool", }, "use_dot_as_decimal_separator": {"key": "useDotAsDecimalSeparator", "type": "bool"}, } def __init__( self, *, mask_security_info: bool, preserve_interchange: bool, suspend_interchange_on_error: bool, create_empty_xml_tags_for_trailing_separators: bool, use_dot_as_decimal_separator: bool, **kwargs ): """ :keyword mask_security_info: The value indicating whether to mask security information. Required. :paramtype mask_security_info: bool :keyword preserve_interchange: The value indicating whether to preserve interchange. Required. :paramtype preserve_interchange: bool :keyword suspend_interchange_on_error: The value indicating whether to suspend interchange on error. Required. :paramtype suspend_interchange_on_error: bool :keyword create_empty_xml_tags_for_trailing_separators: The value indicating whether to create empty xml tags for trailing separators. Required. :paramtype create_empty_xml_tags_for_trailing_separators: bool :keyword use_dot_as_decimal_separator: The value indicating whether to use dot as decimal separator. Required. :paramtype use_dot_as_decimal_separator: bool """ super().__init__(**kwargs) self.mask_security_info = mask_security_info self.preserve_interchange = preserve_interchange self.suspend_interchange_on_error = suspend_interchange_on_error self.create_empty_xml_tags_for_trailing_separators = create_empty_xml_tags_for_trailing_separators self.use_dot_as_decimal_separator = use_dot_as_decimal_separator class EdifactProtocolSettings(_serialization.Model): # pylint: disable=too-many-instance-attributes """The Edifact agreement protocol settings. All required parameters must be populated in order to send to Azure. :ivar validation_settings: The EDIFACT validation settings. Required. :vartype validation_settings: ~azure.mgmt.logic.models.EdifactValidationSettings :ivar framing_settings: The EDIFACT framing settings. Required. :vartype framing_settings: ~azure.mgmt.logic.models.EdifactFramingSettings :ivar envelope_settings: The EDIFACT envelope settings. Required. :vartype envelope_settings: ~azure.mgmt.logic.models.EdifactEnvelopeSettings :ivar acknowledgement_settings: The EDIFACT acknowledgement settings. Required. :vartype acknowledgement_settings: ~azure.mgmt.logic.models.EdifactAcknowledgementSettings :ivar message_filter: The EDIFACT message filter. Required. :vartype message_filter: ~azure.mgmt.logic.models.EdifactMessageFilter :ivar processing_settings: The EDIFACT processing settings. Required. :vartype processing_settings: ~azure.mgmt.logic.models.EdifactProcessingSettings :ivar envelope_overrides: The EDIFACT envelope override settings. :vartype envelope_overrides: list[~azure.mgmt.logic.models.EdifactEnvelopeOverride] :ivar message_filter_list: The EDIFACT message filter list.
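# A sketch of EdifactProcessingSettings with every required flag spelled out.
# The particular True/False choices are assumptions for illustration only.
def _example_processing_settings() -> "EdifactProcessingSettings":
    return EdifactProcessingSettings(
        mask_security_info=True,  # hide UNB security data in tracking output
        preserve_interchange=False,
        suspend_interchange_on_error=True,  # stop the whole interchange on error
        create_empty_xml_tags_for_trailing_separators=True,
        use_dot_as_decimal_separator=False,  # EDIFACT commonly uses a comma
    )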
:vartype message_filter_list: list[~azure.mgmt.logic.models.EdifactMessageIdentifier] :ivar schema_references: The EDIFACT schema references. Required. :vartype schema_references: list[~azure.mgmt.logic.models.EdifactSchemaReference] :ivar validation_overrides: The EDIFACT validation override settings. :vartype validation_overrides: list[~azure.mgmt.logic.models.EdifactValidationOverride] :ivar edifact_delimiter_overrides: The EDIFACT delimiter override settings. :vartype edifact_delimiter_overrides: list[~azure.mgmt.logic.models.EdifactDelimiterOverride] """ _validation = { "validation_settings": {"required": True}, "framing_settings": {"required": True}, "envelope_settings": {"required": True}, "acknowledgement_settings": {"required": True}, "message_filter": {"required": True}, "processing_settings": {"required": True}, "schema_references": {"required": True}, } _attribute_map = { "validation_settings": {"key": "validationSettings", "type": "EdifactValidationSettings"}, "framing_settings": {"key": "framingSettings", "type": "EdifactFramingSettings"}, "envelope_settings": {"key": "envelopeSettings", "type": "EdifactEnvelopeSettings"}, "acknowledgement_settings": {"key": "acknowledgementSettings", "type": "EdifactAcknowledgementSettings"}, "message_filter": {"key": "messageFilter", "type": "EdifactMessageFilter"}, "processing_settings": {"key": "processingSettings", "type": "EdifactProcessingSettings"}, "envelope_overrides": {"key": "envelopeOverrides", "type": "[EdifactEnvelopeOverride]"}, "message_filter_list": {"key": "messageFilterList", "type": "[EdifactMessageIdentifier]"}, "schema_references": {"key": "schemaReferences", "type": "[EdifactSchemaReference]"}, "validation_overrides": {"key": "validationOverrides", "type": "[EdifactValidationOverride]"}, "edifact_delimiter_overrides": {"key": "edifactDelimiterOverrides", "type": "[EdifactDelimiterOverride]"}, } def __init__( self, *, validation_settings: "_models.EdifactValidationSettings", framing_settings: "_models.EdifactFramingSettings", envelope_settings: "_models.EdifactEnvelopeSettings", acknowledgement_settings: "_models.EdifactAcknowledgementSettings", message_filter: "_models.EdifactMessageFilter", processing_settings: "_models.EdifactProcessingSettings", schema_references: List["_models.EdifactSchemaReference"], envelope_overrides: Optional[List["_models.EdifactEnvelopeOverride"]] = None, message_filter_list: Optional[List["_models.EdifactMessageIdentifier"]] = None, validation_overrides: Optional[List["_models.EdifactValidationOverride"]] = None, edifact_delimiter_overrides: Optional[List["_models.EdifactDelimiterOverride"]] = None, **kwargs ): """ :keyword validation_settings: The EDIFACT validation settings. Required. :paramtype validation_settings: ~azure.mgmt.logic.models.EdifactValidationSettings :keyword framing_settings: The EDIFACT framing settings. Required. :paramtype framing_settings: ~azure.mgmt.logic.models.EdifactFramingSettings :keyword envelope_settings: The EDIFACT envelope settings. Required. :paramtype envelope_settings: ~azure.mgmt.logic.models.EdifactEnvelopeSettings :keyword acknowledgement_settings: The EDIFACT acknowledgement settings. Required. :paramtype acknowledgement_settings: ~azure.mgmt.logic.models.EdifactAcknowledgementSettings :keyword message_filter: The EDIFACT message filter. Required. :paramtype message_filter: ~azure.mgmt.logic.models.EdifactMessageFilter :keyword processing_settings: The EDIFACT processing settings. Required.
:paramtype processing_settings: ~azure.mgmt.logic.models.EdifactProcessingSettings :keyword envelope_overrides: The EDIFACT envelope override settings. :paramtype envelope_overrides: list[~azure.mgmt.logic.models.EdifactEnvelopeOverride] :keyword message_filter_list: The EDIFACT message filter list. :paramtype message_filter_list: list[~azure.mgmt.logic.models.EdifactMessageIdentifier] :keyword schema_references: The EDIFACT schema references. Required. :paramtype schema_references: list[~azure.mgmt.logic.models.EdifactSchemaReference] :keyword validation_overrides: The EDIFACT validation override settings. :paramtype validation_overrides: list[~azure.mgmt.logic.models.EdifactValidationOverride] :keyword edifact_delimiter_overrides: The EDIFACT delimiter override settings. :paramtype edifact_delimiter_overrides: list[~azure.mgmt.logic.models.EdifactDelimiterOverride] """ super().__init__(**kwargs) self.validation_settings = validation_settings self.framing_settings = framing_settings self.envelope_settings = envelope_settings self.acknowledgement_settings = acknowledgement_settings self.message_filter = message_filter self.processing_settings = processing_settings self.envelope_overrides = envelope_overrides self.message_filter_list = message_filter_list self.schema_references = schema_references self.validation_overrides = validation_overrides self.edifact_delimiter_overrides = edifact_delimiter_overrides class EdifactSchemaReference(_serialization.Model): """The Edifact schema reference. All required parameters must be populated in order to send to Azure. :ivar message_id: The message id. Required. :vartype message_id: str :ivar message_version: The message version. Required. :vartype message_version: str :ivar message_release: The message release version. Required. :vartype message_release: str :ivar sender_application_id: The sender application id. :vartype sender_application_id: str :ivar sender_application_qualifier: The sender application qualifier. :vartype sender_application_qualifier: str :ivar association_assigned_code: The association assigned code. :vartype association_assigned_code: str :ivar schema_name: The schema name. Required. :vartype schema_name: str """ _validation = { "message_id": {"required": True}, "message_version": {"required": True}, "message_release": {"required": True}, "schema_name": {"required": True}, } _attribute_map = { "message_id": {"key": "messageId", "type": "str"}, "message_version": {"key": "messageVersion", "type": "str"}, "message_release": {"key": "messageRelease", "type": "str"}, "sender_application_id": {"key": "senderApplicationId", "type": "str"}, "sender_application_qualifier": {"key": "senderApplicationQualifier", "type": "str"}, "association_assigned_code": {"key": "associationAssignedCode", "type": "str"}, "schema_name": {"key": "schemaName", "type": "str"}, } def __init__( self, *, message_id: str, message_version: str, message_release: str, schema_name: str, sender_application_id: Optional[str] = None, sender_application_qualifier: Optional[str] = None, association_assigned_code: Optional[str] = None, **kwargs ): """ :keyword message_id: The message id. Required. :paramtype message_id: str :keyword message_version: The message version. Required. :paramtype message_version: str :keyword message_release: The message release version. Required. :paramtype message_release: str :keyword sender_application_id: The sender application id. 
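# A sketch showing how the sub-models compose into EdifactProtocolSettings.
# The validation, envelope, and acknowledgement objects are taken as
# parameters because building each one is verbose; _example_framing_settings
# and _example_processing_settings are the illustrative helpers sketched
# above, and the ORDERS/D.96A schema reference values are assumptions.
def _example_protocol_settings(
    validation: "EdifactValidationSettings",
    envelope: "EdifactEnvelopeSettings",
    acknowledgement: "EdifactAcknowledgementSettings",
) -> "EdifactProtocolSettings":
    return EdifactProtocolSettings(
        validation_settings=validation,
        framing_settings=_example_framing_settings(),
        envelope_settings=envelope,
        acknowledgement_settings=acknowledgement,
        message_filter=EdifactMessageFilter(message_filter_type="NotSpecified"),
        processing_settings=_example_processing_settings(),
        schema_references=[
            EdifactSchemaReference(
                message_id="ORDERS",  # assumed message type
                message_version="D",
                message_release="96A",
                schema_name="EDIFACT_D96A_ORDERS",  # hypothetical schema name
            )
        ],
    )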
:paramtype sender_application_id: str :keyword sender_application_qualifier: The sender application qualifier. :paramtype sender_application_qualifier: str :keyword association_assigned_code: The association assigned code. :paramtype association_assigned_code: str :keyword schema_name: The schema name. Required. :paramtype schema_name: str """ super().__init__(**kwargs) self.message_id = message_id self.message_version = message_version self.message_release = message_release self.sender_application_id = sender_application_id self.sender_application_qualifier = sender_application_qualifier self.association_assigned_code = association_assigned_code self.schema_name = schema_name class EdifactValidationOverride(_serialization.Model): """The Edifact validation override settings. All required parameters must be populated in order to send to Azure. :ivar message_id: The message id on which the validation settings have to be applied. Required. :vartype message_id: str :ivar enforce_character_set: The value indicating whether to validate character set. Required. :vartype enforce_character_set: bool :ivar validate_edi_types: The value indicating whether to validate EDI types. Required. :vartype validate_edi_types: bool :ivar validate_xsd_types: The value indicating whether to validate XSD types. Required. :vartype validate_xsd_types: bool :ivar allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes. Required. :vartype allow_leading_and_trailing_spaces_and_zeroes: bool :ivar trailing_separator_policy: The trailing separator policy. Required. Known values are: "NotSpecified", "NotAllowed", "Optional", and "Mandatory". :vartype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy :ivar trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes. Required. :vartype trim_leading_and_trailing_spaces_and_zeroes: bool """ _validation = { "message_id": {"required": True}, "enforce_character_set": {"required": True}, "validate_edi_types": {"required": True}, "validate_xsd_types": {"required": True}, "allow_leading_and_trailing_spaces_and_zeroes": {"required": True}, "trailing_separator_policy": {"required": True}, "trim_leading_and_trailing_spaces_and_zeroes": {"required": True}, } _attribute_map = { "message_id": {"key": "messageId", "type": "str"}, "enforce_character_set": {"key": "enforceCharacterSet", "type": "bool"}, "validate_edi_types": {"key": "validateEDITypes", "type": "bool"}, "validate_xsd_types": {"key": "validateXSDTypes", "type": "bool"}, "allow_leading_and_trailing_spaces_and_zeroes": { "key": "allowLeadingAndTrailingSpacesAndZeroes", "type": "bool", }, "trailing_separator_policy": {"key": "trailingSeparatorPolicy", "type": "str"}, "trim_leading_and_trailing_spaces_and_zeroes": {"key": "trimLeadingAndTrailingSpacesAndZeroes", "type": "bool"}, } def __init__( self, *, message_id: str, enforce_character_set: bool, validate_edi_types: bool, validate_xsd_types: bool, allow_leading_and_trailing_spaces_and_zeroes: bool, trailing_separator_policy: Union[str, "_models.TrailingSeparatorPolicy"], trim_leading_and_trailing_spaces_and_zeroes: bool, **kwargs ): """ :keyword message_id: The message id on which the validation settings have to be applied. Required. :paramtype message_id: str :keyword enforce_character_set: The value indicating whether to validate character set. Required.
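# A sketch of a per-message validation override: relax type validation for a
# single message id while the agreement-level settings stay strict. The
# message id "INVOIC" and the flag choices are assumptions for illustration.
def _example_validation_override() -> "EdifactValidationOverride":
    return EdifactValidationOverride(
        message_id="INVOIC",
        enforce_character_set=True,
        validate_edi_types=False,  # relaxed for this message only
        validate_xsd_types=False,
        allow_leading_and_trailing_spaces_and_zeroes=True,
        trailing_separator_policy="Optional",
        trim_leading_and_trailing_spaces_and_zeroes=False,
    )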
:paramtype enforce_character_set: bool :keyword validate_edi_types: The value indicating whether to validate EDI types. Required. :paramtype validate_edi_types: bool :keyword validate_xsd_types: The value indicating whether to validate XSD types. Required. :paramtype validate_xsd_types: bool :keyword allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes. Required. :paramtype allow_leading_and_trailing_spaces_and_zeroes: bool :keyword trailing_separator_policy: The trailing separator policy. Required. Known values are: "NotSpecified", "NotAllowed", "Optional", and "Mandatory". :paramtype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy :keyword trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes. Required. :paramtype trim_leading_and_trailing_spaces_and_zeroes: bool """ super().__init__(**kwargs) self.message_id = message_id self.enforce_character_set = enforce_character_set self.validate_edi_types = validate_edi_types self.validate_xsd_types = validate_xsd_types self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes self.trailing_separator_policy = trailing_separator_policy self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes class EdifactValidationSettings(_serialization.Model): """The Edifact agreement validation settings. All required parameters must be populated in order to send to Azure. :ivar validate_character_set: The value indicating whether to validate character set in the message. Required. :vartype validate_character_set: bool :ivar check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number. Required. :vartype check_duplicate_interchange_control_number: bool :ivar interchange_control_number_validity_days: The validity period of interchange control number. Required. :vartype interchange_control_number_validity_days: int :ivar check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number. Required. :vartype check_duplicate_group_control_number: bool :ivar check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number. Required. :vartype check_duplicate_transaction_set_control_number: bool :ivar validate_edi_types: The value indicating whether to validate EDI types. Required. :vartype validate_edi_types: bool :ivar validate_xsd_types: The value indicating whether to validate XSD types. Required. :vartype validate_xsd_types: bool :ivar allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes. Required. :vartype allow_leading_and_trailing_spaces_and_zeroes: bool :ivar trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes. Required. :vartype trim_leading_and_trailing_spaces_and_zeroes: bool :ivar trailing_separator_policy: The trailing separator policy. Required. Known values are: "NotSpecified", "NotAllowed", "Optional", and "Mandatory".
:vartype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy """ _validation = { "validate_character_set": {"required": True}, "check_duplicate_interchange_control_number": {"required": True}, "interchange_control_number_validity_days": {"required": True}, "check_duplicate_group_control_number": {"required": True}, "check_duplicate_transaction_set_control_number": {"required": True}, "validate_edi_types": {"required": True}, "validate_xsd_types": {"required": True}, "allow_leading_and_trailing_spaces_and_zeroes": {"required": True}, "trim_leading_and_trailing_spaces_and_zeroes": {"required": True}, "trailing_separator_policy": {"required": True}, } _attribute_map = { "validate_character_set": {"key": "validateCharacterSet", "type": "bool"}, "check_duplicate_interchange_control_number": {"key": "checkDuplicateInterchangeControlNumber", "type": "bool"}, "interchange_control_number_validity_days": {"key": "interchangeControlNumberValidityDays", "type": "int"}, "check_duplicate_group_control_number": {"key": "checkDuplicateGroupControlNumber", "type": "bool"}, "check_duplicate_transaction_set_control_number": { "key": "checkDuplicateTransactionSetControlNumber", "type": "bool", }, "validate_edi_types": {"key": "validateEDITypes", "type": "bool"}, "validate_xsd_types": {"key": "validateXSDTypes", "type": "bool"}, "allow_leading_and_trailing_spaces_and_zeroes": { "key": "allowLeadingAndTrailingSpacesAndZeroes", "type": "bool", }, "trim_leading_and_trailing_spaces_and_zeroes": {"key": "trimLeadingAndTrailingSpacesAndZeroes", "type": "bool"}, "trailing_separator_policy": {"key": "trailingSeparatorPolicy", "type": "str"}, } def __init__( self, *, validate_character_set: bool, check_duplicate_interchange_control_number: bool, interchange_control_number_validity_days: int, check_duplicate_group_control_number: bool, check_duplicate_transaction_set_control_number: bool, validate_edi_types: bool, validate_xsd_types: bool, allow_leading_and_trailing_spaces_and_zeroes: bool, trim_leading_and_trailing_spaces_and_zeroes: bool, trailing_separator_policy: Union[str, "_models.TrailingSeparatorPolicy"], **kwargs ): """ :keyword validate_character_set: The value indicating whether to validate character set in the message. Required. :paramtype validate_character_set: bool :keyword check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number. Required. :paramtype check_duplicate_interchange_control_number: bool :keyword interchange_control_number_validity_days: The validity period of interchange control number. Required. :paramtype interchange_control_number_validity_days: int :keyword check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number. Required. :paramtype check_duplicate_group_control_number: bool :keyword check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number. Required. :paramtype check_duplicate_transaction_set_control_number: bool :keyword validate_edi_types: The value indicating whether to validate EDI types. Required. :paramtype validate_edi_types: bool :keyword validate_xsd_types: The value indicating whether to validate XSD types. Required. :paramtype validate_xsd_types: bool :keyword allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes. Required.
:paramtype allow_leading_and_trailing_spaces_and_zeroes: bool :keyword trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes. Required. :paramtype trim_leading_and_trailing_spaces_and_zeroes: bool :keyword trailing_separator_policy: The trailing separator policy. Required. Known values are: "NotSpecified", "NotAllowed", "Optional", and "Mandatory". :paramtype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy """ super().__init__(**kwargs) self.validate_character_set = validate_character_set self.check_duplicate_interchange_control_number = check_duplicate_interchange_control_number self.interchange_control_number_validity_days = interchange_control_number_validity_days self.check_duplicate_group_control_number = check_duplicate_group_control_number self.check_duplicate_transaction_set_control_number = check_duplicate_transaction_set_control_number self.validate_edi_types = validate_edi_types self.validate_xsd_types = validate_xsd_types self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes self.trailing_separator_policy = trailing_separator_policy class ErrorProperties(_serialization.Model): """Error properties indicate why the Logic service was not able to process the incoming request. The reason is provided in the error message. :ivar code: Error code. :vartype code: str :ivar message: Error message indicating why the operation failed. :vartype message: str """ _attribute_map = { "code": {"key": "code", "type": "str"}, "message": {"key": "message", "type": "str"}, } def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs): """ :keyword code: Error code. :paramtype code: str :keyword message: Error message indicating why the operation failed. :paramtype message: str """ super().__init__(**kwargs) self.code = code self.message = message class ErrorResponse(_serialization.Model): """Error response indicates that the Logic service is not able to process the incoming request. The error property contains the error details. :ivar error: The error properties. :vartype error: ~azure.mgmt.logic.models.ErrorProperties """ _attribute_map = { "error": {"key": "error", "type": "ErrorProperties"}, } def __init__(self, *, error: Optional["_models.ErrorProperties"] = None, **kwargs): """ :keyword error: The error properties. :paramtype error: ~azure.mgmt.logic.models.ErrorProperties """ super().__init__(**kwargs) self.error = error class Expression(_serialization.Model): """The expression. :ivar text: The text. :vartype text: str :ivar value: Anything. :vartype value: any :ivar subexpressions: The sub expressions. :vartype subexpressions: list[~azure.mgmt.logic.models.Expression] :ivar error: The azure resource error info. :vartype error: ~azure.mgmt.logic.models.AzureResourceErrorInfo """ _attribute_map = { "text": {"key": "text", "type": "str"}, "value": {"key": "value", "type": "object"}, "subexpressions": {"key": "subexpressions", "type": "[Expression]"}, "error": {"key": "error", "type": "AzureResourceErrorInfo"}, } def __init__( self, *, text: Optional[str] = None, value: Optional[Any] = None, subexpressions: Optional[List["_models.Expression"]] = None, error: Optional["_models.AzureResourceErrorInfo"] = None, **kwargs ): """ :keyword text: The text. :paramtype text: str :keyword value: Anything.
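# A sketch of agreement-level EdifactValidationSettings; the flag choices and
# the 30-day control-number window are illustrative assumptions.
def _example_validation_settings() -> "EdifactValidationSettings":
    return EdifactValidationSettings(
        validate_character_set=True,
        check_duplicate_interchange_control_number=True,
        interchange_control_number_validity_days=30,  # assumed dedupe window
        check_duplicate_group_control_number=False,
        check_duplicate_transaction_set_control_number=False,
        validate_edi_types=True,
        validate_xsd_types=True,
        allow_leading_and_trailing_spaces_and_zeroes=False,
        trim_leading_and_trailing_spaces_and_zeroes=True,
        trailing_separator_policy="NotAllowed",
    )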
:paramtype value: any :keyword subexpressions: The sub expressions. :paramtype subexpressions: list[~azure.mgmt.logic.models.Expression] :keyword error: The azure resource error info. :paramtype error: ~azure.mgmt.logic.models.AzureResourceErrorInfo """ super().__init__(**kwargs) self.text = text self.value = value self.subexpressions = subexpressions self.error = error class ExpressionRoot(Expression): """The expression root. :ivar text: The text. :vartype text: str :ivar value: Anything. :vartype value: any :ivar subexpressions: The sub expressions. :vartype subexpressions: list[~azure.mgmt.logic.models.Expression] :ivar error: The azure resource error info. :vartype error: ~azure.mgmt.logic.models.AzureResourceErrorInfo :ivar path: The path. :vartype path: str """ _attribute_map = { "text": {"key": "text", "type": "str"}, "value": {"key": "value", "type": "object"}, "subexpressions": {"key": "subexpressions", "type": "[Expression]"}, "error": {"key": "error", "type": "AzureResourceErrorInfo"}, "path": {"key": "path", "type": "str"}, } def __init__( self, *, text: Optional[str] = None, value: Optional[Any] = None, subexpressions: Optional[List["_models.Expression"]] = None, error: Optional["_models.AzureResourceErrorInfo"] = None, path: Optional[str] = None, **kwargs ): """ :keyword text: The text. :paramtype text: str :keyword value: Anything. :paramtype value: any :keyword subexpressions: The sub expressions. :paramtype subexpressions: list[~azure.mgmt.logic.models.Expression] :keyword error: The azure resource error info. :paramtype error: ~azure.mgmt.logic.models.AzureResourceErrorInfo :keyword path: The path. :paramtype path: str """ super().__init__(text=text, value=value, subexpressions=subexpressions, error=error, **kwargs) self.path = path class ExpressionTraces(_serialization.Model): """The expression traces. :ivar inputs: :vartype inputs: list[~azure.mgmt.logic.models.ExpressionRoot] """ _attribute_map = { "inputs": {"key": "inputs", "type": "[ExpressionRoot]"}, } def __init__(self, *, inputs: Optional[List["_models.ExpressionRoot"]] = None, **kwargs): """ :keyword inputs: :paramtype inputs: list[~azure.mgmt.logic.models.ExpressionRoot] """ super().__init__(**kwargs) self.inputs = inputs class ExtendedErrorInfo(_serialization.Model): """The extended error info. All required parameters must be populated in order to send to Azure. :ivar code: The error code. Required. Known values are: "NotSpecified", "IntegrationServiceEnvironmentNotFound", "InternalServerError", and "InvalidOperationId". :vartype code: str or ~azure.mgmt.logic.models.ErrorResponseCode :ivar message: The error message. Required. :vartype message: str :ivar details: The error message details. :vartype details: list[~azure.mgmt.logic.models.ExtendedErrorInfo] :ivar inner_error: The inner error. :vartype inner_error: JSON """ _validation = { "code": {"required": True}, "message": {"required": True}, } _attribute_map = { "code": {"key": "code", "type": "str"}, "message": {"key": "message", "type": "str"}, "details": {"key": "details", "type": "[ExtendedErrorInfo]"}, "inner_error": {"key": "innerError", "type": "object"}, } def __init__( self, *, code: Union[str, "_models.ErrorResponseCode"], message: str, details: Optional[List["_models.ExtendedErrorInfo"]] = None, inner_error: Optional[JSON] = None, **kwargs ): """ :keyword code: The error code. Required. Known values are: "NotSpecified", "IntegrationServiceEnvironmentNotFound", "InternalServerError", and "InvalidOperationId". 
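# A sketch of walking expression traces: each ExpressionRoot carries a path
# plus a tree of sub-expressions, so a simple recursive visit flattens the
# structure for logging or debugging.
def _example_print_expression_traces(traces: "ExpressionTraces") -> None:
    def visit(expr: "Expression", depth: int = 0) -> None:
        print("  " * depth + (expr.text or "<no text>"), "=", expr.value)
        if expr.error is not None:
            print("  " * depth + "error:", expr.error)
        for sub in expr.subexpressions or []:
            visit(sub, depth + 1)

    for root in traces.inputs or []:
        print("path:", root.path)
        visit(root)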
:paramtype code: str or ~azure.mgmt.logic.models.ErrorResponseCode :keyword message: The error message. Required. :paramtype message: str :keyword details: The error message details. :paramtype details: list[~azure.mgmt.logic.models.ExtendedErrorInfo] :keyword inner_error: The inner error. :paramtype inner_error: JSON """ super().__init__(**kwargs) self.code = code self.message = message self.details = details self.inner_error = inner_error class FlowAccessControlConfiguration(_serialization.Model): """The access control configuration. :ivar triggers: The access control configuration for invoking workflow triggers. :vartype triggers: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy :ivar contents: The access control configuration for accessing workflow run contents. :vartype contents: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy :ivar actions: The access control configuration for workflow actions. :vartype actions: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy :ivar workflow_management: The access control configuration for workflow management. :vartype workflow_management: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy """ _attribute_map = { "triggers": {"key": "triggers", "type": "FlowAccessControlConfigurationPolicy"}, "contents": {"key": "contents", "type": "FlowAccessControlConfigurationPolicy"}, "actions": {"key": "actions", "type": "FlowAccessControlConfigurationPolicy"}, "workflow_management": {"key": "workflowManagement", "type": "FlowAccessControlConfigurationPolicy"}, } def __init__( self, *, triggers: Optional["_models.FlowAccessControlConfigurationPolicy"] = None, contents: Optional["_models.FlowAccessControlConfigurationPolicy"] = None, actions: Optional["_models.FlowAccessControlConfigurationPolicy"] = None, workflow_management: Optional["_models.FlowAccessControlConfigurationPolicy"] = None, **kwargs ): """ :keyword triggers: The access control configuration for invoking workflow triggers. :paramtype triggers: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy :keyword contents: The access control configuration for accessing workflow run contents. :paramtype contents: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy :keyword actions: The access control configuration for workflow actions. :paramtype actions: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy :keyword workflow_management: The access control configuration for workflow management. :paramtype workflow_management: ~azure.mgmt.logic.models.FlowAccessControlConfigurationPolicy """ super().__init__(**kwargs) self.triggers = triggers self.contents = contents self.actions = actions self.workflow_management = workflow_management class FlowAccessControlConfigurationPolicy(_serialization.Model): """The access control configuration policy. :ivar allowed_caller_ip_addresses: The allowed caller IP address ranges. :vartype allowed_caller_ip_addresses: list[~azure.mgmt.logic.models.IpAddressRange] :ivar open_authentication_policies: The authentication policies for workflow. 
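# A sketch of flattening the recursive ExtendedErrorInfo structure into
# indented "code: message" strings, one per nesting level of details.
def _example_flatten_error(info: "ExtendedErrorInfo") -> List[str]:
    lines = ["{}: {}".format(info.code, info.message)]
    for detail in info.details or []:
        lines.extend("  " + line for line in _example_flatten_error(detail))
    return lines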
:vartype open_authentication_policies: ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicies """ _attribute_map = { "allowed_caller_ip_addresses": {"key": "allowedCallerIpAddresses", "type": "[IpAddressRange]"}, "open_authentication_policies": { "key": "openAuthenticationPolicies", "type": "OpenAuthenticationAccessPolicies", }, } def __init__( self, *, allowed_caller_ip_addresses: Optional[List["_models.IpAddressRange"]] = None, open_authentication_policies: Optional["_models.OpenAuthenticationAccessPolicies"] = None, **kwargs ): """ :keyword allowed_caller_ip_addresses: The allowed caller IP address ranges. :paramtype allowed_caller_ip_addresses: list[~azure.mgmt.logic.models.IpAddressRange] :keyword open_authentication_policies: The authentication policies for workflow. :paramtype open_authentication_policies: ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicies """ super().__init__(**kwargs) self.allowed_caller_ip_addresses = allowed_caller_ip_addresses self.open_authentication_policies = open_authentication_policies class FlowEndpoints(_serialization.Model): """The flow endpoints configuration. :ivar outgoing_ip_addresses: The outgoing ip address. :vartype outgoing_ip_addresses: list[~azure.mgmt.logic.models.IpAddress] :ivar access_endpoint_ip_addresses: The access endpoint ip address. :vartype access_endpoint_ip_addresses: list[~azure.mgmt.logic.models.IpAddress] """ _attribute_map = { "outgoing_ip_addresses": {"key": "outgoingIpAddresses", "type": "[IpAddress]"}, "access_endpoint_ip_addresses": {"key": "accessEndpointIpAddresses", "type": "[IpAddress]"}, } def __init__( self, *, outgoing_ip_addresses: Optional[List["_models.IpAddress"]] = None, access_endpoint_ip_addresses: Optional[List["_models.IpAddress"]] = None, **kwargs ): """ :keyword outgoing_ip_addresses: The outgoing ip address. :paramtype outgoing_ip_addresses: list[~azure.mgmt.logic.models.IpAddress] :keyword access_endpoint_ip_addresses: The access endpoint ip address. :paramtype access_endpoint_ip_addresses: list[~azure.mgmt.logic.models.IpAddress] """ super().__init__(**kwargs) self.outgoing_ip_addresses = outgoing_ip_addresses self.access_endpoint_ip_addresses = access_endpoint_ip_addresses class FlowEndpointsConfiguration(_serialization.Model): """The endpoints configuration. :ivar workflow: The workflow endpoints. :vartype workflow: ~azure.mgmt.logic.models.FlowEndpoints :ivar connector: The connector endpoints. :vartype connector: ~azure.mgmt.logic.models.FlowEndpoints """ _attribute_map = { "workflow": {"key": "workflow", "type": "FlowEndpoints"}, "connector": {"key": "connector", "type": "FlowEndpoints"}, } def __init__( self, *, workflow: Optional["_models.FlowEndpoints"] = None, connector: Optional["_models.FlowEndpoints"] = None, **kwargs ): """ :keyword workflow: The workflow endpoints. :paramtype workflow: ~azure.mgmt.logic.models.FlowEndpoints :keyword connector: The connector endpoints. :paramtype connector: ~azure.mgmt.logic.models.FlowEndpoints """ super().__init__(**kwargs) self.workflow = workflow self.connector = connector class GenerateUpgradedDefinitionParameters(_serialization.Model): """The parameters to generate upgraded definition. :ivar target_schema_version: The target schema version. :vartype target_schema_version: str """ _attribute_map = { "target_schema_version": {"key": "targetSchemaVersion", "type": "str"}, } def __init__(self, *, target_schema_version: Optional[str] = None, **kwargs): """ :keyword target_schema_version: The target schema version. 
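# A sketch of locking down trigger invocation to a single address range while
# leaving the other policies unset. IpAddressRange is the model used for the
# allowed-caller list elsewhere in this package (its address_range parameter
# is assumed here), and the CIDR below is an arbitrary example value.
def _example_access_control() -> "FlowAccessControlConfiguration":
    triggers_policy = FlowAccessControlConfigurationPolicy(
        allowed_caller_ip_addresses=[
            IpAddressRange(address_range="167.220.0.0/16"),  # example CIDR
        ]
    )
    return FlowAccessControlConfiguration(triggers=triggers_policy)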
:paramtype target_schema_version: str """ super().__init__(**kwargs) self.target_schema_version = target_schema_version class GetCallbackUrlParameters(_serialization.Model): """The callback url parameters. :ivar not_after: The expiry time. :vartype not_after: ~datetime.datetime :ivar key_type: The key type. Known values are: "NotSpecified", "Primary", and "Secondary". :vartype key_type: str or ~azure.mgmt.logic.models.KeyType """ _attribute_map = { "not_after": {"key": "notAfter", "type": "iso-8601"}, "key_type": {"key": "keyType", "type": "str"}, } def __init__( self, *, not_after: Optional[datetime.datetime] = None, key_type: Optional[Union[str, "_models.KeyType"]] = None, **kwargs ): """ :keyword not_after: The expiry time. :paramtype not_after: ~datetime.datetime :keyword key_type: The key type. Known values are: "NotSpecified", "Primary", and "Secondary". :paramtype key_type: str or ~azure.mgmt.logic.models.KeyType """ super().__init__(**kwargs) self.not_after = not_after self.key_type = key_type class IntegrationAccount(Resource): """The integration account. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar sku: The sku. :vartype sku: ~azure.mgmt.logic.models.IntegrationAccountSku :ivar integration_service_environment: The integration service environment. :vartype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference :ivar state: The workflow state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". :vartype state: str or ~azure.mgmt.logic.models.WorkflowState """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "sku": {"key": "sku", "type": "IntegrationAccountSku"}, "integration_service_environment": { "key": "properties.integrationServiceEnvironment", "type": "ResourceReference", }, "state": {"key": "properties.state", "type": "str"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, sku: Optional["_models.IntegrationAccountSku"] = None, integration_service_environment: Optional["_models.ResourceReference"] = None, state: Optional[Union[str, "_models.WorkflowState"]] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword sku: The sku. :paramtype sku: ~azure.mgmt.logic.models.IntegrationAccountSku :keyword integration_service_environment: The integration service environment. :paramtype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference :keyword state: The workflow state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". 
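# A sketch of requesting a callback URL that expires in one week, signed with
# the primary key; key_type accepts the known values as plain strings.
def _example_callback_url_parameters() -> "GetCallbackUrlParameters":
    import datetime  # also imported at module level; repeated here for self-containment

    return GetCallbackUrlParameters(
        not_after=datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7),
        key_type="Primary",
    )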
:paramtype state: str or ~azure.mgmt.logic.models.WorkflowState """ super().__init__(location=location, tags=tags, **kwargs) self.sku = sku self.integration_service_environment = integration_service_environment self.state = state class IntegrationAccountAgreement(Resource): # pylint: disable=too-many-instance-attributes """The integration account agreement. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar created_time: The created time. :vartype created_time: ~datetime.datetime :ivar changed_time: The changed time. :vartype changed_time: ~datetime.datetime :ivar metadata: The metadata. :vartype metadata: JSON :ivar agreement_type: The agreement type. Required. Known values are: "NotSpecified", "AS2", "X12", and "Edifact". :vartype agreement_type: str or ~azure.mgmt.logic.models.AgreementType :ivar host_partner: The integration account partner that is set as host partner for this agreement. Required. :vartype host_partner: str :ivar guest_partner: The integration account partner that is set as guest partner for this agreement. Required. :vartype guest_partner: str :ivar host_identity: The business identity of the host partner. Required. :vartype host_identity: ~azure.mgmt.logic.models.BusinessIdentity :ivar guest_identity: The business identity of the guest partner. Required. :vartype guest_identity: ~azure.mgmt.logic.models.BusinessIdentity :ivar content: The agreement content. Required. :vartype content: ~azure.mgmt.logic.models.AgreementContent """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, "agreement_type": {"required": True}, "host_partner": {"required": True}, "guest_partner": {"required": True}, "host_identity": {"required": True}, "guest_identity": {"required": True}, "content": {"required": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "metadata": {"key": "properties.metadata", "type": "object"}, "agreement_type": {"key": "properties.agreementType", "type": "str"}, "host_partner": {"key": "properties.hostPartner", "type": "str"}, "guest_partner": {"key": "properties.guestPartner", "type": "str"}, "host_identity": {"key": "properties.hostIdentity", "type": "BusinessIdentity"}, "guest_identity": {"key": "properties.guestIdentity", "type": "BusinessIdentity"}, "content": {"key": "properties.content", "type": "AgreementContent"}, } def __init__( self, *, agreement_type: Union[str, "_models.AgreementType"], host_partner: str, guest_partner: str, host_identity: "_models.BusinessIdentity", guest_identity: "_models.BusinessIdentity", content: "_models.AgreementContent", location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, metadata: Optional[JSON] = None, **kwargs ): """ :keyword location: The resource location. 
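# A sketch of creating an integration account through the management client.
# LogicManagementClient and integration_accounts.create_or_update come from
# azure-mgmt-logic; the credential, subscription id, resource names, and the
# "Standard" sku are placeholder assumptions, not required values.
def _example_create_integration_account() -> "IntegrationAccount":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.logic import LogicManagementClient

    client = LogicManagementClient(DefaultAzureCredential(), "<subscription-id>")
    account = IntegrationAccount(
        location="westus",
        sku=IntegrationAccountSku(name="Standard"),  # sku model defined elsewhere in this module
        state="Enabled",
    )
    return client.integration_accounts.create_or_update(
        "<resource-group>", "<integration-account-name>", account
    )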
:paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword metadata: The metadata. :paramtype metadata: JSON :keyword agreement_type: The agreement type. Required. Known values are: "NotSpecified", "AS2", "X12", and "Edifact". :paramtype agreement_type: str or ~azure.mgmt.logic.models.AgreementType :keyword host_partner: The integration account partner that is set as host partner for this agreement. Required. :paramtype host_partner: str :keyword guest_partner: The integration account partner that is set as guest partner for this agreement. Required. :paramtype guest_partner: str :keyword host_identity: The business identity of the host partner. Required. :paramtype host_identity: ~azure.mgmt.logic.models.BusinessIdentity :keyword guest_identity: The business identity of the guest partner. Required. :paramtype guest_identity: ~azure.mgmt.logic.models.BusinessIdentity :keyword content: The agreement content. Required. :paramtype content: ~azure.mgmt.logic.models.AgreementContent """ super().__init__(location=location, tags=tags, **kwargs) self.created_time = None self.changed_time = None self.metadata = metadata self.agreement_type = agreement_type self.host_partner = host_partner self.guest_partner = guest_partner self.host_identity = host_identity self.guest_identity = guest_identity self.content = content class IntegrationAccountAgreementFilter(_serialization.Model): """The integration account agreement filter for odata query. All required parameters must be populated in order to send to Azure. :ivar agreement_type: The agreement type of integration account agreement. Required. Known values are: "NotSpecified", "AS2", "X12", and "Edifact". :vartype agreement_type: str or ~azure.mgmt.logic.models.AgreementType """ _validation = { "agreement_type": {"required": True}, } _attribute_map = { "agreement_type": {"key": "agreementType", "type": "str"}, } def __init__(self, *, agreement_type: Union[str, "_models.AgreementType"], **kwargs): """ :keyword agreement_type: The agreement type of integration account agreement. Required. Known values are: "NotSpecified", "AS2", "X12", and "Edifact". :paramtype agreement_type: str or ~azure.mgmt.logic.models.AgreementType """ super().__init__(**kwargs) self.agreement_type = agreement_type class IntegrationAccountAgreementListResult(_serialization.Model): """The list of integration account agreements. :ivar value: The list of integration account agreements. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccountAgreement] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccountAgreement]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccountAgreement"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration account agreements. :paramtype value: list[~azure.mgmt.logic.models.IntegrationAccountAgreement] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountCertificate(Resource): """The integration account certificate. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. 
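# A sketch of an EDIFACT agreement between two partners already defined in the
# integration account. BusinessIdentity and AgreementContent are models from
# this module; the partner names and ZZ qualifier/value pairs are assumptions.
def _example_agreement(content: "AgreementContent") -> "IntegrationAccountAgreement":
    return IntegrationAccountAgreement(
        agreement_type="Edifact",
        host_partner="Contoso",  # must match a partner in the account
        guest_partner="Fabrikam",
        host_identity=BusinessIdentity(qualifier="ZZ", value="CONTOSO"),
        guest_identity=BusinessIdentity(qualifier="ZZ", value="FABRIKAM"),
        content=content,  # AgreementContent carrying the EDIFACT settings
        location="westus",
    )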
:vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar created_time: The created time. :vartype created_time: ~datetime.datetime :ivar changed_time: The changed time. :vartype changed_time: ~datetime.datetime :ivar metadata: The metadata. :vartype metadata: JSON :ivar key: The key details in the key vault. :vartype key: ~azure.mgmt.logic.models.KeyVaultKeyReference :ivar public_certificate: The public certificate. :vartype public_certificate: str """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "metadata": {"key": "properties.metadata", "type": "object"}, "key": {"key": "properties.key", "type": "KeyVaultKeyReference"}, "public_certificate": {"key": "properties.publicCertificate", "type": "str"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, metadata: Optional[JSON] = None, key: Optional["_models.KeyVaultKeyReference"] = None, public_certificate: Optional[str] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword metadata: The metadata. :paramtype metadata: JSON :keyword key: The key details in the key vault. :paramtype key: ~azure.mgmt.logic.models.KeyVaultKeyReference :keyword public_certificate: The public certificate. :paramtype public_certificate: str """ super().__init__(location=location, tags=tags, **kwargs) self.created_time = None self.changed_time = None self.metadata = metadata self.key = key self.public_certificate = public_certificate class IntegrationAccountCertificateListResult(_serialization.Model): """The list of integration account certificates. :ivar value: The list of integration account certificates. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccountCertificate] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccountCertificate]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccountCertificate"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration account certificates. :paramtype value: list[~azure.mgmt.logic.models.IntegrationAccountCertificate] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountListResult(_serialization.Model): """The list of integration accounts. :ivar value: The list of integration accounts. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccount] :ivar next_link: The URL to get the next set of results. 
:vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccount]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccount"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration accounts. :paramtype value: list[~azure.mgmt.logic.models.IntegrationAccount] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountMap(Resource): # pylint: disable=too-many-instance-attributes """The integration account map. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar map_type: The map type. Required. Known values are: "NotSpecified", "Xslt", "Xslt20", "Xslt30", and "Liquid". :vartype map_type: str or ~azure.mgmt.logic.models.MapType :ivar parameters_schema: The parameters schema of integration account map. :vartype parameters_schema: ~azure.mgmt.logic.models.IntegrationAccountMapPropertiesParametersSchema :ivar created_time: The created time. :vartype created_time: ~datetime.datetime :ivar changed_time: The changed time. :vartype changed_time: ~datetime.datetime :ivar content: The content. :vartype content: str :ivar content_type: The content type. :vartype content_type: str :ivar content_link: The content link. :vartype content_link: ~azure.mgmt.logic.models.ContentLink :ivar metadata: The metadata. :vartype metadata: JSON """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "map_type": {"required": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, "content_link": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "map_type": {"key": "properties.mapType", "type": "str"}, "parameters_schema": { "key": "properties.parametersSchema", "type": "IntegrationAccountMapPropertiesParametersSchema", }, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "content": {"key": "properties.content", "type": "str"}, "content_type": {"key": "properties.contentType", "type": "str"}, "content_link": {"key": "properties.contentLink", "type": "ContentLink"}, "metadata": {"key": "properties.metadata", "type": "object"}, } def __init__( self, *, map_type: Union[str, "_models.MapType"], location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, parameters_schema: Optional["_models.IntegrationAccountMapPropertiesParametersSchema"] = None, content: Optional[str] = None, content_type: Optional[str] = None, metadata: Optional[JSON] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword map_type: The map type. Required. Known values are: "NotSpecified", "Xslt", "Xslt20", "Xslt30", and "Liquid". 
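# --- Illustrative usage sketch (editor's addition, not generated code). ---
# All *ListResult models in this file share one paging shape: `value` is
# the current page of items and `next_link` is the continuation URL
# (unset on the last page). A caller-side helper might look like this.
def _example_page_items(page: "IntegrationAccountListResult") -> list:
    """Sketch only: pull the items off a single page."""
    return list(page.value or [])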
:paramtype map_type: str or ~azure.mgmt.logic.models.MapType :keyword parameters_schema: The parameters schema of integration account map. :paramtype parameters_schema: ~azure.mgmt.logic.models.IntegrationAccountMapPropertiesParametersSchema :keyword content: The content. :paramtype content: str :keyword content_type: The content type. :paramtype content_type: str :keyword metadata: The metadata. :paramtype metadata: JSON """ super().__init__(location=location, tags=tags, **kwargs) self.map_type = map_type self.parameters_schema = parameters_schema self.created_time = None self.changed_time = None self.content = content self.content_type = content_type self.content_link = None self.metadata = metadata class IntegrationAccountMapFilter(_serialization.Model): """The integration account map filter for odata query. All required parameters must be populated in order to send to Azure. :ivar map_type: The map type of integration account map. Required. Known values are: "NotSpecified", "Xslt", "Xslt20", "Xslt30", and "Liquid". :vartype map_type: str or ~azure.mgmt.logic.models.MapType """ _validation = { "map_type": {"required": True}, } _attribute_map = { "map_type": {"key": "mapType", "type": "str"}, } def __init__(self, *, map_type: Union[str, "_models.MapType"], **kwargs): """ :keyword map_type: The map type of integration account map. Required. Known values are: "NotSpecified", "Xslt", "Xslt20", "Xslt30", and "Liquid". :paramtype map_type: str or ~azure.mgmt.logic.models.MapType """ super().__init__(**kwargs) self.map_type = map_type class IntegrationAccountMapListResult(_serialization.Model): """The list of integration account maps. :ivar value: The list of integration account maps. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccountMap] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccountMap]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccountMap"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration account maps. :paramtype value: list[~azure.mgmt.logic.models.IntegrationAccountMap] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountMapPropertiesParametersSchema(_serialization.Model): """The parameters schema of integration account map. :ivar ref: The reference name. :vartype ref: str """ _attribute_map = { "ref": {"key": "ref", "type": "str"}, } def __init__(self, *, ref: Optional[str] = None, **kwargs): """ :keyword ref: The reference name. :paramtype ref: str """ super().__init__(**kwargs) self.ref = ref class IntegrationAccountPartner(Resource): """The integration account partner. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar partner_type: The partner type. Required. Known values are: "NotSpecified" and "B2B". :vartype partner_type: str or ~azure.mgmt.logic.models.PartnerType :ivar created_time: The created time. 
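# --- Illustrative usage sketch (editor's addition, not generated code). ---
# `map_type` is the only required argument for IntegrationAccountMap; the
# region, XSLT body, and content type below are placeholders.
def _example_xslt_map() -> "IntegrationAccountMap":
    """Sketch only: an XSLT map with inline content."""
    return IntegrationAccountMap(
        map_type="Xslt",
        location="westus",  # assumed region
        content="<xsl:stylesheet/>",  # placeholder XSLT body
        content_type="application/xml",
    )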
:vartype created_time: ~datetime.datetime :ivar changed_time: The changed time. :vartype changed_time: ~datetime.datetime :ivar metadata: The metadata. :vartype metadata: JSON :ivar content: The partner content. Required. :vartype content: ~azure.mgmt.logic.models.PartnerContent """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "partner_type": {"required": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, "content": {"required": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "partner_type": {"key": "properties.partnerType", "type": "str"}, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "metadata": {"key": "properties.metadata", "type": "object"}, "content": {"key": "properties.content", "type": "PartnerContent"}, } def __init__( self, *, partner_type: Union[str, "_models.PartnerType"], content: "_models.PartnerContent", location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, metadata: Optional[JSON] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword partner_type: The partner type. Required. Known values are: "NotSpecified" and "B2B". :paramtype partner_type: str or ~azure.mgmt.logic.models.PartnerType :keyword metadata: The metadata. :paramtype metadata: JSON :keyword content: The partner content. Required. :paramtype content: ~azure.mgmt.logic.models.PartnerContent """ super().__init__(location=location, tags=tags, **kwargs) self.partner_type = partner_type self.created_time = None self.changed_time = None self.metadata = metadata self.content = content class IntegrationAccountPartnerFilter(_serialization.Model): """The integration account partner filter for odata query. All required parameters must be populated in order to send to Azure. :ivar partner_type: The partner type of integration account partner. Required. Known values are: "NotSpecified" and "B2B". :vartype partner_type: str or ~azure.mgmt.logic.models.PartnerType """ _validation = { "partner_type": {"required": True}, } _attribute_map = { "partner_type": {"key": "partnerType", "type": "str"}, } def __init__(self, *, partner_type: Union[str, "_models.PartnerType"], **kwargs): """ :keyword partner_type: The partner type of integration account partner. Required. Known values are: "NotSpecified" and "B2B". :paramtype partner_type: str or ~azure.mgmt.logic.models.PartnerType """ super().__init__(**kwargs) self.partner_type = partner_type class IntegrationAccountPartnerListResult(_serialization.Model): """The list of integration account partners. :ivar value: The list of integration account partners. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccountPartner] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccountPartner]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccountPartner"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration account partners. 
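# --- Illustrative usage sketch (editor's addition, not generated code). ---
# "B2B" is one of the two documented known values for PartnerType.
def _example_partner_filter() -> "IntegrationAccountPartnerFilter":
    """Sketch only: select B2B partners when listing."""
    return IntegrationAccountPartnerFilter(partner_type="B2B")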
:paramtype value: list[~azure.mgmt.logic.models.IntegrationAccountPartner] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountSchema(Resource): # pylint: disable=too-many-instance-attributes """The integration account schema. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar schema_type: The schema type. Required. Known values are: "NotSpecified" and "Xml". :vartype schema_type: str or ~azure.mgmt.logic.models.SchemaType :ivar target_namespace: The target namespace of the schema. :vartype target_namespace: str :ivar document_name: The document name. :vartype document_name: str :ivar file_name: The file name. :vartype file_name: str :ivar created_time: The created time. :vartype created_time: ~datetime.datetime :ivar changed_time: The changed time. :vartype changed_time: ~datetime.datetime :ivar metadata: The metadata. :vartype metadata: JSON :ivar content: The content. :vartype content: str :ivar content_type: The content type. :vartype content_type: str :ivar content_link: The content link. :vartype content_link: ~azure.mgmt.logic.models.ContentLink """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "schema_type": {"required": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, "content_link": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "schema_type": {"key": "properties.schemaType", "type": "str"}, "target_namespace": {"key": "properties.targetNamespace", "type": "str"}, "document_name": {"key": "properties.documentName", "type": "str"}, "file_name": {"key": "properties.fileName", "type": "str"}, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "metadata": {"key": "properties.metadata", "type": "object"}, "content": {"key": "properties.content", "type": "str"}, "content_type": {"key": "properties.contentType", "type": "str"}, "content_link": {"key": "properties.contentLink", "type": "ContentLink"}, } def __init__( self, *, schema_type: Union[str, "_models.SchemaType"], location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, target_namespace: Optional[str] = None, document_name: Optional[str] = None, file_name: Optional[str] = None, metadata: Optional[JSON] = None, content: Optional[str] = None, content_type: Optional[str] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword schema_type: The schema type. Required. Known values are: "NotSpecified" and "Xml". :paramtype schema_type: str or ~azure.mgmt.logic.models.SchemaType :keyword target_namespace: The target namespace of the schema. :paramtype target_namespace: str :keyword document_name: The document name. 
:paramtype document_name: str :keyword file_name: The file name. :paramtype file_name: str :keyword metadata: The metadata. :paramtype metadata: JSON :keyword content: The content. :paramtype content: str :keyword content_type: The content type. :paramtype content_type: str """ super().__init__(location=location, tags=tags, **kwargs) self.schema_type = schema_type self.target_namespace = target_namespace self.document_name = document_name self.file_name = file_name self.created_time = None self.changed_time = None self.metadata = metadata self.content = content self.content_type = content_type self.content_link = None class IntegrationAccountSchemaFilter(_serialization.Model): """The integration account schema filter for odata query. All required parameters must be populated in order to send to Azure. :ivar schema_type: The schema type of integration account schema. Required. Known values are: "NotSpecified" and "Xml". :vartype schema_type: str or ~azure.mgmt.logic.models.SchemaType """ _validation = { "schema_type": {"required": True}, } _attribute_map = { "schema_type": {"key": "schemaType", "type": "str"}, } def __init__(self, *, schema_type: Union[str, "_models.SchemaType"], **kwargs): """ :keyword schema_type: The schema type of integration account schema. Required. Known values are: "NotSpecified" and "Xml". :paramtype schema_type: str or ~azure.mgmt.logic.models.SchemaType """ super().__init__(**kwargs) self.schema_type = schema_type class IntegrationAccountSchemaListResult(_serialization.Model): """The list of integration account schemas. :ivar value: The list of integration account schemas. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccountSchema] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccountSchema]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccountSchema"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration account schemas. :paramtype value: list[~azure.mgmt.logic.models.IntegrationAccountSchema] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountSession(Resource): """The integration account session. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar created_time: The created time. :vartype created_time: ~datetime.datetime :ivar changed_time: The changed time. :vartype changed_time: ~datetime.datetime :ivar content: The session content. 
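# --- Illustrative usage sketch (editor's addition, not generated code). ---
# `schema_type` is required ("NotSpecified" or "Xml"); everything else is
# optional. The namespace and XSD body below are placeholders.
def _example_xml_schema() -> "IntegrationAccountSchema":
    """Sketch only: an XML schema with inline content."""
    return IntegrationAccountSchema(
        schema_type="Xml",
        target_namespace="http://example.contoso.com/orders",  # assumed namespace
        content="<xs:schema/>",  # placeholder XSD body
        content_type="application/xml",
    )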
:vartype content: JSON """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "content": {"key": "properties.content", "type": "object"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, content: Optional[JSON] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword content: The session content. :paramtype content: JSON """ super().__init__(location=location, tags=tags, **kwargs) self.created_time = None self.changed_time = None self.content = content class IntegrationAccountSessionFilter(_serialization.Model): """The integration account session filter. All required parameters must be populated in order to send to Azure. :ivar changed_time: The changed time of integration account sessions. Required. :vartype changed_time: ~datetime.datetime """ _validation = { "changed_time": {"required": True}, } _attribute_map = { "changed_time": {"key": "changedTime", "type": "iso-8601"}, } def __init__(self, *, changed_time: datetime.datetime, **kwargs): """ :keyword changed_time: The changed time of integration account sessions. Required. :paramtype changed_time: ~datetime.datetime """ super().__init__(**kwargs) self.changed_time = changed_time class IntegrationAccountSessionListResult(_serialization.Model): """The list of integration account sessions. :ivar value: The list of integration account sessions. :vartype value: list[~azure.mgmt.logic.models.IntegrationAccountSession] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationAccountSession]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationAccountSession"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration account sessions. :paramtype value: list[~azure.mgmt.logic.models.IntegrationAccountSession] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationAccountSku(_serialization.Model): """The integration account sku. All required parameters must be populated in order to send to Azure. :ivar name: The sku name. Required. Known values are: "NotSpecified", "Free", "Basic", and "Standard". :vartype name: str or ~azure.mgmt.logic.models.IntegrationAccountSkuName """ _validation = { "name": {"required": True}, } _attribute_map = { "name": {"key": "name", "type": "str"}, } def __init__(self, *, name: Union[str, "_models.IntegrationAccountSkuName"], **kwargs): """ :keyword name: The sku name. Required. Known values are: "NotSpecified", "Free", "Basic", and "Standard". 
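# --- Illustrative usage sketch (editor's addition, not generated code). ---
# The session filter's single required field is a datetime; the instant
# below is arbitrary. The `datetime` module is already in scope, since the
# constructor signature above uses it.
def _example_session_filter() -> "IntegrationAccountSessionFilter":
    """Sketch only: sessions changed after a given instant."""
    return IntegrationAccountSessionFilter(
        changed_time=datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc)
    )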
:paramtype name: str or ~azure.mgmt.logic.models.IntegrationAccountSkuName """ super().__init__(**kwargs) self.name = name class IntegrationServiceEnvironmenEncryptionConfiguration(_serialization.Model): """The encryption configuration for the integration service environment. :ivar encryption_key_reference: The encryption key reference. :vartype encryption_key_reference: ~azure.mgmt.logic.models.IntegrationServiceEnvironmenEncryptionKeyReference """ _attribute_map = { "encryption_key_reference": { "key": "encryptionKeyReference", "type": "IntegrationServiceEnvironmenEncryptionKeyReference", }, } def __init__( self, *, encryption_key_reference: Optional["_models.IntegrationServiceEnvironmenEncryptionKeyReference"] = None, **kwargs ): """ :keyword encryption_key_reference: The encryption key reference. :paramtype encryption_key_reference: ~azure.mgmt.logic.models.IntegrationServiceEnvironmenEncryptionKeyReference """ super().__init__(**kwargs) self.encryption_key_reference = encryption_key_reference class IntegrationServiceEnvironmenEncryptionKeyReference(_serialization.Model): """The encryption key details for the integration service environment. :ivar key_vault: The key vault reference. :vartype key_vault: ~azure.mgmt.logic.models.ResourceReference :ivar key_name: Gets the key name in the Key Vault. :vartype key_name: str :ivar key_version: Gets the version of the key specified in the keyName property. :vartype key_version: str """ _attribute_map = { "key_vault": {"key": "keyVault", "type": "ResourceReference"}, "key_name": {"key": "keyName", "type": "str"}, "key_version": {"key": "keyVersion", "type": "str"}, } def __init__( self, *, key_vault: Optional["_models.ResourceReference"] = None, key_name: Optional[str] = None, key_version: Optional[str] = None, **kwargs ): """ :keyword key_vault: The key vault reference. :paramtype key_vault: ~azure.mgmt.logic.models.ResourceReference :keyword key_name: Gets the key name in the Key Vault. :paramtype key_name: str :keyword key_version: Gets the version of the key specified in the keyName property. :paramtype key_version: str """ super().__init__(**kwargs) self.key_vault = key_vault self.key_name = key_name self.key_version = key_version class IntegrationServiceEnvironment(Resource): """The integration service environment. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar properties: The integration service environment properties. :vartype properties: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentProperties :ivar sku: The sku. :vartype sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSku :ivar identity: Managed service identity properties. 
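# --- Illustrative usage sketch (editor's addition, not generated code). ---
# Note the class names really do read "Environmen..." (no trailing "t");
# they are kept as shipped for API compatibility. The key name and
# version below are assumptions.
def _example_encryption_config() -> "IntegrationServiceEnvironmenEncryptionConfiguration":
    """Sketch only: wire a key vault key into the encryption config."""
    key_ref = IntegrationServiceEnvironmenEncryptionKeyReference(
        key_name="ise-key",  # assumed key name
        key_version="1",  # assumed version
    )
    return IntegrationServiceEnvironmenEncryptionConfiguration(encryption_key_reference=key_ref)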
:vartype identity: ~azure.mgmt.logic.models.ManagedServiceIdentity """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "properties": {"key": "properties", "type": "IntegrationServiceEnvironmentProperties"}, "sku": {"key": "sku", "type": "IntegrationServiceEnvironmentSku"}, "identity": {"key": "identity", "type": "ManagedServiceIdentity"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, properties: Optional["_models.IntegrationServiceEnvironmentProperties"] = None, sku: Optional["_models.IntegrationServiceEnvironmentSku"] = None, identity: Optional["_models.ManagedServiceIdentity"] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword properties: The integration service environment properties. :paramtype properties: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentProperties :keyword sku: The sku. :paramtype sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSku :keyword identity: Managed service identity properties. :paramtype identity: ~azure.mgmt.logic.models.ManagedServiceIdentity """ super().__init__(location=location, tags=tags, **kwargs) self.properties = properties self.sku = sku self.identity = identity class IntegrationServiceEnvironmentAccessEndpoint(_serialization.Model): """The integration service environment access endpoint. :ivar type: The access endpoint type. Known values are: "NotSpecified", "External", and "Internal". :vartype type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpointType """ _attribute_map = { "type": {"key": "type", "type": "str"}, } def __init__( self, *, type: Optional[Union[str, "_models.IntegrationServiceEnvironmentAccessEndpointType"]] = None, **kwargs ): """ :keyword type: The access endpoint type. Known values are: "NotSpecified", "External", and "Internal". :paramtype type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpointType """ super().__init__(**kwargs) self.type = type class IntegrationServiceEnvironmentListResult(_serialization.Model): """The list of integration service environments. :ivar value: :vartype value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironment] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationServiceEnvironment]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationServiceEnvironment"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: :paramtype value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironment] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationServiceEnvironmentManagedApi(Resource): # pylint: disable=too-many-instance-attributes """The integration service environment managed api. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. 
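# --- Illustrative usage sketch (editor's addition, not generated code). ---
# Combines the environment resource with its sku model (defined further
# down in this module); "Developer" is a documented sku name and the
# region is an assumption.
def _example_ise() -> "IntegrationServiceEnvironment":
    """Sketch only: a minimal developer-sku environment."""
    return IntegrationServiceEnvironment(
        location="westus2",  # assumed region
        sku=IntegrationServiceEnvironmentSku(name="Developer", capacity=0),
    )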
:vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar name_properties_name: The name. :vartype name_properties_name: str :ivar connection_parameters: The connection parameters. :vartype connection_parameters: dict[str, JSON] :ivar metadata: The metadata. :vartype metadata: ~azure.mgmt.logic.models.ApiResourceMetadata :ivar runtime_urls: The runtime urls. :vartype runtime_urls: list[str] :ivar general_information: The api general information. :vartype general_information: ~azure.mgmt.logic.models.ApiResourceGeneralInformation :ivar capabilities: The capabilities. :vartype capabilities: list[str] :ivar backend_service: The backend service. :vartype backend_service: ~azure.mgmt.logic.models.ApiResourceBackendService :ivar policies: The policies for the API. :vartype policies: ~azure.mgmt.logic.models.ApiResourcePolicies :ivar api_definition_url: The API definition. :vartype api_definition_url: str :ivar api_definitions: The api definitions. :vartype api_definitions: ~azure.mgmt.logic.models.ApiResourceDefinitions :ivar integration_service_environment: The integration service environment reference. :vartype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference :ivar provisioning_state: The provisioning state. Known values are: "NotSpecified", "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled", "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering", "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress". :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState :ivar category: The category. Known values are: "NotSpecified", "Enterprise", "Standard", and "Premium". :vartype category: str or ~azure.mgmt.logic.models.ApiTier :ivar deployment_parameters: The integration service environment managed api deployment parameters. 
:vartype deployment_parameters: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentManagedApiDeploymentParameters """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "name_properties_name": {"readonly": True}, "connection_parameters": {"readonly": True}, "metadata": {"readonly": True}, "runtime_urls": {"readonly": True}, "general_information": {"readonly": True}, "capabilities": {"readonly": True}, "backend_service": {"readonly": True}, "policies": {"readonly": True}, "api_definition_url": {"readonly": True}, "api_definitions": {"readonly": True}, "provisioning_state": {"readonly": True}, "category": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "name_properties_name": {"key": "properties.name", "type": "str"}, "connection_parameters": {"key": "properties.connectionParameters", "type": "{object}"}, "metadata": {"key": "properties.metadata", "type": "ApiResourceMetadata"}, "runtime_urls": {"key": "properties.runtimeUrls", "type": "[str]"}, "general_information": {"key": "properties.generalInformation", "type": "ApiResourceGeneralInformation"}, "capabilities": {"key": "properties.capabilities", "type": "[str]"}, "backend_service": {"key": "properties.backendService", "type": "ApiResourceBackendService"}, "policies": {"key": "properties.policies", "type": "ApiResourcePolicies"}, "api_definition_url": {"key": "properties.apiDefinitionUrl", "type": "str"}, "api_definitions": {"key": "properties.apiDefinitions", "type": "ApiResourceDefinitions"}, "integration_service_environment": { "key": "properties.integrationServiceEnvironment", "type": "ResourceReference", }, "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, "category": {"key": "properties.category", "type": "str"}, "deployment_parameters": { "key": "properties.deploymentParameters", "type": "IntegrationServiceEnvironmentManagedApiDeploymentParameters", }, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, integration_service_environment: Optional["_models.ResourceReference"] = None, deployment_parameters: Optional["_models.IntegrationServiceEnvironmentManagedApiDeploymentParameters"] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword integration_service_environment: The integration service environment reference. :paramtype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference :keyword deployment_parameters: The integration service environment managed api deployment parameters. 
:paramtype deployment_parameters: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentManagedApiDeploymentParameters """ super().__init__(location=location, tags=tags, **kwargs) self.name_properties_name = None self.connection_parameters = None self.metadata = None self.runtime_urls = None self.general_information = None self.capabilities = None self.backend_service = None self.policies = None self.api_definition_url = None self.api_definitions = None self.integration_service_environment = integration_service_environment self.provisioning_state = None self.category = None self.deployment_parameters = deployment_parameters class IntegrationServiceEnvironmentManagedApiDeploymentParameters(_serialization.Model): """The integration service environment managed api deployment parameters. :ivar content_link_definition: The integration service environment managed api content link for deployment. :vartype content_link_definition: ~azure.mgmt.logic.models.ContentLink """ _attribute_map = { "content_link_definition": {"key": "contentLinkDefinition", "type": "ContentLink"}, } def __init__(self, *, content_link_definition: Optional["_models.ContentLink"] = None, **kwargs): """ :keyword content_link_definition: The integration service environment managed api content link for deployment. :paramtype content_link_definition: ~azure.mgmt.logic.models.ContentLink """ super().__init__(**kwargs) self.content_link_definition = content_link_definition class IntegrationServiceEnvironmentManagedApiListResult(_serialization.Model): """The list of integration service environment managed APIs. :ivar value: The integration service environment managed APIs. :vartype value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentManagedApi] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationServiceEnvironmentManagedApi]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationServiceEnvironmentManagedApi"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The integration service environment managed APIs. :paramtype value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentManagedApi] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationServiceEnvironmentManagedApiProperties( ApiResourceProperties ): # pylint: disable=too-many-instance-attributes """The integration service environment managed api properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name. :vartype name: str :ivar connection_parameters: The connection parameters. :vartype connection_parameters: dict[str, JSON] :ivar metadata: The metadata. :vartype metadata: ~azure.mgmt.logic.models.ApiResourceMetadata :ivar runtime_urls: The runtime urls. :vartype runtime_urls: list[str] :ivar general_information: The api general information. :vartype general_information: ~azure.mgmt.logic.models.ApiResourceGeneralInformation :ivar capabilities: The capabilities. :vartype capabilities: list[str] :ivar backend_service: The backend service. :vartype backend_service: ~azure.mgmt.logic.models.ApiResourceBackendService :ivar policies: The policies for the API. :vartype policies: ~azure.mgmt.logic.models.ApiResourcePolicies :ivar api_definition_url: The API definition. 
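# --- Illustrative usage sketch (editor's addition, not generated code). ---
# Almost every property of the managed api model is readonly; only
# location, tags, the environment reference, and deployment parameters
# are writable. ResourceReference (defined elsewhere in this module) is
# an id-only payload; the ARM id below is a deliberately incomplete
# placeholder.
def _example_managed_api() -> "IntegrationServiceEnvironmentManagedApi":
    """Sketch only: attach a managed api to an environment by reference."""
    return IntegrationServiceEnvironmentManagedApi(
        location="westus2",  # assumed region
        integration_service_environment=ResourceReference(
            id="/subscriptions/.../integrationServiceEnvironments/my-ise"  # incomplete placeholder id
        ),
    )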
:vartype api_definition_url: str :ivar api_definitions: The api definitions. :vartype api_definitions: ~azure.mgmt.logic.models.ApiResourceDefinitions :ivar integration_service_environment: The integration service environment reference. :vartype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference :ivar provisioning_state: The provisioning state. Known values are: "NotSpecified", "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled", "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering", "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress". :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState :ivar category: The category. Known values are: "NotSpecified", "Enterprise", "Standard", and "Premium". :vartype category: str or ~azure.mgmt.logic.models.ApiTier :ivar deployment_parameters: The integration service environment managed api deployment parameters. :vartype deployment_parameters: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentManagedApiDeploymentParameters """ _validation = { "name": {"readonly": True}, "connection_parameters": {"readonly": True}, "metadata": {"readonly": True}, "runtime_urls": {"readonly": True}, "general_information": {"readonly": True}, "capabilities": {"readonly": True}, "backend_service": {"readonly": True}, "policies": {"readonly": True}, "api_definition_url": {"readonly": True}, "api_definitions": {"readonly": True}, "provisioning_state": {"readonly": True}, "category": {"readonly": True}, } _attribute_map = { "name": {"key": "name", "type": "str"}, "connection_parameters": {"key": "connectionParameters", "type": "{object}"}, "metadata": {"key": "metadata", "type": "ApiResourceMetadata"}, "runtime_urls": {"key": "runtimeUrls", "type": "[str]"}, "general_information": {"key": "generalInformation", "type": "ApiResourceGeneralInformation"}, "capabilities": {"key": "capabilities", "type": "[str]"}, "backend_service": {"key": "backendService", "type": "ApiResourceBackendService"}, "policies": {"key": "policies", "type": "ApiResourcePolicies"}, "api_definition_url": {"key": "apiDefinitionUrl", "type": "str"}, "api_definitions": {"key": "apiDefinitions", "type": "ApiResourceDefinitions"}, "integration_service_environment": {"key": "integrationServiceEnvironment", "type": "ResourceReference"}, "provisioning_state": {"key": "provisioningState", "type": "str"}, "category": {"key": "category", "type": "str"}, "deployment_parameters": { "key": "deploymentParameters", "type": "IntegrationServiceEnvironmentManagedApiDeploymentParameters", }, } def __init__( self, *, integration_service_environment: Optional["_models.ResourceReference"] = None, deployment_parameters: Optional["_models.IntegrationServiceEnvironmentManagedApiDeploymentParameters"] = None, **kwargs ): """ :keyword integration_service_environment: The integration service environment reference. :paramtype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference :keyword deployment_parameters: The integration service environment managed api deployment parameters. :paramtype deployment_parameters: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentManagedApiDeploymentParameters """ super().__init__(integration_service_environment=integration_service_environment, **kwargs) self.deployment_parameters = deployment_parameters class IntegrationServiceEnvironmentNetworkDependency(_serialization.Model): """The integration service environment network dependency. 
:ivar category: The network dependency category type. Known values are: "NotSpecified", "AzureStorage", "AzureManagement", "AzureActiveDirectory", "SSLCertificateVerification", "DiagnosticLogsAndMetrics", "IntegrationServiceEnvironmentConnectors", "RedisCache", "AccessEndpoints", "RecoveryService", "SQL", and "RegionalService". :vartype category: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyCategoryType :ivar display_name: The display name. :vartype display_name: str :ivar endpoints: The endpoints. :vartype endpoints: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndpoint] """ _attribute_map = { "category": {"key": "category", "type": "str"}, "display_name": {"key": "displayName", "type": "str"}, "endpoints": {"key": "endpoints", "type": "[IntegrationServiceEnvironmentNetworkEndpoint]"}, } def __init__( self, *, category: Optional[Union[str, "_models.IntegrationServiceEnvironmentNetworkDependencyCategoryType"]] = None, display_name: Optional[str] = None, endpoints: Optional[List["_models.IntegrationServiceEnvironmentNetworkEndpoint"]] = None, **kwargs ): """ :keyword category: The network dependency category type. Known values are: "NotSpecified", "AzureStorage", "AzureManagement", "AzureActiveDirectory", "SSLCertificateVerification", "DiagnosticLogsAndMetrics", "IntegrationServiceEnvironmentConnectors", "RedisCache", "AccessEndpoints", "RecoveryService", "SQL", and "RegionalService". :paramtype category: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyCategoryType :keyword display_name: The display name. :paramtype display_name: str :keyword endpoints: The endpoints. :paramtype endpoints: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndpoint] """ super().__init__(**kwargs) self.category = category self.display_name = display_name self.endpoints = endpoints class IntegrationServiceEnvironmentNetworkDependencyHealth(_serialization.Model): """The integration service environment network dependency health. :ivar error: The error if any occurred during the operation. :vartype error: ~azure.mgmt.logic.models.ExtendedErrorInfo :ivar state: The network dependency health state. Known values are: "NotSpecified", "Healthy", "Unhealthy", and "Unknown". :vartype state: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealthState """ _attribute_map = { "error": {"key": "error", "type": "ExtendedErrorInfo"}, "state": {"key": "state", "type": "str"}, } def __init__( self, *, error: Optional["_models.ExtendedErrorInfo"] = None, state: Optional[Union[str, "_models.IntegrationServiceEnvironmentNetworkDependencyHealthState"]] = None, **kwargs ): """ :keyword error: The error if any occurred during the operation. :paramtype error: ~azure.mgmt.logic.models.ExtendedErrorInfo :keyword state: The network dependency health state. Known values are: "NotSpecified", "Healthy", "Unhealthy", and "Unknown". :paramtype state: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealthState """ super().__init__(**kwargs) self.error = error self.state = state class IntegrationServiceEnvironmentNetworkEndpoint(_serialization.Model): """The network endpoint. :ivar accessibility: The accessibility state. Known values are: "NotSpecified", "Unknown", "Available", and "NotAvailable". :vartype accessibility: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState :ivar domain_name: The domain name. 
:vartype domain_name: str :ivar ports: The ports. :vartype ports: list[str] """ _attribute_map = { "accessibility": {"key": "accessibility", "type": "str"}, "domain_name": {"key": "domainName", "type": "str"}, "ports": {"key": "ports", "type": "[str]"}, } def __init__( self, *, accessibility: Optional[ Union[str, "_models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState"] ] = None, domain_name: Optional[str] = None, ports: Optional[List[str]] = None, **kwargs ): """ :keyword accessibility: The accessibility state. Known values are: "NotSpecified", "Unknown", "Available", and "NotAvailable". :paramtype accessibility: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState :keyword domain_name: The domain name. :paramtype domain_name: str :keyword ports: The ports. :paramtype ports: list[str] """ super().__init__(**kwargs) self.accessibility = accessibility self.domain_name = domain_name self.ports = ports class IntegrationServiceEnvironmentProperties(_serialization.Model): """The integration service environment properties. :ivar provisioning_state: The provisioning state. Known values are: "NotSpecified", "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled", "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering", "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress". :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState :ivar state: The integration service environment state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". :vartype state: str or ~azure.mgmt.logic.models.WorkflowState :ivar integration_service_environment_id: Gets the tracking id. :vartype integration_service_environment_id: str :ivar endpoints_configuration: The endpoints configuration. :vartype endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration :ivar network_configuration: The network configuration. :vartype network_configuration: ~azure.mgmt.logic.models.NetworkConfiguration :ivar encryption_configuration: The encryption configuration. :vartype encryption_configuration: ~azure.mgmt.logic.models.IntegrationServiceEnvironmenEncryptionConfiguration """ _attribute_map = { "provisioning_state": {"key": "provisioningState", "type": "str"}, "state": {"key": "state", "type": "str"}, "integration_service_environment_id": {"key": "integrationServiceEnvironmentId", "type": "str"}, "endpoints_configuration": {"key": "endpointsConfiguration", "type": "FlowEndpointsConfiguration"}, "network_configuration": {"key": "networkConfiguration", "type": "NetworkConfiguration"}, "encryption_configuration": { "key": "encryptionConfiguration", "type": "IntegrationServiceEnvironmenEncryptionConfiguration", }, } def __init__( self, *, provisioning_state: Optional[Union[str, "_models.WorkflowProvisioningState"]] = None, state: Optional[Union[str, "_models.WorkflowState"]] = None, integration_service_environment_id: Optional[str] = None, endpoints_configuration: Optional["_models.FlowEndpointsConfiguration"] = None, network_configuration: Optional["_models.NetworkConfiguration"] = None, encryption_configuration: Optional["_models.IntegrationServiceEnvironmenEncryptionConfiguration"] = None, **kwargs ): """ :keyword provisioning_state: The provisioning state. 
Known values are: "NotSpecified", "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled", "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering", "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress". :paramtype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState :keyword state: The integration service environment state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". :paramtype state: str or ~azure.mgmt.logic.models.WorkflowState :keyword integration_service_environment_id: Gets the tracking id. :paramtype integration_service_environment_id: str :keyword endpoints_configuration: The endpoints configuration. :paramtype endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration :keyword network_configuration: The network configuration. :paramtype network_configuration: ~azure.mgmt.logic.models.NetworkConfiguration :keyword encryption_configuration: The encryption configuration. :paramtype encryption_configuration: ~azure.mgmt.logic.models.IntegrationServiceEnvironmenEncryptionConfiguration """ super().__init__(**kwargs) self.provisioning_state = provisioning_state self.state = state self.integration_service_environment_id = integration_service_environment_id self.endpoints_configuration = endpoints_configuration self.network_configuration = network_configuration self.encryption_configuration = encryption_configuration class IntegrationServiceEnvironmentSku(_serialization.Model): """The integration service environment sku. :ivar name: The sku name. Known values are: "NotSpecified", "Premium", and "Developer". :vartype name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName :ivar capacity: The sku capacity. :vartype capacity: int """ _attribute_map = { "name": {"key": "name", "type": "str"}, "capacity": {"key": "capacity", "type": "int"}, } def __init__( self, *, name: Optional[Union[str, "_models.IntegrationServiceEnvironmentSkuName"]] = None, capacity: Optional[int] = None, **kwargs ): """ :keyword name: The sku name. Known values are: "NotSpecified", "Premium", and "Developer". :paramtype name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName :keyword capacity: The sku capacity. :paramtype capacity: int """ super().__init__(**kwargs) self.name = name self.capacity = capacity class IntegrationServiceEnvironmentSkuCapacity(_serialization.Model): """The integration service environment sku capacity. :ivar minimum: The minimum capacity. :vartype minimum: int :ivar maximum: The maximum capacity. :vartype maximum: int :ivar default: The default capacity. :vartype default: int :ivar scale_type: The sku scale type. Known values are: "Manual", "Automatic", and "None". :vartype scale_type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuScaleType """ _attribute_map = { "minimum": {"key": "minimum", "type": "int"}, "maximum": {"key": "maximum", "type": "int"}, "default": {"key": "default", "type": "int"}, "scale_type": {"key": "scaleType", "type": "str"}, } def __init__( self, *, minimum: Optional[int] = None, maximum: Optional[int] = None, default: Optional[int] = None, scale_type: Optional[Union[str, "_models.IntegrationServiceEnvironmentSkuScaleType"]] = None, **kwargs ): """ :keyword minimum: The minimum capacity. :paramtype minimum: int :keyword maximum: The maximum capacity. :paramtype maximum: int :keyword default: The default capacity. 
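# --- Illustrative usage sketch (editor's addition, not generated code). ---
# The sku model above pairs a documented name with an integer capacity.
def _example_premium_sku() -> "IntegrationServiceEnvironmentSku":
    """Sketch only: one premium scale unit."""
    return IntegrationServiceEnvironmentSku(name="Premium", capacity=1)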
:paramtype default: int :keyword scale_type: The sku scale type. Known values are: "Manual", "Automatic", and "None". :paramtype scale_type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuScaleType """ super().__init__(**kwargs) self.minimum = minimum self.maximum = maximum self.default = default self.scale_type = scale_type class IntegrationServiceEnvironmentSkuDefinition(_serialization.Model): """The integration service environment sku definition. :ivar resource_type: The resource type. :vartype resource_type: str :ivar sku: The sku. :vartype sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinitionSku :ivar capacity: The sku capacity. :vartype capacity: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuCapacity """ _attribute_map = { "resource_type": {"key": "resourceType", "type": "str"}, "sku": {"key": "sku", "type": "IntegrationServiceEnvironmentSkuDefinitionSku"}, "capacity": {"key": "capacity", "type": "IntegrationServiceEnvironmentSkuCapacity"}, } def __init__( self, *, resource_type: Optional[str] = None, sku: Optional["_models.IntegrationServiceEnvironmentSkuDefinitionSku"] = None, capacity: Optional["_models.IntegrationServiceEnvironmentSkuCapacity"] = None, **kwargs ): """ :keyword resource_type: The resource type. :paramtype resource_type: str :keyword sku: The sku. :paramtype sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinitionSku :keyword capacity: The sku capacity. :paramtype capacity: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuCapacity """ super().__init__(**kwargs) self.resource_type = resource_type self.sku = sku self.capacity = capacity class IntegrationServiceEnvironmentSkuDefinitionSku(_serialization.Model): """The sku. :ivar name: The sku name. Known values are: "NotSpecified", "Premium", and "Developer". :vartype name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName :ivar tier: The sku tier. :vartype tier: str """ _attribute_map = { "name": {"key": "name", "type": "str"}, "tier": {"key": "tier", "type": "str"}, } def __init__( self, *, name: Optional[Union[str, "_models.IntegrationServiceEnvironmentSkuName"]] = None, tier: Optional[str] = None, **kwargs ): """ :keyword name: The sku name. Known values are: "NotSpecified", "Premium", and "Developer". :paramtype name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName :keyword tier: The sku tier. :paramtype tier: str """ super().__init__(**kwargs) self.name = name self.tier = tier class IntegrationServiceEnvironmentSkuList(_serialization.Model): """The list of integration service environment skus. :ivar value: The list of integration service environment skus. :vartype value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinition] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[IntegrationServiceEnvironmentSkuDefinition]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.IntegrationServiceEnvironmentSkuDefinition"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The list of integration service environment skus. :paramtype value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinition] :keyword next_link: The URL to get the next set of results. 
:paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class IntegrationServiceEnvironmentSubnetNetworkHealth(_serialization.Model): """The integration service environment subnet network health. All required parameters must be populated in order to send to Azure. :ivar outbound_network_dependencies: The outbound network dependencies. :vartype outbound_network_dependencies: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependency] :ivar outbound_network_health: The integration service environment network health. :vartype outbound_network_health: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealth :ivar network_dependency_health_state: The integration service environment network health state. Required. Known values are: "NotSpecified", "Unknown", "Available", and "NotAvailable". :vartype network_dependency_health_state: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState """ _validation = { "network_dependency_health_state": {"required": True}, } _attribute_map = { "outbound_network_dependencies": { "key": "outboundNetworkDependencies", "type": "[IntegrationServiceEnvironmentNetworkDependency]", }, "outbound_network_health": { "key": "outboundNetworkHealth", "type": "IntegrationServiceEnvironmentNetworkDependencyHealth", }, "network_dependency_health_state": {"key": "networkDependencyHealthState", "type": "str"}, } def __init__( self, *, network_dependency_health_state: Union[ str, "_models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState" ], outbound_network_dependencies: Optional[List["_models.IntegrationServiceEnvironmentNetworkDependency"]] = None, outbound_network_health: Optional["_models.IntegrationServiceEnvironmentNetworkDependencyHealth"] = None, **kwargs ): """ :keyword outbound_network_dependencies: The outbound network dependencies. :paramtype outbound_network_dependencies: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependency] :keyword outbound_network_health: The integration service environment network health. :paramtype outbound_network_health: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealth :keyword network_dependency_health_state: The integration service environment network health state. Required. Known values are: "NotSpecified", "Unknown", "Available", and "NotAvailable". :paramtype network_dependency_health_state: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState """ super().__init__(**kwargs) self.outbound_network_dependencies = outbound_network_dependencies self.outbound_network_health = outbound_network_health self.network_dependency_health_state = network_dependency_health_state class IpAddress(_serialization.Model): """The ip address. :ivar address: The address. :vartype address: str """ _attribute_map = { "address": {"key": "address", "type": "str"}, } def __init__(self, *, address: Optional[str] = None, **kwargs): """ :keyword address: The address. :paramtype address: str """ super().__init__(**kwargs) self.address = address class IpAddressRange(_serialization.Model): """The ip address range. :ivar address_range: The IP address range. :vartype address_range: str """ _attribute_map = { "address_range": {"key": "addressRange", "type": "str"}, } def __init__(self, *, address_range: Optional[str] = None, **kwargs): """ :keyword address_range: The IP address range. 
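# --- Illustrative usage sketch (editor's addition, not generated code). ---
# Only the health state is required on the subnet health model;
# "Available" is one of its documented known values.
def _example_subnet_health() -> "IntegrationServiceEnvironmentSubnetNetworkHealth":
    """Sketch only: a healthy subnet report."""
    return IntegrationServiceEnvironmentSubnetNetworkHealth(
        network_dependency_health_state="Available"
    )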
:paramtype address_range: str """ super().__init__(**kwargs) self.address_range = address_range class JsonSchema(_serialization.Model): """The JSON schema. :ivar title: The JSON title. :vartype title: str :ivar content: The JSON content. :vartype content: str """ _attribute_map = { "title": {"key": "title", "type": "str"}, "content": {"key": "content", "type": "str"}, } def __init__(self, *, title: Optional[str] = None, content: Optional[str] = None, **kwargs): """ :keyword title: The JSON title. :paramtype title: str :keyword content: The JSON content. :paramtype content: str """ super().__init__(**kwargs) self.title = title self.content = content class KeyVaultKey(_serialization.Model): """The key vault key. :ivar kid: The key id. :vartype kid: str :ivar attributes: The key attributes. :vartype attributes: ~azure.mgmt.logic.models.KeyVaultKeyAttributes """ _attribute_map = { "kid": {"key": "kid", "type": "str"}, "attributes": {"key": "attributes", "type": "KeyVaultKeyAttributes"}, } def __init__( self, *, kid: Optional[str] = None, attributes: Optional["_models.KeyVaultKeyAttributes"] = None, **kwargs ): """ :keyword kid: The key id. :paramtype kid: str :keyword attributes: The key attributes. :paramtype attributes: ~azure.mgmt.logic.models.KeyVaultKeyAttributes """ super().__init__(**kwargs) self.kid = kid self.attributes = attributes class KeyVaultKeyAttributes(_serialization.Model): """The key attributes. :ivar enabled: Whether the key is enabled or not. :vartype enabled: bool :ivar created: When the key was created. :vartype created: int :ivar updated: When the key was updated. :vartype updated: int """ _attribute_map = { "enabled": {"key": "enabled", "type": "bool"}, "created": {"key": "created", "type": "int"}, "updated": {"key": "updated", "type": "int"}, } def __init__( self, *, enabled: Optional[bool] = None, created: Optional[int] = None, updated: Optional[int] = None, **kwargs ): """ :keyword enabled: Whether the key is enabled or not. :paramtype enabled: bool :keyword created: When the key was created. :paramtype created: int :keyword updated: When the key was updated. :paramtype updated: int """ super().__init__(**kwargs) self.enabled = enabled self.created = created self.updated = updated class KeyVaultKeyCollection(_serialization.Model): """Collection of key vault keys. :ivar value: The key vault keys. :vartype value: list[~azure.mgmt.logic.models.KeyVaultKey] :ivar skip_token: The skip token. :vartype skip_token: str """ _attribute_map = { "value": {"key": "value", "type": "[KeyVaultKey]"}, "skip_token": {"key": "skipToken", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.KeyVaultKey"]] = None, skip_token: Optional[str] = None, **kwargs ): """ :keyword value: The key vault keys. :paramtype value: list[~azure.mgmt.logic.models.KeyVaultKey] :keyword skip_token: The skip token. :paramtype skip_token: str """ super().__init__(**kwargs) self.value = value self.skip_token = skip_token class KeyVaultKeyReference(_serialization.Model): """The reference to the key vault key. All required parameters must be populated in order to send to Azure. :ivar key_vault: The key vault reference. Required. :vartype key_vault: ~azure.mgmt.logic.models.KeyVaultKeyReferenceKeyVault :ivar key_name: The private key name in key vault. Required. :vartype key_name: str :ivar key_version: The private key version in key vault. 
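# --- Illustrative usage sketch (editor's addition, not generated code). ---
# `created` and `updated` are plain integers; treating them as Unix epoch
# seconds matches the key vault wire format but is an assumption here.
def _example_key_attributes() -> "KeyVaultKeyAttributes":
    """Sketch only: an enabled key with epoch timestamps."""
    return KeyVaultKeyAttributes(enabled=True, created=1672531200, updated=1672531200)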
:vartype key_version: str """ _validation = { "key_vault": {"required": True}, "key_name": {"required": True}, } _attribute_map = { "key_vault": {"key": "keyVault", "type": "KeyVaultKeyReferenceKeyVault"}, "key_name": {"key": "keyName", "type": "str"}, "key_version": {"key": "keyVersion", "type": "str"}, } def __init__( self, *, key_vault: "_models.KeyVaultKeyReferenceKeyVault", key_name: str, key_version: Optional[str] = None, **kwargs ): """ :keyword key_vault: The key vault reference. Required. :paramtype key_vault: ~azure.mgmt.logic.models.KeyVaultKeyReferenceKeyVault :keyword key_name: The private key name in key vault. Required. :paramtype key_name: str :keyword key_version: The private key version in key vault. :paramtype key_version: str """ super().__init__(**kwargs) self.key_vault = key_vault self.key_name = key_name self.key_version = key_version class KeyVaultKeyReferenceKeyVault(_serialization.Model): """The key vault reference. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: The resource name. :vartype name: str :ivar type: The resource type. :vartype type: str """ _validation = { "name": {"readonly": True}, "type": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, } def __init__(self, *, id: Optional[str] = None, **kwargs): # pylint: disable=redefined-builtin """ :keyword id: The resource id. :paramtype id: str """ super().__init__(**kwargs) self.id = id self.name = None self.type = None class KeyVaultReference(ResourceReference): """The key vault reference. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str """ _validation = { "name": {"readonly": True}, "type": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, } def __init__(self, *, id: Optional[str] = None, **kwargs): # pylint: disable=redefined-builtin """ :keyword id: The resource id. :paramtype id: str """ super().__init__(id=id, **kwargs) class ListKeyVaultKeysDefinition(_serialization.Model): """The list key vault keys definition. All required parameters must be populated in order to send to Azure. :ivar key_vault: The key vault reference. Required. :vartype key_vault: ~azure.mgmt.logic.models.KeyVaultReference :ivar skip_token: The skip token. :vartype skip_token: str """ _validation = { "key_vault": {"required": True}, } _attribute_map = { "key_vault": {"key": "keyVault", "type": "KeyVaultReference"}, "skip_token": {"key": "skipToken", "type": "str"}, } def __init__(self, *, key_vault: "_models.KeyVaultReference", skip_token: Optional[str] = None, **kwargs): """ :keyword key_vault: The key vault reference. Required. :paramtype key_vault: ~azure.mgmt.logic.models.KeyVaultReference :keyword skip_token: The skip token. :paramtype skip_token: str """ super().__init__(**kwargs) self.key_vault = key_vault self.skip_token = skip_token class ManagedApi(Resource): """The managed api definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. 
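# Illustrative usage sketch: the request body for listing key vault keys pairs a
# KeyVaultReference (an ARM resource id) with an optional continuation token; the
# placeholder id segments below are hypothetical:
#
#     definition = ListKeyVaultKeysDefinition(
#         key_vault=KeyVaultReference(
#             id="/subscriptions/<sub>/resourceGroups/<rg>"
#             "/providers/Microsoft.KeyVault/vaults/<vault>"
#         ),
#     )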
:vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar properties: The api resource properties. :vartype properties: ~azure.mgmt.logic.models.ApiResourceProperties """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "properties": {"key": "properties", "type": "ApiResourceProperties"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, properties: Optional["_models.ApiResourceProperties"] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword properties: The api resource properties. :paramtype properties: ~azure.mgmt.logic.models.ApiResourceProperties """ super().__init__(location=location, tags=tags, **kwargs) self.properties = properties class ManagedApiListResult(_serialization.Model): """The list of managed APIs. :ivar value: The managed APIs. :vartype value: list[~azure.mgmt.logic.models.ManagedApi] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[ManagedApi]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.ManagedApi"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: The managed APIs. :paramtype value: list[~azure.mgmt.logic.models.ManagedApi] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class ManagedServiceIdentity(_serialization.Model): """Managed service identity properties. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar type: Type of managed service identity. The type 'SystemAssigned' includes an implicitly created identity. The type 'None' will remove any identities from the resource. Required. Known values are: "SystemAssigned", "UserAssigned", and "None". :vartype type: str or ~azure.mgmt.logic.models.ManagedServiceIdentityType :ivar tenant_id: Tenant of managed service identity. :vartype tenant_id: str :ivar principal_id: Principal Id of managed service identity. :vartype principal_id: str :ivar user_assigned_identities: The list of user assigned identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. 
:vartype user_assigned_identities: dict[str, ~azure.mgmt.logic.models.UserAssignedIdentity] """ _validation = { "type": {"required": True}, "tenant_id": {"readonly": True}, "principal_id": {"readonly": True}, } _attribute_map = { "type": {"key": "type", "type": "str"}, "tenant_id": {"key": "tenantId", "type": "str"}, "principal_id": {"key": "principalId", "type": "str"}, "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"}, } def __init__( self, *, type: Union[str, "_models.ManagedServiceIdentityType"], user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None, **kwargs ): """ :keyword type: Type of managed service identity. The type 'SystemAssigned' includes an implicitly created identity. The type 'None' will remove any identities from the resource. Required. Known values are: "SystemAssigned", "UserAssigned", and "None". :paramtype type: str or ~azure.mgmt.logic.models.ManagedServiceIdentityType :keyword user_assigned_identities: The list of user assigned identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. :paramtype user_assigned_identities: dict[str, ~azure.mgmt.logic.models.UserAssignedIdentity] """ super().__init__(**kwargs) self.type = type self.tenant_id = None self.principal_id = None self.user_assigned_identities = user_assigned_identities class NetworkConfiguration(_serialization.Model): """The network configuration. :ivar virtual_network_address_space: Gets the virtual network address space. :vartype virtual_network_address_space: str :ivar access_endpoint: The access endpoint. :vartype access_endpoint: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpoint :ivar subnets: The subnets. :vartype subnets: list[~azure.mgmt.logic.models.ResourceReference] """ _attribute_map = { "virtual_network_address_space": {"key": "virtualNetworkAddressSpace", "type": "str"}, "access_endpoint": {"key": "accessEndpoint", "type": "IntegrationServiceEnvironmentAccessEndpoint"}, "subnets": {"key": "subnets", "type": "[ResourceReference]"}, } def __init__( self, *, virtual_network_address_space: Optional[str] = None, access_endpoint: Optional["_models.IntegrationServiceEnvironmentAccessEndpoint"] = None, subnets: Optional[List["_models.ResourceReference"]] = None, **kwargs ): """ :keyword virtual_network_address_space: Gets the virtual network address space. :paramtype virtual_network_address_space: str :keyword access_endpoint: The access endpoint. :paramtype access_endpoint: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpoint :keyword subnets: The subnets. :paramtype subnets: list[~azure.mgmt.logic.models.ResourceReference] """ super().__init__(**kwargs) self.virtual_network_address_space = virtual_network_address_space self.access_endpoint = access_endpoint self.subnets = subnets class OpenAuthenticationAccessPolicies(_serialization.Model): """AuthenticationPolicy of type Open. :ivar policies: Open authentication policies. :vartype policies: dict[str, ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicy] """ _attribute_map = { "policies": {"key": "policies", "type": "{OpenAuthenticationAccessPolicy}"}, } def __init__(self, *, policies: Optional[Dict[str, "_models.OpenAuthenticationAccessPolicy"]] = None, **kwargs): """ :keyword policies: Open authentication policies. 
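# Illustrative usage sketch: a user-assigned managed identity. The dictionary key is the
# full ARM resource id of the identity (placeholders below are hypothetical), and
# tenant_id/principal_id are read-only, so they stay None on the client. This assumes
# UserAssignedIdentity, defined elsewhere in this module, takes no required constructor
# arguments:
#
#     identity = ManagedServiceIdentity(
#         type="UserAssigned",
#         user_assigned_identities={
#             "/subscriptions/<sub>/resourceGroups/<rg>/providers"
#             "/Microsoft.ManagedIdentity/userAssignedIdentities/<name>": UserAssignedIdentity(),
#         },
#     )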
:paramtype policies: dict[str, ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicy] """ super().__init__(**kwargs) self.policies = policies class OpenAuthenticationAccessPolicy(_serialization.Model): """Open authentication access policy defined by user. :ivar type: Type of provider for OAuth. "AAD" :vartype type: str or ~azure.mgmt.logic.models.OpenAuthenticationProviderType :ivar claims: The access policy claims. :vartype claims: list[~azure.mgmt.logic.models.OpenAuthenticationPolicyClaim] """ _attribute_map = { "type": {"key": "type", "type": "str"}, "claims": {"key": "claims", "type": "[OpenAuthenticationPolicyClaim]"}, } def __init__( self, *, type: Optional[Union[str, "_models.OpenAuthenticationProviderType"]] = None, claims: Optional[List["_models.OpenAuthenticationPolicyClaim"]] = None, **kwargs ): """ :keyword type: Type of provider for OAuth. "AAD" :paramtype type: str or ~azure.mgmt.logic.models.OpenAuthenticationProviderType :keyword claims: The access policy claims. :paramtype claims: list[~azure.mgmt.logic.models.OpenAuthenticationPolicyClaim] """ super().__init__(**kwargs) self.type = type self.claims = claims class OpenAuthenticationPolicyClaim(_serialization.Model): """Open authentication policy claim. :ivar name: The name of the claim. :vartype name: str :ivar value: The value of the claim. :vartype value: str """ _attribute_map = { "name": {"key": "name", "type": "str"}, "value": {"key": "value", "type": "str"}, } def __init__(self, *, name: Optional[str] = None, value: Optional[str] = None, **kwargs): """ :keyword name: The name of the claim. :paramtype name: str :keyword value: The value of the claim. :paramtype value: str """ super().__init__(**kwargs) self.name = name self.value = value class Operation(_serialization.Model): """Logic REST API operation. :ivar origin: Operation: origin. :vartype origin: str :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name: str :ivar display: The object that represents the operation. :vartype display: ~azure.mgmt.logic.models.OperationDisplay :ivar properties: The properties. :vartype properties: JSON """ _attribute_map = { "origin": {"key": "origin", "type": "str"}, "name": {"key": "name", "type": "str"}, "display": {"key": "display", "type": "OperationDisplay"}, "properties": {"key": "properties", "type": "object"}, } def __init__( self, *, origin: Optional[str] = None, name: Optional[str] = None, display: Optional["_models.OperationDisplay"] = None, properties: Optional[JSON] = None, **kwargs ): """ :keyword origin: Operation: origin. :paramtype origin: str :keyword name: Operation name: {provider}/{resource}/{operation}. :paramtype name: str :keyword display: The object that represents the operation. :paramtype display: ~azure.mgmt.logic.models.OperationDisplay :keyword properties: The properties. :paramtype properties: JSON """ super().__init__(**kwargs) self.origin = origin self.name = name self.display = display self.properties = properties class OperationDisplay(_serialization.Model): """The object that represents the operation. :ivar provider: Service provider: Microsoft.Logic. :vartype provider: str :ivar resource: Resource on which the operation is performed: Profile, endpoint, etc. :vartype resource: str :ivar operation: Operation type: Read, write, delete, etc. :vartype operation: str :ivar description: Operation: description. 
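# Illustrative usage sketch: an OAuth (AAD) access policy that constrains the issuer
# claim of incoming tokens; the policy name and claim value are illustrative only:
#
#     policies = OpenAuthenticationAccessPolicies(
#         policies={
#             "allow-my-tenant": OpenAuthenticationAccessPolicy(
#                 type="AAD",
#                 claims=[
#                     OpenAuthenticationPolicyClaim(
#                         name="iss", value="https://sts.windows.net/<tenant-id>/"
#                     )
#                 ],
#             ),
#         }
#     )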
:vartype description: str """ _attribute_map = { "provider": {"key": "provider", "type": "str"}, "resource": {"key": "resource", "type": "str"}, "operation": {"key": "operation", "type": "str"}, "description": {"key": "description", "type": "str"}, } def __init__( self, *, provider: Optional[str] = None, resource: Optional[str] = None, operation: Optional[str] = None, description: Optional[str] = None, **kwargs ): """ :keyword provider: Service provider: Microsoft.Logic. :paramtype provider: str :keyword resource: Resource on which the operation is performed: Profile, endpoint, etc. :paramtype resource: str :keyword operation: Operation type: Read, write, delete, etc. :paramtype operation: str :keyword description: Operation: description. :paramtype description: str """ super().__init__(**kwargs) self.provider = provider self.resource = resource self.operation = operation self.description = description class OperationListResult(_serialization.Model): """Result of the request to list Logic operations. It contains a list of operations and a URL link to get the next set of results. :ivar value: List of Logic operations supported by the Logic resource provider. :vartype value: list[~azure.mgmt.logic.models.Operation] :ivar next_link: URL to get the next set of operation list results if there are any. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[Operation]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__(self, *, value: Optional[List["_models.Operation"]] = None, next_link: Optional[str] = None, **kwargs): """ :keyword value: List of Logic operations supported by the Logic resource provider. :paramtype value: list[~azure.mgmt.logic.models.Operation] :keyword next_link: URL to get the next set of operation list results if there are any. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class OperationResultProperties(_serialization.Model): """The run operation result properties. :ivar start_time: The start time of the workflow scope repetition. :vartype start_time: ~datetime.datetime :ivar end_time: The end time of the workflow scope repetition. :vartype end_time: ~datetime.datetime :ivar correlation: The correlation properties. :vartype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :ivar status: The status of the workflow scope repetition. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar code: The workflow scope repetition code. :vartype code: str :ivar error: Anything. :vartype error: any """ _attribute_map = { "start_time": {"key": "startTime", "type": "iso-8601"}, "end_time": {"key": "endTime", "type": "iso-8601"}, "correlation": {"key": "correlation", "type": "RunActionCorrelation"}, "status": {"key": "status", "type": "str"}, "code": {"key": "code", "type": "str"}, "error": {"key": "error", "type": "object"}, } def __init__( self, *, start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, correlation: Optional["_models.RunActionCorrelation"] = None, status: Optional[Union[str, "_models.WorkflowStatus"]] = None, code: Optional[str] = None, error: Optional[Any] = None, **kwargs ): """ :keyword start_time: The start time of the workflow scope repetition. 
:paramtype start_time: ~datetime.datetime :keyword end_time: The end time of the workflow scope repetition. :paramtype end_time: ~datetime.datetime :keyword correlation: The correlation properties. :paramtype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :keyword status: The status of the workflow scope repetition. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus :keyword code: The workflow scope repetition code. :paramtype code: str :keyword error: Anything. :paramtype error: any """ super().__init__(**kwargs) self.start_time = start_time self.end_time = end_time self.correlation = correlation self.status = status self.code = code self.error = error class OperationResult(OperationResultProperties): # pylint: disable=too-many-instance-attributes """The operation result definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar start_time: The start time of the workflow scope repetition. :vartype start_time: ~datetime.datetime :ivar end_time: The end time of the workflow scope repetition. :vartype end_time: ~datetime.datetime :ivar correlation: The correlation properties. :vartype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :ivar status: The status of the workflow scope repetition. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar code: The workflow scope repetition code. :vartype code: str :ivar error: Anything. :vartype error: any :ivar tracking_id: Gets the tracking id. :vartype tracking_id: str :ivar inputs: Gets the inputs. :vartype inputs: JSON :ivar inputs_link: Gets the link to inputs. :vartype inputs_link: ~azure.mgmt.logic.models.ContentLink :ivar outputs: Gets the outputs. :vartype outputs: JSON :ivar outputs_link: Gets the link to outputs. :vartype outputs_link: ~azure.mgmt.logic.models.ContentLink :ivar tracked_properties: Gets the tracked properties. :vartype tracked_properties: JSON :ivar retry_history: Gets the retry histories. 
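# Illustrative usage sketch: datetime fields are mapped as "iso-8601" above, so a
# timezone-aware datetime serializes to an ISO 8601 timestamp string (again assuming
# the vendored Model.serialize(); the exact fractional-second formatting is up to the
# serializer):
#
#     props = OperationResultProperties(
#         start_time=datetime.datetime(2023, 1, 1, 12, 0, tzinfo=datetime.timezone.utc),
#         status="Succeeded",
#     )
#     props.serialize()  # -> {"startTime": "2023-01-01T12:00:00.000Z", "status": "Succeeded"}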
    :vartype retry_history: list[~azure.mgmt.logic.models.RetryHistory]
    :ivar iteration_count: The iteration count.
    :vartype iteration_count: int
    """

    _validation = {
        "tracking_id": {"readonly": True},
        "inputs": {"readonly": True},
        "inputs_link": {"readonly": True},
        "outputs": {"readonly": True},
        "outputs_link": {"readonly": True},
        "tracked_properties": {"readonly": True},
    }

    _attribute_map = {
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "correlation": {"key": "correlation", "type": "RunActionCorrelation"},
        "status": {"key": "status", "type": "str"},
        "code": {"key": "code", "type": "str"},
        "error": {"key": "error", "type": "object"},
        "tracking_id": {"key": "trackingId", "type": "str"},
        "inputs": {"key": "inputs", "type": "object"},
        "inputs_link": {"key": "inputsLink", "type": "ContentLink"},
        "outputs": {"key": "outputs", "type": "object"},
        "outputs_link": {"key": "outputsLink", "type": "ContentLink"},
        "tracked_properties": {"key": "trackedProperties", "type": "object"},
        "retry_history": {"key": "retryHistory", "type": "[RetryHistory]"},
        "iteration_count": {"key": "iterationCount", "type": "int"},
    }

    def __init__(
        self,
        *,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        correlation: Optional["_models.RunActionCorrelation"] = None,
        status: Optional[Union[str, "_models.WorkflowStatus"]] = None,
        code: Optional[str] = None,
        error: Optional[Any] = None,
        retry_history: Optional[List["_models.RetryHistory"]] = None,
        iteration_count: Optional[int] = None,
        **kwargs
    ):
        """
        :keyword start_time: The start time of the workflow scope repetition.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: The end time of the workflow scope repetition.
        :paramtype end_time: ~datetime.datetime
        :keyword correlation: The correlation properties.
        :paramtype correlation: ~azure.mgmt.logic.models.RunActionCorrelation
        :keyword status: The status of the workflow scope repetition. Known values are:
         "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
         "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored".
        :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus
        :keyword code: The workflow scope repetition code.
        :paramtype code: str
        :keyword error: Anything.
        :paramtype error: any
        :keyword retry_history: Gets the retry histories.
        :paramtype retry_history: list[~azure.mgmt.logic.models.RetryHistory]
        :keyword iteration_count: The iteration count.
        :paramtype iteration_count: int
        """
        super().__init__(
            start_time=start_time,
            end_time=end_time,
            correlation=correlation,
            status=status,
            code=code,
            error=error,
            **kwargs
        )
        self.tracking_id = None
        self.inputs = None
        self.inputs_link = None
        self.outputs = None
        self.outputs_link = None
        self.tracked_properties = None
        self.retry_history = retry_history
        self.iteration_count = iteration_count


class PartnerContent(_serialization.Model):
    """The integration account partner content.

    :ivar b2_b: The B2B partner content.
    :vartype b2_b: ~azure.mgmt.logic.models.B2BPartnerContent
    """

    _attribute_map = {
        "b2_b": {"key": "b2b", "type": "B2BPartnerContent"},
    }

    def __init__(self, *, b2_b: Optional["_models.B2BPartnerContent"] = None, **kwargs):
        """
        :keyword b2_b: The B2B partner content.
        :paramtype b2_b: ~azure.mgmt.logic.models.B2BPartnerContent
        """
        super().__init__(**kwargs)
        self.b2_b = b2_b


class RecurrenceSchedule(_serialization.Model):
    """The recurrence schedule.

    :ivar minutes: The minutes.
    :vartype minutes: list[int]
    :ivar hours: The hours.
:vartype hours: list[int] :ivar week_days: The days of the week. :vartype week_days: list[str or ~azure.mgmt.logic.models.DaysOfWeek] :ivar month_days: The month days. :vartype month_days: list[int] :ivar monthly_occurrences: The monthly occurrences. :vartype monthly_occurrences: list[~azure.mgmt.logic.models.RecurrenceScheduleOccurrence] """ _attribute_map = { "minutes": {"key": "minutes", "type": "[int]"}, "hours": {"key": "hours", "type": "[int]"}, "week_days": {"key": "weekDays", "type": "[str]"}, "month_days": {"key": "monthDays", "type": "[int]"}, "monthly_occurrences": {"key": "monthlyOccurrences", "type": "[RecurrenceScheduleOccurrence]"}, } def __init__( self, *, minutes: Optional[List[int]] = None, hours: Optional[List[int]] = None, week_days: Optional[List[Union[str, "_models.DaysOfWeek"]]] = None, month_days: Optional[List[int]] = None, monthly_occurrences: Optional[List["_models.RecurrenceScheduleOccurrence"]] = None, **kwargs ): """ :keyword minutes: The minutes. :paramtype minutes: list[int] :keyword hours: The hours. :paramtype hours: list[int] :keyword week_days: The days of the week. :paramtype week_days: list[str or ~azure.mgmt.logic.models.DaysOfWeek] :keyword month_days: The month days. :paramtype month_days: list[int] :keyword monthly_occurrences: The monthly occurrences. :paramtype monthly_occurrences: list[~azure.mgmt.logic.models.RecurrenceScheduleOccurrence] """ super().__init__(**kwargs) self.minutes = minutes self.hours = hours self.week_days = week_days self.month_days = month_days self.monthly_occurrences = monthly_occurrences class RecurrenceScheduleOccurrence(_serialization.Model): """The recurrence schedule occurrence. :ivar day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". :vartype day: str or ~azure.mgmt.logic.models.DayOfWeek :ivar occurrence: The occurrence. :vartype occurrence: int """ _attribute_map = { "day": {"key": "day", "type": "str"}, "occurrence": {"key": "occurrence", "type": "int"}, } def __init__( self, *, day: Optional[Union[str, "_models.DayOfWeek"]] = None, occurrence: Optional[int] = None, **kwargs ): """ :keyword day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday". :paramtype day: str or ~azure.mgmt.logic.models.DayOfWeek :keyword occurrence: The occurrence. :paramtype occurrence: int """ super().__init__(**kwargs) self.day = day self.occurrence = occurrence class RegenerateActionParameter(_serialization.Model): """The access key regenerate action content. :ivar key_type: The key type. Known values are: "NotSpecified", "Primary", and "Secondary". :vartype key_type: str or ~azure.mgmt.logic.models.KeyType """ _attribute_map = { "key_type": {"key": "keyType", "type": "str"}, } def __init__(self, *, key_type: Optional[Union[str, "_models.KeyType"]] = None, **kwargs): """ :keyword key_type: The key type. Known values are: "NotSpecified", "Primary", and "Secondary". :paramtype key_type: str or ~azure.mgmt.logic.models.KeyType """ super().__init__(**kwargs) self.key_type = key_type class RepetitionIndex(_serialization.Model): """The workflow run action repetition index. All required parameters must be populated in order to send to Azure. :ivar scope_name: The scope. :vartype scope_name: str :ivar item_index: The index. Required. 
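# Illustrative usage sketch: minutes and hours combine as a cross product (an assumption
# based on the usual recurrence semantics), so this schedule fires at 09:00, 09:30,
# 17:00 and 17:30 on the first Monday of each month:
#
#     schedule = RecurrenceSchedule(
#         minutes=[0, 30],
#         hours=[9, 17],
#         monthly_occurrences=[RecurrenceScheduleOccurrence(day="Monday", occurrence=1)],
#     )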
:vartype item_index: int """ _validation = { "item_index": {"required": True}, } _attribute_map = { "scope_name": {"key": "scopeName", "type": "str"}, "item_index": {"key": "itemIndex", "type": "int"}, } def __init__(self, *, item_index: int, scope_name: Optional[str] = None, **kwargs): """ :keyword scope_name: The scope. :paramtype scope_name: str :keyword item_index: The index. Required. :paramtype item_index: int """ super().__init__(**kwargs) self.scope_name = scope_name self.item_index = item_index class Request(_serialization.Model): """A request. :ivar headers: A list of all the headers attached to the request. :vartype headers: JSON :ivar uri: The destination for the request. :vartype uri: str :ivar method: The HTTP method used for the request. :vartype method: str """ _attribute_map = { "headers": {"key": "headers", "type": "object"}, "uri": {"key": "uri", "type": "str"}, "method": {"key": "method", "type": "str"}, } def __init__( self, *, headers: Optional[JSON] = None, uri: Optional[str] = None, method: Optional[str] = None, **kwargs ): """ :keyword headers: A list of all the headers attached to the request. :paramtype headers: JSON :keyword uri: The destination for the request. :paramtype uri: str :keyword method: The HTTP method used for the request. :paramtype method: str """ super().__init__(**kwargs) self.headers = headers self.uri = uri self.method = method class RequestHistory(Resource): """The request history. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar properties: The request history properties. :vartype properties: ~azure.mgmt.logic.models.RequestHistoryProperties """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "properties": {"key": "properties", "type": "RequestHistoryProperties"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, properties: Optional["_models.RequestHistoryProperties"] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword properties: The request history properties. :paramtype properties: ~azure.mgmt.logic.models.RequestHistoryProperties """ super().__init__(location=location, tags=tags, **kwargs) self.properties = properties class RequestHistoryListResult(_serialization.Model): """The list of workflow request histories. :ivar value: A list of workflow request histories. :vartype value: list[~azure.mgmt.logic.models.RequestHistory] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[RequestHistory]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.RequestHistory"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: A list of workflow request histories. 
:paramtype value: list[~azure.mgmt.logic.models.RequestHistory] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class RequestHistoryProperties(_serialization.Model): """The request history. :ivar start_time: The time the request started. :vartype start_time: ~datetime.datetime :ivar end_time: The time the request ended. :vartype end_time: ~datetime.datetime :ivar request: The request. :vartype request: ~azure.mgmt.logic.models.Request :ivar response: The response. :vartype response: ~azure.mgmt.logic.models.Response """ _attribute_map = { "start_time": {"key": "startTime", "type": "iso-8601"}, "end_time": {"key": "endTime", "type": "iso-8601"}, "request": {"key": "request", "type": "Request"}, "response": {"key": "response", "type": "Response"}, } def __init__( self, *, start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, request: Optional["_models.Request"] = None, response: Optional["_models.Response"] = None, **kwargs ): """ :keyword start_time: The time the request started. :paramtype start_time: ~datetime.datetime :keyword end_time: The time the request ended. :paramtype end_time: ~datetime.datetime :keyword request: The request. :paramtype request: ~azure.mgmt.logic.models.Request :keyword response: The response. :paramtype response: ~azure.mgmt.logic.models.Response """ super().__init__(**kwargs) self.start_time = start_time self.end_time = end_time self.request = request self.response = response class Response(_serialization.Model): """A response. :ivar headers: A list of all the headers attached to the response. :vartype headers: JSON :ivar status_code: The status code of the response. :vartype status_code: int :ivar body_link: Details on the location of the body content. :vartype body_link: ~azure.mgmt.logic.models.ContentLink """ _attribute_map = { "headers": {"key": "headers", "type": "object"}, "status_code": {"key": "statusCode", "type": "int"}, "body_link": {"key": "bodyLink", "type": "ContentLink"}, } def __init__( self, *, headers: Optional[JSON] = None, status_code: Optional[int] = None, body_link: Optional["_models.ContentLink"] = None, **kwargs ): """ :keyword headers: A list of all the headers attached to the response. :paramtype headers: JSON :keyword status_code: The status code of the response. :paramtype status_code: int :keyword body_link: Details on the location of the body content. :paramtype body_link: ~azure.mgmt.logic.models.ContentLink """ super().__init__(**kwargs) self.headers = headers self.status_code = status_code self.body_link = body_link class RetryHistory(_serialization.Model): """The retry history. :ivar start_time: Gets the start time. :vartype start_time: ~datetime.datetime :ivar end_time: Gets the end time. :vartype end_time: ~datetime.datetime :ivar code: Gets the status code. :vartype code: str :ivar client_request_id: Gets the client request Id. :vartype client_request_id: str :ivar service_request_id: Gets the service request Id. :vartype service_request_id: str :ivar error: Gets the error response. 
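# Illustrative usage sketch: a request history entry ties the outbound Request to its
# Response; headers are free-form JSON objects and the URI below is hypothetical:
#
#     history = RequestHistoryProperties(
#         start_time=datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc),
#         request=Request(
#             headers={"Content-Type": "application/json"},
#             uri="https://example.com/api",
#             method="GET",
#         ),
#         response=Response(status_code=200, headers={}),
#     )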
:vartype error: ~azure.mgmt.logic.models.ErrorResponse """ _attribute_map = { "start_time": {"key": "startTime", "type": "iso-8601"}, "end_time": {"key": "endTime", "type": "iso-8601"}, "code": {"key": "code", "type": "str"}, "client_request_id": {"key": "clientRequestId", "type": "str"}, "service_request_id": {"key": "serviceRequestId", "type": "str"}, "error": {"key": "error", "type": "ErrorResponse"}, } def __init__( self, *, start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, code: Optional[str] = None, client_request_id: Optional[str] = None, service_request_id: Optional[str] = None, error: Optional["_models.ErrorResponse"] = None, **kwargs ): """ :keyword start_time: Gets the start time. :paramtype start_time: ~datetime.datetime :keyword end_time: Gets the end time. :paramtype end_time: ~datetime.datetime :keyword code: Gets the status code. :paramtype code: str :keyword client_request_id: Gets the client request Id. :paramtype client_request_id: str :keyword service_request_id: Gets the service request Id. :paramtype service_request_id: str :keyword error: Gets the error response. :paramtype error: ~azure.mgmt.logic.models.ErrorResponse """ super().__init__(**kwargs) self.start_time = start_time self.end_time = end_time self.code = code self.client_request_id = client_request_id self.service_request_id = service_request_id self.error = error class RunCorrelation(_serialization.Model): """The correlation properties. :ivar client_tracking_id: The client tracking identifier. :vartype client_tracking_id: str :ivar client_keywords: The client keywords. :vartype client_keywords: list[str] """ _attribute_map = { "client_tracking_id": {"key": "clientTrackingId", "type": "str"}, "client_keywords": {"key": "clientKeywords", "type": "[str]"}, } def __init__( self, *, client_tracking_id: Optional[str] = None, client_keywords: Optional[List[str]] = None, **kwargs ): """ :keyword client_tracking_id: The client tracking identifier. :paramtype client_tracking_id: str :keyword client_keywords: The client keywords. :paramtype client_keywords: list[str] """ super().__init__(**kwargs) self.client_tracking_id = client_tracking_id self.client_keywords = client_keywords class RunActionCorrelation(RunCorrelation): """The workflow run action correlation properties. :ivar client_tracking_id: The client tracking identifier. :vartype client_tracking_id: str :ivar client_keywords: The client keywords. :vartype client_keywords: list[str] :ivar action_tracking_id: The action tracking identifier. :vartype action_tracking_id: str """ _attribute_map = { "client_tracking_id": {"key": "clientTrackingId", "type": "str"}, "client_keywords": {"key": "clientKeywords", "type": "[str]"}, "action_tracking_id": {"key": "actionTrackingId", "type": "str"}, } def __init__( self, *, client_tracking_id: Optional[str] = None, client_keywords: Optional[List[str]] = None, action_tracking_id: Optional[str] = None, **kwargs ): """ :keyword client_tracking_id: The client tracking identifier. :paramtype client_tracking_id: str :keyword client_keywords: The client keywords. :paramtype client_keywords: list[str] :keyword action_tracking_id: The action tracking identifier. :paramtype action_tracking_id: str """ super().__init__(client_tracking_id=client_tracking_id, client_keywords=client_keywords, **kwargs) self.action_tracking_id = action_tracking_id class SetTriggerStateActionDefinition(_serialization.Model): """The set trigger state action definition. 
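# Illustrative usage sketch: RunActionCorrelation extends RunCorrelation with an
# action-level tracking id, and all three properties flatten into one JSON object on
# the wire (ids below are illustrative; serialize() assumed as above):
#
#     correlation = RunActionCorrelation(
#         client_tracking_id="client-123",
#         client_keywords=["invoice", "batch"],
#         action_tracking_id="action-456",
#     )
#     correlation.serialize()
#     # -> {"clientTrackingId": "client-123", "clientKeywords": ["invoice", "batch"],
#     #     "actionTrackingId": "action-456"}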
All required parameters must be populated in order to send to Azure. :ivar source: The source. Required. :vartype source: ~azure.mgmt.logic.models.WorkflowTriggerReference """ _validation = { "source": {"required": True}, } _attribute_map = { "source": {"key": "source", "type": "WorkflowTriggerReference"}, } def __init__(self, *, source: "_models.WorkflowTriggerReference", **kwargs): """ :keyword source: The source. Required. :paramtype source: ~azure.mgmt.logic.models.WorkflowTriggerReference """ super().__init__(**kwargs) self.source = source class Sku(_serialization.Model): """The sku type. All required parameters must be populated in order to send to Azure. :ivar name: The name. Required. Known values are: "NotSpecified", "Free", "Shared", "Basic", "Standard", and "Premium". :vartype name: str or ~azure.mgmt.logic.models.SkuName :ivar plan: The reference to plan. :vartype plan: ~azure.mgmt.logic.models.ResourceReference """ _validation = { "name": {"required": True}, } _attribute_map = { "name": {"key": "name", "type": "str"}, "plan": {"key": "plan", "type": "ResourceReference"}, } def __init__( self, *, name: Union[str, "_models.SkuName"], plan: Optional["_models.ResourceReference"] = None, **kwargs ): """ :keyword name: The name. Required. Known values are: "NotSpecified", "Free", "Shared", "Basic", "Standard", and "Premium". :paramtype name: str or ~azure.mgmt.logic.models.SkuName :keyword plan: The reference to plan. :paramtype plan: ~azure.mgmt.logic.models.ResourceReference """ super().__init__(**kwargs) self.name = name self.plan = plan class SubResource(_serialization.Model): """The sub resource type. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str """ _validation = { "id": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, } def __init__(self, **kwargs): """ """ super().__init__(**kwargs) self.id = None class SwaggerCustomDynamicList(_serialization.Model): """The swagger custom dynamic list. :ivar operation_id: The operation id to fetch dynamic schema. :vartype operation_id: str :ivar built_in_operation: The built in operation. :vartype built_in_operation: str :ivar items_path: The path to a response property (relative to the response object, not the response body) which contains an array of dynamic value items. :vartype items_path: str :ivar item_value_path: The path to a property which defines the value which should be used. :vartype item_value_path: str :ivar item_title_path: The path to an item property which defines the display name of the item. :vartype item_title_path: str :ivar parameters: The parameters. :vartype parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties] """ _attribute_map = { "operation_id": {"key": "operationId", "type": "str"}, "built_in_operation": {"key": "builtInOperation", "type": "str"}, "items_path": {"key": "itemsPath", "type": "str"}, "item_value_path": {"key": "itemValuePath", "type": "str"}, "item_title_path": {"key": "itemTitlePath", "type": "str"}, "parameters": {"key": "parameters", "type": "{SwaggerCustomDynamicProperties}"}, } def __init__( self, *, operation_id: Optional[str] = None, built_in_operation: Optional[str] = None, items_path: Optional[str] = None, item_value_path: Optional[str] = None, item_title_path: Optional[str] = None, parameters: Optional[Dict[str, "_models.SwaggerCustomDynamicProperties"]] = None, **kwargs ): """ :keyword operation_id: The operation id to fetch dynamic schema. 
:paramtype operation_id: str :keyword built_in_operation: The built in operation. :paramtype built_in_operation: str :keyword items_path: The path to a response property (relative to the response object, not the response body) which contains an array of dynamic value items. :paramtype items_path: str :keyword item_value_path: The path to a property which defines the value which should be used. :paramtype item_value_path: str :keyword item_title_path: The path to an item property which defines the display name of the item. :paramtype item_title_path: str :keyword parameters: The parameters. :paramtype parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties] """ super().__init__(**kwargs) self.operation_id = operation_id self.built_in_operation = built_in_operation self.items_path = items_path self.item_value_path = item_value_path self.item_title_path = item_title_path self.parameters = parameters class SwaggerCustomDynamicProperties(_serialization.Model): """The swagger custom dynamic properties. :ivar operation_id: The operation id to fetch dynamic schema. :vartype operation_id: str :ivar value_path: Json pointer to the dynamic schema on the response body. :vartype value_path: str :ivar parameters: The operation parameters. :vartype parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties] """ _attribute_map = { "operation_id": {"key": "operationId", "type": "str"}, "value_path": {"key": "valuePath", "type": "str"}, "parameters": {"key": "parameters", "type": "{SwaggerCustomDynamicProperties}"}, } def __init__( self, *, operation_id: Optional[str] = None, value_path: Optional[str] = None, parameters: Optional[Dict[str, "_models.SwaggerCustomDynamicProperties"]] = None, **kwargs ): """ :keyword operation_id: The operation id to fetch dynamic schema. :paramtype operation_id: str :keyword value_path: Json pointer to the dynamic schema on the response body. :paramtype value_path: str :keyword parameters: The operation parameters. :paramtype parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties] """ super().__init__(**kwargs) self.operation_id = operation_id self.value_path = value_path self.parameters = parameters class SwaggerCustomDynamicSchema(_serialization.Model): """The swagger custom dynamic schema. :ivar operation_id: The operation id to fetch dynamic schema. :vartype operation_id: str :ivar value_path: Json pointer to the dynamic schema on the response body. :vartype value_path: str :ivar parameters: The operation parameters. :vartype parameters: dict[str, JSON] """ _attribute_map = { "operation_id": {"key": "operationId", "type": "str"}, "value_path": {"key": "valuePath", "type": "str"}, "parameters": {"key": "parameters", "type": "{object}"}, } def __init__( self, *, operation_id: Optional[str] = None, value_path: Optional[str] = None, parameters: Optional[Dict[str, JSON]] = None, **kwargs ): """ :keyword operation_id: The operation id to fetch dynamic schema. :paramtype operation_id: str :keyword value_path: Json pointer to the dynamic schema on the response body. :paramtype value_path: str :keyword parameters: The operation parameters. :paramtype parameters: dict[str, JSON] """ super().__init__(**kwargs) self.operation_id = operation_id self.value_path = value_path self.parameters = parameters class SwaggerCustomDynamicTree(_serialization.Model): """The swagger custom dynamic tree. :ivar settings: The tree settings. 
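# Illustrative usage sketch: a dynamic list invokes an operation and projects the
# dropdown items out of its response; the operation id and paths below are hypothetical:
#
#     dynamic_list = SwaggerCustomDynamicList(
#         operation_id="ListFolders",
#         items_path="value",
#         item_value_path="id",
#         item_title_path="displayName",
#     )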
    :vartype settings: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeSettings
    :ivar open: The tree on-open configuration.
    :vartype open: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeCommand
    :ivar browse: The tree on-browse configuration.
    :vartype browse: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeCommand
    """

    _attribute_map = {
        "settings": {"key": "settings", "type": "SwaggerCustomDynamicTreeSettings"},
        "open": {"key": "open", "type": "SwaggerCustomDynamicTreeCommand"},
        "browse": {"key": "browse", "type": "SwaggerCustomDynamicTreeCommand"},
    }

    def __init__(
        self,
        *,
        settings: Optional["_models.SwaggerCustomDynamicTreeSettings"] = None,
        open: Optional["_models.SwaggerCustomDynamicTreeCommand"] = None,
        browse: Optional["_models.SwaggerCustomDynamicTreeCommand"] = None,
        **kwargs
    ):
        """
        :keyword settings: The tree settings.
        :paramtype settings: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeSettings
        :keyword open: The tree on-open configuration.
        :paramtype open: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeCommand
        :keyword browse: The tree on-browse configuration.
        :paramtype browse: ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeCommand
        """
        super().__init__(**kwargs)
        self.settings = settings
        self.open = open
        self.browse = browse


class SwaggerCustomDynamicTreeCommand(_serialization.Model):
    """The swagger tree command.

    :ivar operation_id: The operation id.
    :vartype operation_id: str
    :ivar items_path: The path to a response property which contains an array of tree items.
    :vartype items_path: str
    :ivar item_value_path: The path to an item property which defines the value of the item.
    :vartype item_value_path: str
    :ivar item_title_path: The path to an item property which defines the display name of the
     item.
    :vartype item_title_path: str
    :ivar item_full_title_path: The path to an item property which defines the full display name
     of the item.
    :vartype item_full_title_path: str
    :ivar item_is_parent: The path to an item property which indicates whether the item is a
     parent.
    :vartype item_is_parent: str
    :ivar selectable_filter: The filter which determines whether an item is selectable.
    :vartype selectable_filter: str
    :ivar parameters: Dictionary of :code:`<SwaggerCustomDynamicTreeParameter>`.
    :vartype parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeParameter]
    """

    _attribute_map = {
        "operation_id": {"key": "operationId", "type": "str"},
        "items_path": {"key": "itemsPath", "type": "str"},
        "item_value_path": {"key": "itemValuePath", "type": "str"},
        "item_title_path": {"key": "itemTitlePath", "type": "str"},
        "item_full_title_path": {"key": "itemFullTitlePath", "type": "str"},
        "item_is_parent": {"key": "itemIsParent", "type": "str"},
        "selectable_filter": {"key": "selectableFilter", "type": "str"},
        "parameters": {"key": "parameters", "type": "{SwaggerCustomDynamicTreeParameter}"},
    }

    def __init__(
        self,
        *,
        operation_id: Optional[str] = None,
        items_path: Optional[str] = None,
        item_value_path: Optional[str] = None,
        item_title_path: Optional[str] = None,
        item_full_title_path: Optional[str] = None,
        item_is_parent: Optional[str] = None,
        selectable_filter: Optional[str] = None,
        parameters: Optional[Dict[str, "_models.SwaggerCustomDynamicTreeParameter"]] = None,
        **kwargs
    ):
        """
        :keyword operation_id: The operation id.
        :paramtype operation_id: str
        :keyword items_path: The path to a response property which contains an array of tree items.
        :paramtype items_path: str
        :keyword item_value_path: The path to an item property which defines the value of the item.
        :paramtype item_value_path: str
        :keyword item_title_path: The path to an item property which defines the display name of
         the item.
        :paramtype item_title_path: str
        :keyword item_full_title_path: The path to an item property which defines the full display
         name of the item.
        :paramtype item_full_title_path: str
        :keyword item_is_parent: The path to an item property which indicates whether the item is
         a parent.
        :paramtype item_is_parent: str
        :keyword selectable_filter: The filter which determines whether an item is selectable.
        :paramtype selectable_filter: str
        :keyword parameters: Dictionary of :code:`<SwaggerCustomDynamicTreeParameter>`.
        :paramtype parameters: dict[str, ~azure.mgmt.logic.models.SwaggerCustomDynamicTreeParameter]
        """
        super().__init__(**kwargs)
        self.operation_id = operation_id
        self.items_path = items_path
        self.item_value_path = item_value_path
        self.item_title_path = item_title_path
        self.item_full_title_path = item_full_title_path
        self.item_is_parent = item_is_parent
        self.selectable_filter = selectable_filter
        self.parameters = parameters


class SwaggerCustomDynamicTreeParameter(_serialization.Model):
    """The swagger custom dynamic tree parameter.

    :ivar selected_item_value_path: Gets or sets a path to a property in the currently selected
     item to pass as a value to a parameter for the given operation.
    :vartype selected_item_value_path: str
    :ivar value: The parameter value.
    :vartype value: JSON
    :ivar parameter_reference: The parameter reference.
    :vartype parameter_reference: str
    :ivar required: Indicates whether the parameter is required.
    :vartype required: bool
    """

    _attribute_map = {
        "selected_item_value_path": {"key": "selectedItemValuePath", "type": "str"},
        "value": {"key": "value", "type": "object"},
        "parameter_reference": {"key": "parameterReference", "type": "str"},
        "required": {"key": "required", "type": "bool"},
    }

    def __init__(
        self,
        *,
        selected_item_value_path: Optional[str] = None,
        value: Optional[JSON] = None,
        parameter_reference: Optional[str] = None,
        required: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword selected_item_value_path: Gets or sets a path to a property in the currently
         selected item to pass as a value to a parameter for the given operation.
        :paramtype selected_item_value_path: str
        :keyword value: The parameter value.
        :paramtype value: JSON
        :keyword parameter_reference: The parameter reference.
        :paramtype parameter_reference: str
        :keyword required: Indicates whether the parameter is required.
        :paramtype required: bool
        """
        super().__init__(**kwargs)
        self.selected_item_value_path = selected_item_value_path
        self.value = value
        self.parameter_reference = parameter_reference
        self.required = required


class SwaggerCustomDynamicTreeSettings(_serialization.Model):
    """The swagger custom dynamic tree settings.

    :ivar can_select_parent_nodes: Indicates whether parent nodes can be selected.
    :vartype can_select_parent_nodes: bool
    :ivar can_select_leaf_nodes: Indicates whether leaf nodes can be selected.
    :vartype can_select_leaf_nodes: bool
    """

    _attribute_map = {
        "can_select_parent_nodes": {"key": "CanSelectParentNodes", "type": "bool"},
        "can_select_leaf_nodes": {"key": "CanSelectLeafNodes", "type": "bool"},
    }

    def __init__(
        self, *, can_select_parent_nodes: Optional[bool] = None, can_select_leaf_nodes: Optional[bool] = None, **kwargs
    ):
        """
        :keyword can_select_parent_nodes: Indicates whether parent nodes can be selected.
        :paramtype can_select_parent_nodes: bool
        :keyword can_select_leaf_nodes: Indicates whether leaf nodes can be selected.
        :paramtype can_select_leaf_nodes: bool
        """
        super().__init__(**kwargs)
        self.can_select_parent_nodes = can_select_parent_nodes
        self.can_select_leaf_nodes = can_select_leaf_nodes


class SwaggerExternalDocumentation(_serialization.Model):
    """The swagger external documentation.

    :ivar description: The document description.
    :vartype description: str
    :ivar uri: The documentation Uri.
    :vartype uri: str
    :ivar extensions: The vendor extensions.
    :vartype extensions: dict[str, JSON]
    """

    _attribute_map = {
        "description": {"key": "description", "type": "str"},
        "uri": {"key": "uri", "type": "str"},
        "extensions": {"key": "extensions", "type": "{object}"},
    }

    def __init__(
        self,
        *,
        description: Optional[str] = None,
        uri: Optional[str] = None,
        extensions: Optional[Dict[str, JSON]] = None,
        **kwargs
    ):
        """
        :keyword description: The document description.
        :paramtype description: str
        :keyword uri: The documentation Uri.
        :paramtype uri: str
        :keyword extensions: The vendor extensions.
        :paramtype extensions: dict[str, JSON]
        """
        super().__init__(**kwargs)
        self.description = description
        self.uri = uri
        self.extensions = extensions


class SwaggerSchema(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """The swagger schema.

    :ivar ref: The reference.
    :vartype ref: str
    :ivar type: The type. Known values are: "String", "Number", "Integer", "Boolean", "Array",
     "File", "Object", and "Null".
    :vartype type: str or ~azure.mgmt.logic.models.SwaggerSchemaType
    :ivar title: The title.
    :vartype title: str
    :ivar items: The items schema.
    :vartype items: ~azure.mgmt.logic.models.SwaggerSchema
    :ivar properties: The object properties.
    :vartype properties: dict[str, ~azure.mgmt.logic.models.SwaggerSchema]
    :ivar additional_properties: The additional properties.
    :vartype additional_properties: JSON
    :ivar required: The object required properties.
    :vartype required: list[str]
    :ivar max_properties: The maximum number of allowed properties.
    :vartype max_properties: int
    :ivar min_properties: The minimum number of allowed properties.
    :vartype min_properties: int
    :ivar all_of: The schemas which must pass validation when this schema is used.
    :vartype all_of: list[~azure.mgmt.logic.models.SwaggerSchema]
    :ivar discriminator: The discriminator.
    :vartype discriminator: str
    :ivar read_only: Indicates whether this property is read only, i.e. may be present in a
     response but not in a request.
    :vartype read_only: bool
    :ivar xml: The xml representation format for a property.
    :vartype xml: ~azure.mgmt.logic.models.SwaggerXml
    :ivar external_docs: The external documentation.
    :vartype external_docs: ~azure.mgmt.logic.models.SwaggerExternalDocumentation
    :ivar example: The example value.
    :vartype example: JSON
    :ivar notification_url_extension: Indicates the notification url extension. If this is set,
     the property's value should be a callback url for a webhook.
    :vartype notification_url_extension: bool
    :ivar dynamic_schema_old: The dynamic schema configuration.
    :vartype dynamic_schema_old: ~azure.mgmt.logic.models.SwaggerCustomDynamicSchema
    :ivar dynamic_schema_new: The dynamic schema configuration.
    :vartype dynamic_schema_new: ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties
    :ivar dynamic_list_new: The dynamic list.
    :vartype dynamic_list_new: ~azure.mgmt.logic.models.SwaggerCustomDynamicList
    :ivar dynamic_tree: The dynamic values tree configuration.
:vartype dynamic_tree: ~azure.mgmt.logic.models.SwaggerCustomDynamicTree """ _attribute_map = { "ref": {"key": "ref", "type": "str"}, "type": {"key": "type", "type": "str"}, "title": {"key": "title", "type": "str"}, "items": {"key": "items", "type": "SwaggerSchema"}, "properties": {"key": "properties", "type": "{SwaggerSchema}"}, "additional_properties": {"key": "additionalProperties", "type": "object"}, "required": {"key": "required", "type": "[str]"}, "max_properties": {"key": "maxProperties", "type": "int"}, "min_properties": {"key": "minProperties", "type": "int"}, "all_of": {"key": "allOf", "type": "[SwaggerSchema]"}, "discriminator": {"key": "discriminator", "type": "str"}, "read_only": {"key": "readOnly", "type": "bool"}, "xml": {"key": "xml", "type": "SwaggerXml"}, "external_docs": {"key": "externalDocs", "type": "SwaggerExternalDocumentation"}, "example": {"key": "example", "type": "object"}, "notification_url_extension": {"key": "notificationUrlExtension", "type": "bool"}, "dynamic_schema_old": {"key": "dynamicSchemaOld", "type": "SwaggerCustomDynamicSchema"}, "dynamic_schema_new": {"key": "dynamicSchemaNew", "type": "SwaggerCustomDynamicProperties"}, "dynamic_list_new": {"key": "dynamicListNew", "type": "SwaggerCustomDynamicList"}, "dynamic_tree": {"key": "dynamicTree", "type": "SwaggerCustomDynamicTree"}, } def __init__( self, *, ref: Optional[str] = None, type: Optional[Union[str, "_models.SwaggerSchemaType"]] = None, title: Optional[str] = None, items: Optional["_models.SwaggerSchema"] = None, properties: Optional[Dict[str, "_models.SwaggerSchema"]] = None, additional_properties: Optional[JSON] = None, required: Optional[List[str]] = None, max_properties: Optional[int] = None, min_properties: Optional[int] = None, all_of: Optional[List["_models.SwaggerSchema"]] = None, discriminator: Optional[str] = None, read_only: Optional[bool] = None, xml: Optional["_models.SwaggerXml"] = None, external_docs: Optional["_models.SwaggerExternalDocumentation"] = None, example: Optional[JSON] = None, notification_url_extension: Optional[bool] = None, dynamic_schema_old: Optional["_models.SwaggerCustomDynamicSchema"] = None, dynamic_schema_new: Optional["_models.SwaggerCustomDynamicProperties"] = None, dynamic_list_new: Optional["_models.SwaggerCustomDynamicList"] = None, dynamic_tree: Optional["_models.SwaggerCustomDynamicTree"] = None, **kwargs ): """ :keyword ref: The reference. :paramtype ref: str :keyword type: The type. Known values are: "String", "Number", "Integer", "Boolean", "Array", "File", "Object", and "Null". :paramtype type: str or ~azure.mgmt.logic.models.SwaggerSchemaType :keyword title: The title. :paramtype title: str :keyword items: The items schema. :paramtype items: ~azure.mgmt.logic.models.SwaggerSchema :keyword properties: The object properties. :paramtype properties: dict[str, ~azure.mgmt.logic.models.SwaggerSchema] :keyword additional_properties: The additional properties. :paramtype additional_properties: JSON :keyword required: The object required properties. :paramtype required: list[str] :keyword max_properties: The maximum number of allowed properties. :paramtype max_properties: int :keyword min_properties: The minimum number of allowed properties. :paramtype min_properties: int :keyword all_of: The schemas which must pass validation when this schema is used. :paramtype all_of: list[~azure.mgmt.logic.models.SwaggerSchema] :keyword discriminator: The discriminator. 
        :paramtype discriminator: str
        :keyword read_only: Indicates whether this property is read only and must not be sent in
         a request.
        :paramtype read_only: bool
        :keyword xml: The xml representation format for a property.
        :paramtype xml: ~azure.mgmt.logic.models.SwaggerXml
        :keyword external_docs: The external documentation.
        :paramtype external_docs: ~azure.mgmt.logic.models.SwaggerExternalDocumentation
        :keyword example: The example value.
        :paramtype example: JSON
        :keyword notification_url_extension: Indicates the notification url extension. If this is
         set, the property's value should be a callback url for a webhook.
        :paramtype notification_url_extension: bool
        :keyword dynamic_schema_old: The dynamic schema configuration.
        :paramtype dynamic_schema_old: ~azure.mgmt.logic.models.SwaggerCustomDynamicSchema
        :keyword dynamic_schema_new: The dynamic schema configuration.
        :paramtype dynamic_schema_new: ~azure.mgmt.logic.models.SwaggerCustomDynamicProperties
        :keyword dynamic_list_new: The dynamic list.
        :paramtype dynamic_list_new: ~azure.mgmt.logic.models.SwaggerCustomDynamicList
        :keyword dynamic_tree: The dynamic values tree configuration.
        :paramtype dynamic_tree: ~azure.mgmt.logic.models.SwaggerCustomDynamicTree
        """
        super().__init__(**kwargs)
        self.ref = ref
        self.type = type
        self.title = title
        self.items = items
        self.properties = properties
        self.additional_properties = additional_properties
        self.required = required
        self.max_properties = max_properties
        self.min_properties = min_properties
        self.all_of = all_of
        self.discriminator = discriminator
        self.read_only = read_only
        self.xml = xml
        self.external_docs = external_docs
        self.example = example
        self.notification_url_extension = notification_url_extension
        self.dynamic_schema_old = dynamic_schema_old
        self.dynamic_schema_new = dynamic_schema_new
        self.dynamic_list_new = dynamic_list_new
        self.dynamic_tree = dynamic_tree


class SwaggerXml(_serialization.Model):
    """The Swagger XML.

    :ivar name: The xml element or attribute name.
    :vartype name: str
    :ivar namespace: The xml namespace.
    :vartype namespace: str
    :ivar prefix: The name prefix.
    :vartype prefix: str
    :ivar attribute: Indicates whether the property should be an attribute instead of an element.
    :vartype attribute: bool
    :ivar wrapped: Indicates whether the array elements are wrapped in a container element.
    :vartype wrapped: bool
    :ivar extensions: The vendor extensions.
    :vartype extensions: dict[str, JSON]
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "namespace": {"key": "namespace", "type": "str"},
        "prefix": {"key": "prefix", "type": "str"},
        "attribute": {"key": "attribute", "type": "bool"},
        "wrapped": {"key": "wrapped", "type": "bool"},
        "extensions": {"key": "extensions", "type": "{object}"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        namespace: Optional[str] = None,
        prefix: Optional[str] = None,
        attribute: Optional[bool] = None,
        wrapped: Optional[bool] = None,
        extensions: Optional[Dict[str, JSON]] = None,
        **kwargs
    ):
        """
        :keyword name: The xml element or attribute name.
        :paramtype name: str
        :keyword namespace: The xml namespace.
        :paramtype namespace: str
        :keyword prefix: The name prefix.
        :paramtype prefix: str
        :keyword attribute: Indicates whether the property should be an attribute instead of an
         element.
        :paramtype attribute: bool
        :keyword wrapped: Indicates whether the array elements are wrapped in a container element.
        :paramtype wrapped: bool
        :keyword extensions: The vendor extensions.
        :paramtype extensions: dict[str, JSON]
        """
        super().__init__(**kwargs)
        self.name = name
        self.namespace = namespace
        self.prefix = prefix
        self.attribute = attribute
        self.wrapped = wrapped
        self.extensions = extensions


class TrackingEvent(_serialization.Model):
    """The tracking event.

    All required parameters must be populated in order to send to Azure.

    :ivar event_level: The event level. Required. Known values are: "LogAlways", "Critical",
     "Error", "Warning", "Informational", and "Verbose".
    :vartype event_level: str or ~azure.mgmt.logic.models.EventLevel
    :ivar event_time: The event time. Required.
    :vartype event_time: ~datetime.datetime
    :ivar record_type: The record type. Required. Known values are: "NotSpecified", "Custom",
     "AS2Message", "AS2MDN", "X12Interchange", "X12FunctionalGroup", "X12TransactionSet",
     "X12InterchangeAcknowledgment", "X12FunctionalGroupAcknowledgment",
     "X12TransactionSetAcknowledgment", "EdifactInterchange", "EdifactFunctionalGroup",
     "EdifactTransactionSet", "EdifactInterchangeAcknowledgment",
     "EdifactFunctionalGroupAcknowledgment", and "EdifactTransactionSetAcknowledgment".
    :vartype record_type: str or ~azure.mgmt.logic.models.TrackingRecordType
    :ivar record: The record.
    :vartype record: JSON
    :ivar error: The error.
    :vartype error: ~azure.mgmt.logic.models.TrackingEventErrorInfo
    """

    _validation = {
        "event_level": {"required": True},
        "event_time": {"required": True},
        "record_type": {"required": True},
    }

    _attribute_map = {
        "event_level": {"key": "eventLevel", "type": "str"},
        "event_time": {"key": "eventTime", "type": "iso-8601"},
        "record_type": {"key": "recordType", "type": "str"},
        "record": {"key": "record", "type": "object"},
        "error": {"key": "error", "type": "TrackingEventErrorInfo"},
    }

    def __init__(
        self,
        *,
        event_level: Union[str, "_models.EventLevel"],
        event_time: datetime.datetime,
        record_type: Union[str, "_models.TrackingRecordType"],
        record: Optional[JSON] = None,
        error: Optional["_models.TrackingEventErrorInfo"] = None,
        **kwargs
    ):
        """
        :keyword event_level: The event level. Required. Known values are: "LogAlways",
         "Critical", "Error", "Warning", "Informational", and "Verbose".
        :paramtype event_level: str or ~azure.mgmt.logic.models.EventLevel
        :keyword event_time: The event time. Required.
        :paramtype event_time: ~datetime.datetime
        :keyword record_type: The record type. Required. Known values are: "NotSpecified",
         "Custom", "AS2Message", "AS2MDN", "X12Interchange", "X12FunctionalGroup",
         "X12TransactionSet", "X12InterchangeAcknowledgment", "X12FunctionalGroupAcknowledgment",
         "X12TransactionSetAcknowledgment", "EdifactInterchange", "EdifactFunctionalGroup",
         "EdifactTransactionSet", "EdifactInterchangeAcknowledgment",
         "EdifactFunctionalGroupAcknowledgment", and "EdifactTransactionSetAcknowledgment".
        :paramtype record_type: str or ~azure.mgmt.logic.models.TrackingRecordType
        :keyword record: The record.
        :paramtype record: JSON
        :keyword error: The error.
        :paramtype error: ~azure.mgmt.logic.models.TrackingEventErrorInfo
        """
        super().__init__(**kwargs)
        self.event_level = event_level
        self.event_time = event_time
        self.record_type = record_type
        self.record = record
        self.error = error
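

# A minimal construction sketch for a custom tracking payload (illustrative;
# the values are placeholders, and ``datetime`` is already imported at the top
# of this module):
#
#     event = TrackingEvent(
#         event_level="Informational",
#         event_time=datetime.datetime(2017, 6, 30, 21, 0, 0),
#         record_type="Custom",
#         record={"eventId": "1"},
#     )
#
# A list of such events can then be wrapped in a ``TrackingEventsDefinition``
# (defined below) before being sent to the service.

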
class TrackingEventErrorInfo(_serialization.Model):
    """The tracking event error info.

    :ivar message: The message.
    :vartype message: str
    :ivar code: The code.
    :vartype code: str
    """

    _attribute_map = {
        "message": {"key": "message", "type": "str"},
        "code": {"key": "code", "type": "str"},
    }

    def __init__(self, *, message: Optional[str] = None, code: Optional[str] = None, **kwargs):
        """
        :keyword message: The message.
        :paramtype message: str
        :keyword code: The code.
        :paramtype code: str
        """
        super().__init__(**kwargs)
        self.message = message
        self.code = code


class TrackingEventsDefinition(_serialization.Model):
    """The tracking events definition.

    All required parameters must be populated in order to send to Azure.

    :ivar source_type: The source type. Required.
    :vartype source_type: str
    :ivar track_events_options: The track events options. Known values are: "None" and
     "DisableSourceInfoEnrich".
    :vartype track_events_options: str or ~azure.mgmt.logic.models.TrackEventsOperationOptions
    :ivar events: The events. Required.
    :vartype events: list[~azure.mgmt.logic.models.TrackingEvent]
    """

    _validation = {
        "source_type": {"required": True},
        "events": {"required": True},
    }

    _attribute_map = {
        "source_type": {"key": "sourceType", "type": "str"},
        "track_events_options": {"key": "trackEventsOptions", "type": "str"},
        "events": {"key": "events", "type": "[TrackingEvent]"},
    }

    def __init__(
        self,
        *,
        source_type: str,
        events: List["_models.TrackingEvent"],
        track_events_options: Optional[Union[str, "_models.TrackEventsOperationOptions"]] = None,
        **kwargs
    ):
        """
        :keyword source_type: The source type. Required.
        :paramtype source_type: str
        :keyword track_events_options: The track events options. Known values are: "None" and
         "DisableSourceInfoEnrich".
        :paramtype track_events_options: str or
         ~azure.mgmt.logic.models.TrackEventsOperationOptions
        :keyword events: The events. Required.
        :paramtype events: list[~azure.mgmt.logic.models.TrackingEvent]
        """
        super().__init__(**kwargs)
        self.source_type = source_type
        self.track_events_options = track_events_options
        self.events = events


class UserAssignedIdentity(_serialization.Model):
    """User Assigned identity properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: Principal Id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: Client Id of user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        "principal_id": {"readonly": True},
        "client_id": {"readonly": True},
    }

    _attribute_map = {
        "principal_id": {"key": "principalId", "type": "str"},
        "client_id": {"key": "clientId", "type": "str"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        self.principal_id = None
        self.client_id = None


class Workflow(Resource):  # pylint: disable=too-many-instance-attributes
    """The workflow type.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar location: The resource location.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    :ivar identity: Managed service identity properties.
    :vartype identity: ~azure.mgmt.logic.models.ManagedServiceIdentity
    :ivar provisioning_state: Gets the provisioning state. Known values are: "NotSpecified",
     "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
     "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
     "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
    :ivar created_time: Gets the created time.
    :vartype created_time: ~datetime.datetime
    :ivar changed_time: Gets the changed time.
    :vartype changed_time: ~datetime.datetime
    :ivar state: The state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled",
     "Deleted", and "Suspended".
    :vartype state: str or ~azure.mgmt.logic.models.WorkflowState
    :ivar version: Gets the version.
    :vartype version: str
    :ivar access_endpoint: Gets the access endpoint.
    :vartype access_endpoint: str
    :ivar endpoints_configuration: The endpoints configuration.
    :vartype endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
    :ivar access_control: The access control configuration.
    :vartype access_control: ~azure.mgmt.logic.models.FlowAccessControlConfiguration
    :ivar sku: The sku.
    :vartype sku: ~azure.mgmt.logic.models.Sku
    :ivar integration_account: The integration account.
    :vartype integration_account: ~azure.mgmt.logic.models.ResourceReference
    :ivar integration_service_environment: The integration service environment.
    :vartype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
    :ivar definition: The definition.
    :vartype definition: JSON
    :ivar parameters: The parameters.
    :vartype parameters: dict[str, ~azure.mgmt.logic.models.WorkflowParameter]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "created_time": {"readonly": True},
        "changed_time": {"readonly": True},
        "version": {"readonly": True},
        "access_endpoint": {"readonly": True},
        "sku": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "identity": {"key": "identity", "type": "ManagedServiceIdentity"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "created_time": {"key": "properties.createdTime", "type": "iso-8601"},
        "changed_time": {"key": "properties.changedTime", "type": "iso-8601"},
        "state": {"key": "properties.state", "type": "str"},
        "version": {"key": "properties.version", "type": "str"},
        "access_endpoint": {"key": "properties.accessEndpoint", "type": "str"},
        "endpoints_configuration": {"key": "properties.endpointsConfiguration", "type": "FlowEndpointsConfiguration"},
        "access_control": {"key": "properties.accessControl", "type": "FlowAccessControlConfiguration"},
        "sku": {"key": "properties.sku", "type": "Sku"},
        "integration_account": {"key": "properties.integrationAccount", "type": "ResourceReference"},
        "integration_service_environment": {
            "key": "properties.integrationServiceEnvironment",
            "type": "ResourceReference",
        },
        "definition": {"key": "properties.definition", "type": "object"},
        "parameters": {"key": "properties.parameters", "type": "{WorkflowParameter}"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        identity: Optional["_models.ManagedServiceIdentity"] = None,
        state: Optional[Union[str, "_models.WorkflowState"]] = None,
        endpoints_configuration: Optional["_models.FlowEndpointsConfiguration"] = None,
        access_control: Optional["_models.FlowAccessControlConfiguration"] = None,
        integration_account: Optional["_models.ResourceReference"] = None,
        integration_service_environment: Optional["_models.ResourceReference"] = None,
        definition: Optional[JSON] = None,
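        # Read-only properties that only the service populates (provisioning_state,
        # created_time, changed_time, version, access_endpoint, sku) are intentionally
        # not constructor arguments; they are initialized to None below and filled in
        # from service responses.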
        parameters: Optional[Dict[str, "_models.WorkflowParameter"]] = None,
        **kwargs
    ):
        """
        :keyword location: The resource location.
        :paramtype location: str
        :keyword tags: The resource tags.
        :paramtype tags: dict[str, str]
        :keyword identity: Managed service identity properties.
        :paramtype identity: ~azure.mgmt.logic.models.ManagedServiceIdentity
        :keyword state: The state. Known values are: "NotSpecified", "Completed", "Enabled",
         "Disabled", "Deleted", and "Suspended".
        :paramtype state: str or ~azure.mgmt.logic.models.WorkflowState
        :keyword endpoints_configuration: The endpoints configuration.
        :paramtype endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
        :keyword access_control: The access control configuration.
        :paramtype access_control: ~azure.mgmt.logic.models.FlowAccessControlConfiguration
        :keyword integration_account: The integration account.
        :paramtype integration_account: ~azure.mgmt.logic.models.ResourceReference
        :keyword integration_service_environment: The integration service environment.
        :paramtype integration_service_environment: ~azure.mgmt.logic.models.ResourceReference
        :keyword definition: The definition.
        :paramtype definition: JSON
        :keyword parameters: The parameters.
        :paramtype parameters: dict[str, ~azure.mgmt.logic.models.WorkflowParameter]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.identity = identity
        self.provisioning_state = None
        self.created_time = None
        self.changed_time = None
        self.state = state
        self.version = None
        self.access_endpoint = None
        self.endpoints_configuration = endpoints_configuration
        self.access_control = access_control
        self.sku = None
        self.integration_account = integration_account
        self.integration_service_environment = integration_service_environment
        self.definition = definition
        self.parameters = parameters


class WorkflowFilter(_serialization.Model):
    """The workflow filter.

    :ivar state: The state of workflows. Known values are: "NotSpecified", "Completed", "Enabled",
     "Disabled", "Deleted", and "Suspended".
    :vartype state: str or ~azure.mgmt.logic.models.WorkflowState
    """

    _attribute_map = {
        "state": {"key": "state", "type": "str"},
    }

    def __init__(self, *, state: Optional[Union[str, "_models.WorkflowState"]] = None, **kwargs):
        """
        :keyword state: The state of workflows. Known values are: "NotSpecified", "Completed",
         "Enabled", "Disabled", "Deleted", and "Suspended".
        :paramtype state: str or ~azure.mgmt.logic.models.WorkflowState
        """
        super().__init__(**kwargs)
        self.state = state


class WorkflowListResult(_serialization.Model):
    """The list of workflows.

    :ivar value: The list of workflows.
    :vartype value: list[~azure.mgmt.logic.models.Workflow]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[Workflow]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[List["_models.Workflow"]] = None, next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: The list of workflows.
        :paramtype value: list[~azure.mgmt.logic.models.Workflow]
        :keyword next_link: The URL to get the next set of results.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
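

# A minimal creation sketch (illustrative; assumes an authenticated
# ``LogicManagementClient`` named ``client`` and placeholder resource names):
#
#     workflow = Workflow(
#         location="westus",
#         definition={
#             "$schema": "https://schema.management.azure.com/providers/"
#                        "Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
#             "contentVersion": "1.0.0.0",
#             "triggers": {},
#             "actions": {},
#         },
#     )
#     client.workflows.create_or_update("my-resource-group", "my-workflow", workflow)

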
class WorkflowParameter(_serialization.Model):
    """The workflow parameters.

    :ivar type: The type. Known values are: "NotSpecified", "String", "SecureString", "Int",
     "Float", "Bool", "Array", "Object", and "SecureObject".
    :vartype type: str or ~azure.mgmt.logic.models.ParameterType
    :ivar value: The value.
    :vartype value: JSON
    :ivar metadata: The metadata.
    :vartype metadata: JSON
    :ivar description: The description.
    :vartype description: str
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "value": {"key": "value", "type": "object"},
        "metadata": {"key": "metadata", "type": "object"},
        "description": {"key": "description", "type": "str"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "_models.ParameterType"]] = None,
        value: Optional[JSON] = None,
        metadata: Optional[JSON] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword type: The type. Known values are: "NotSpecified", "String", "SecureString",
         "Int", "Float", "Bool", "Array", "Object", and "SecureObject".
        :paramtype type: str or ~azure.mgmt.logic.models.ParameterType
        :keyword value: The value.
        :paramtype value: JSON
        :keyword metadata: The metadata.
        :paramtype metadata: JSON
        :keyword description: The description.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.type = type
        self.value = value
        self.metadata = metadata
        self.description = description


class WorkflowOutputParameter(WorkflowParameter):
    """The workflow output parameter.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The type. Known values are: "NotSpecified", "String", "SecureString", "Int",
     "Float", "Bool", "Array", "Object", and "SecureObject".
    :vartype type: str or ~azure.mgmt.logic.models.ParameterType
    :ivar value: The value.
    :vartype value: JSON
    :ivar metadata: The metadata.
    :vartype metadata: JSON
    :ivar description: The description.
    :vartype description: str
    :ivar error: Gets the error.
    :vartype error: JSON
    """

    _validation = {
        "error": {"readonly": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "value": {"key": "value", "type": "object"},
        "metadata": {"key": "metadata", "type": "object"},
        "description": {"key": "description", "type": "str"},
        "error": {"key": "error", "type": "object"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "_models.ParameterType"]] = None,
        value: Optional[JSON] = None,
        metadata: Optional[JSON] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword type: The type. Known values are: "NotSpecified", "String", "SecureString",
         "Int", "Float", "Bool", "Array", "Object", and "SecureObject".
        :paramtype type: str or ~azure.mgmt.logic.models.ParameterType
        :keyword value: The value.
        :paramtype value: JSON
        :keyword metadata: The metadata.
        :paramtype metadata: JSON
        :keyword description: The description.
        :paramtype description: str
        """
        super().__init__(type=type, value=value, metadata=metadata, description=description, **kwargs)
        self.error = None


class WorkflowReference(ResourceReference):
    """The workflow reference.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs):  # pylint: disable=redefined-builtin
        """
        :keyword id: The resource id.
        :paramtype id: str
        """
        super().__init__(id=id, **kwargs)


class WorkflowRun(SubResource):  # pylint: disable=too-many-instance-attributes
    """The workflow run.
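
    Runs are created by the service when a trigger fires, so instances are
    normally retrieved rather than constructed locally. A minimal retrieval
    sketch (assuming an authenticated ``LogicManagementClient`` named
    ``client``; the resource names are placeholders):

    .. code-block:: python

        run = client.workflow_runs.get("my-resource-group", "my-workflow", "my-run-name")
        print(run.status, run.start_time, run.end_time)
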
Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the workflow run name. :vartype name: str :ivar type: Gets the workflow run type. :vartype type: str :ivar wait_end_time: Gets the wait end time. :vartype wait_end_time: ~datetime.datetime :ivar start_time: Gets the start time. :vartype start_time: ~datetime.datetime :ivar end_time: Gets the end time. :vartype end_time: ~datetime.datetime :ivar status: Gets the status. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar code: Gets the code. :vartype code: str :ivar error: Gets the error. :vartype error: JSON :ivar correlation_id: Gets the correlation id. :vartype correlation_id: str :ivar correlation: The run correlation. :vartype correlation: ~azure.mgmt.logic.models.Correlation :ivar workflow: Gets the reference to workflow version. :vartype workflow: ~azure.mgmt.logic.models.ResourceReference :ivar trigger: Gets the fired trigger. :vartype trigger: ~azure.mgmt.logic.models.WorkflowRunTrigger :ivar outputs: Gets the outputs. :vartype outputs: dict[str, ~azure.mgmt.logic.models.WorkflowOutputParameter] :ivar response: Gets the response of the flow run. :vartype response: ~azure.mgmt.logic.models.WorkflowRunTrigger """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "wait_end_time": {"readonly": True}, "start_time": {"readonly": True}, "end_time": {"readonly": True}, "status": {"readonly": True}, "code": {"readonly": True}, "error": {"readonly": True}, "correlation_id": {"readonly": True}, "workflow": {"readonly": True}, "trigger": {"readonly": True}, "outputs": {"readonly": True}, "response": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "wait_end_time": {"key": "properties.waitEndTime", "type": "iso-8601"}, "start_time": {"key": "properties.startTime", "type": "iso-8601"}, "end_time": {"key": "properties.endTime", "type": "iso-8601"}, "status": {"key": "properties.status", "type": "str"}, "code": {"key": "properties.code", "type": "str"}, "error": {"key": "properties.error", "type": "object"}, "correlation_id": {"key": "properties.correlationId", "type": "str"}, "correlation": {"key": "properties.correlation", "type": "Correlation"}, "workflow": {"key": "properties.workflow", "type": "ResourceReference"}, "trigger": {"key": "properties.trigger", "type": "WorkflowRunTrigger"}, "outputs": {"key": "properties.outputs", "type": "{WorkflowOutputParameter}"}, "response": {"key": "properties.response", "type": "WorkflowRunTrigger"}, } def __init__(self, *, correlation: Optional["_models.Correlation"] = None, **kwargs): """ :keyword correlation: The run correlation. :paramtype correlation: ~azure.mgmt.logic.models.Correlation """ super().__init__(**kwargs) self.name = None self.type = None self.wait_end_time = None self.start_time = None self.end_time = None self.status = None self.code = None self.error = None self.correlation_id = None self.correlation = correlation self.workflow = None self.trigger = None self.outputs = None self.response = None class WorkflowRunAction(SubResource): # pylint: disable=too-many-instance-attributes """The workflow run action. 
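
    Individual actions of a run are read back from the service. A minimal
    listing sketch (assuming an authenticated ``LogicManagementClient`` named
    ``client``; the resource names are placeholders):

    .. code-block:: python

        for action in client.workflow_run_actions.list("my-resource-group", "my-workflow", "my-run-name"):
            print(action.name, action.status, action.code)
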
Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the workflow run action name. :vartype name: str :ivar type: Gets the workflow run action type. :vartype type: str :ivar start_time: Gets the start time. :vartype start_time: ~datetime.datetime :ivar end_time: Gets the end time. :vartype end_time: ~datetime.datetime :ivar status: Gets the status. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar code: Gets the code. :vartype code: str :ivar error: Gets the error. :vartype error: JSON :ivar tracking_id: Gets the tracking id. :vartype tracking_id: str :ivar correlation: The correlation properties. :vartype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :ivar inputs_link: Gets the link to inputs. :vartype inputs_link: ~azure.mgmt.logic.models.ContentLink :ivar outputs_link: Gets the link to outputs. :vartype outputs_link: ~azure.mgmt.logic.models.ContentLink :ivar tracked_properties: Gets the tracked properties. :vartype tracked_properties: JSON :ivar retry_history: Gets the retry histories. :vartype retry_history: list[~azure.mgmt.logic.models.RetryHistory] """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "start_time": {"readonly": True}, "end_time": {"readonly": True}, "status": {"readonly": True}, "code": {"readonly": True}, "error": {"readonly": True}, "tracking_id": {"readonly": True}, "inputs_link": {"readonly": True}, "outputs_link": {"readonly": True}, "tracked_properties": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "start_time": {"key": "properties.startTime", "type": "iso-8601"}, "end_time": {"key": "properties.endTime", "type": "iso-8601"}, "status": {"key": "properties.status", "type": "str"}, "code": {"key": "properties.code", "type": "str"}, "error": {"key": "properties.error", "type": "object"}, "tracking_id": {"key": "properties.trackingId", "type": "str"}, "correlation": {"key": "properties.correlation", "type": "RunActionCorrelation"}, "inputs_link": {"key": "properties.inputsLink", "type": "ContentLink"}, "outputs_link": {"key": "properties.outputsLink", "type": "ContentLink"}, "tracked_properties": {"key": "properties.trackedProperties", "type": "object"}, "retry_history": {"key": "properties.retryHistory", "type": "[RetryHistory]"}, } def __init__( self, *, correlation: Optional["_models.RunActionCorrelation"] = None, retry_history: Optional[List["_models.RetryHistory"]] = None, **kwargs ): """ :keyword correlation: The correlation properties. :paramtype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :keyword retry_history: Gets the retry histories. :paramtype retry_history: list[~azure.mgmt.logic.models.RetryHistory] """ super().__init__(**kwargs) self.name = None self.type = None self.start_time = None self.end_time = None self.status = None self.code = None self.error = None self.tracking_id = None self.correlation = correlation self.inputs_link = None self.outputs_link = None self.tracked_properties = None self.retry_history = retry_history class WorkflowRunActionFilter(_serialization.Model): """The workflow run action filter. :ivar status: The status of workflow run action. 
Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus """ _attribute_map = { "status": {"key": "status", "type": "str"}, } def __init__(self, *, status: Optional[Union[str, "_models.WorkflowStatus"]] = None, **kwargs): """ :keyword status: The status of workflow run action. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus """ super().__init__(**kwargs) self.status = status class WorkflowRunActionListResult(_serialization.Model): """The list of workflow run actions. :ivar value: A list of workflow run actions. :vartype value: list[~azure.mgmt.logic.models.WorkflowRunAction] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[WorkflowRunAction]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.WorkflowRunAction"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: A list of workflow run actions. :paramtype value: list[~azure.mgmt.logic.models.WorkflowRunAction] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class WorkflowRunActionRepetitionDefinition(Resource): # pylint: disable=too-many-instance-attributes """The workflow run action repetition definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the resource name. :vartype name: str :ivar type: Gets the resource type. :vartype type: str :ivar location: The resource location. :vartype location: str :ivar tags: The resource tags. :vartype tags: dict[str, str] :ivar start_time: The start time of the workflow scope repetition. :vartype start_time: ~datetime.datetime :ivar end_time: The end time of the workflow scope repetition. :vartype end_time: ~datetime.datetime :ivar correlation: The correlation properties. :vartype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :ivar status: The status of the workflow scope repetition. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar code: The workflow scope repetition code. :vartype code: str :ivar error: Anything. :vartype error: any :ivar tracking_id: Gets the tracking id. :vartype tracking_id: str :ivar inputs: Gets the inputs. :vartype inputs: JSON :ivar inputs_link: Gets the link to inputs. :vartype inputs_link: ~azure.mgmt.logic.models.ContentLink :ivar outputs: Gets the outputs. :vartype outputs: JSON :ivar outputs_link: Gets the link to outputs. :vartype outputs_link: ~azure.mgmt.logic.models.ContentLink :ivar tracked_properties: Gets the tracked properties. :vartype tracked_properties: JSON :ivar retry_history: Gets the retry histories. :vartype retry_history: list[~azure.mgmt.logic.models.RetryHistory] :ivar iteration_count: :vartype iteration_count: int :ivar repetition_indexes: The repetition indexes. 
:vartype repetition_indexes: list[~azure.mgmt.logic.models.RepetitionIndex] """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "tracking_id": {"readonly": True}, "inputs": {"readonly": True}, "inputs_link": {"readonly": True}, "outputs": {"readonly": True}, "outputs_link": {"readonly": True}, "tracked_properties": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "location": {"key": "location", "type": "str"}, "tags": {"key": "tags", "type": "{str}"}, "start_time": {"key": "properties.startTime", "type": "iso-8601"}, "end_time": {"key": "properties.endTime", "type": "iso-8601"}, "correlation": {"key": "properties.correlation", "type": "RunActionCorrelation"}, "status": {"key": "properties.status", "type": "str"}, "code": {"key": "properties.code", "type": "str"}, "error": {"key": "properties.error", "type": "object"}, "tracking_id": {"key": "properties.trackingId", "type": "str"}, "inputs": {"key": "properties.inputs", "type": "object"}, "inputs_link": {"key": "properties.inputsLink", "type": "ContentLink"}, "outputs": {"key": "properties.outputs", "type": "object"}, "outputs_link": {"key": "properties.outputsLink", "type": "ContentLink"}, "tracked_properties": {"key": "properties.trackedProperties", "type": "object"}, "retry_history": {"key": "properties.retryHistory", "type": "[RetryHistory]"}, "iteration_count": {"key": "properties.iterationCount", "type": "int"}, "repetition_indexes": {"key": "properties.repetitionIndexes", "type": "[RepetitionIndex]"}, } def __init__( self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, correlation: Optional["_models.RunActionCorrelation"] = None, status: Optional[Union[str, "_models.WorkflowStatus"]] = None, code: Optional[str] = None, error: Optional[Any] = None, retry_history: Optional[List["_models.RetryHistory"]] = None, iteration_count: Optional[int] = None, repetition_indexes: Optional[List["_models.RepetitionIndex"]] = None, **kwargs ): """ :keyword location: The resource location. :paramtype location: str :keyword tags: The resource tags. :paramtype tags: dict[str, str] :keyword start_time: The start time of the workflow scope repetition. :paramtype start_time: ~datetime.datetime :keyword end_time: The end time of the workflow scope repetition. :paramtype end_time: ~datetime.datetime :keyword correlation: The correlation properties. :paramtype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :keyword status: The status of the workflow scope repetition. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus :keyword code: The workflow scope repetition code. :paramtype code: str :keyword error: Anything. :paramtype error: any :keyword retry_history: Gets the retry histories. :paramtype retry_history: list[~azure.mgmt.logic.models.RetryHistory] :keyword iteration_count: :paramtype iteration_count: int :keyword repetition_indexes: The repetition indexes. 
        :paramtype repetition_indexes: list[~azure.mgmt.logic.models.RepetitionIndex]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.start_time = start_time
        self.end_time = end_time
        self.correlation = correlation
        self.status = status
        self.code = code
        self.error = error
        self.tracking_id = None
        self.inputs = None
        self.inputs_link = None
        self.outputs = None
        self.outputs_link = None
        self.tracked_properties = None
        self.retry_history = retry_history
        self.iteration_count = iteration_count
        self.repetition_indexes = repetition_indexes


class WorkflowRunActionRepetitionDefinitionCollection(_serialization.Model):
    """A collection of workflow run action repetitions.

    :ivar next_link: The link used to get the next page of results.
    :vartype next_link: str
    :ivar value:
    :vartype value: list[~azure.mgmt.logic.models.WorkflowRunActionRepetitionDefinition]
    """

    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "value": {"key": "value", "type": "[WorkflowRunActionRepetitionDefinition]"},
    }

    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        value: Optional[List["_models.WorkflowRunActionRepetitionDefinition"]] = None,
        **kwargs
    ):
        """
        :keyword next_link: The link used to get the next page of results.
        :paramtype next_link: str
        :keyword value:
        :paramtype value: list[~azure.mgmt.logic.models.WorkflowRunActionRepetitionDefinition]
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value


class WorkflowRunActionRepetitionProperties(OperationResult):  # pylint: disable=too-many-instance-attributes
    """The workflow run action repetition properties definition.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar start_time: The start time of the workflow scope repetition.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time of the workflow scope repetition.
    :vartype end_time: ~datetime.datetime
    :ivar correlation: The correlation properties.
    :vartype correlation: ~azure.mgmt.logic.models.RunActionCorrelation
    :ivar status: The status of the workflow scope repetition. Known values are: "NotSpecified",
     "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed",
     "Faulted", "TimedOut", "Aborted", and "Ignored".
    :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus
    :ivar code: The workflow scope repetition code.
    :vartype code: str
    :ivar error: Anything.
    :vartype error: any
    :ivar tracking_id: Gets the tracking id.
    :vartype tracking_id: str
    :ivar inputs: Gets the inputs.
    :vartype inputs: JSON
    :ivar inputs_link: Gets the link to inputs.
    :vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
    :ivar outputs: Gets the outputs.
    :vartype outputs: JSON
    :ivar outputs_link: Gets the link to outputs.
    :vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
    :ivar tracked_properties: Gets the tracked properties.
    :vartype tracked_properties: JSON
    :ivar retry_history: Gets the retry histories.
    :vartype retry_history: list[~azure.mgmt.logic.models.RetryHistory]
    :ivar iteration_count:
    :vartype iteration_count: int
    :ivar repetition_indexes: The repetition indexes.
:vartype repetition_indexes: list[~azure.mgmt.logic.models.RepetitionIndex] """ _validation = { "tracking_id": {"readonly": True}, "inputs": {"readonly": True}, "inputs_link": {"readonly": True}, "outputs": {"readonly": True}, "outputs_link": {"readonly": True}, "tracked_properties": {"readonly": True}, } _attribute_map = { "start_time": {"key": "startTime", "type": "iso-8601"}, "end_time": {"key": "endTime", "type": "iso-8601"}, "correlation": {"key": "correlation", "type": "RunActionCorrelation"}, "status": {"key": "status", "type": "str"}, "code": {"key": "code", "type": "str"}, "error": {"key": "error", "type": "object"}, "tracking_id": {"key": "trackingId", "type": "str"}, "inputs": {"key": "inputs", "type": "object"}, "inputs_link": {"key": "inputsLink", "type": "ContentLink"}, "outputs": {"key": "outputs", "type": "object"}, "outputs_link": {"key": "outputsLink", "type": "ContentLink"}, "tracked_properties": {"key": "trackedProperties", "type": "object"}, "retry_history": {"key": "retryHistory", "type": "[RetryHistory]"}, "iteration_count": {"key": "iterationCount", "type": "int"}, "repetition_indexes": {"key": "repetitionIndexes", "type": "[RepetitionIndex]"}, } def __init__( self, *, start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, correlation: Optional["_models.RunActionCorrelation"] = None, status: Optional[Union[str, "_models.WorkflowStatus"]] = None, code: Optional[str] = None, error: Optional[Any] = None, retry_history: Optional[List["_models.RetryHistory"]] = None, iteration_count: Optional[int] = None, repetition_indexes: Optional[List["_models.RepetitionIndex"]] = None, **kwargs ): """ :keyword start_time: The start time of the workflow scope repetition. :paramtype start_time: ~datetime.datetime :keyword end_time: The end time of the workflow scope repetition. :paramtype end_time: ~datetime.datetime :keyword correlation: The correlation properties. :paramtype correlation: ~azure.mgmt.logic.models.RunActionCorrelation :keyword status: The status of the workflow scope repetition. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus :keyword code: The workflow scope repetition code. :paramtype code: str :keyword error: Anything. :paramtype error: any :keyword retry_history: Gets the retry histories. :paramtype retry_history: list[~azure.mgmt.logic.models.RetryHistory] :keyword iteration_count: :paramtype iteration_count: int :keyword repetition_indexes: The repetition indexes. :paramtype repetition_indexes: list[~azure.mgmt.logic.models.RepetitionIndex] """ super().__init__( start_time=start_time, end_time=end_time, correlation=correlation, status=status, code=code, error=error, retry_history=retry_history, iteration_count=iteration_count, **kwargs ) self.repetition_indexes = repetition_indexes class WorkflowRunFilter(_serialization.Model): """The workflow run filter. :ivar status: The status of workflow run. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". 
:vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus """ _attribute_map = { "status": {"key": "status", "type": "str"}, } def __init__(self, *, status: Optional[Union[str, "_models.WorkflowStatus"]] = None, **kwargs): """ :keyword status: The status of workflow run. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus """ super().__init__(**kwargs) self.status = status class WorkflowRunListResult(_serialization.Model): """The list of workflow runs. :ivar value: A list of workflow runs. :vartype value: list[~azure.mgmt.logic.models.WorkflowRun] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[WorkflowRun]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.WorkflowRun"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: A list of workflow runs. :paramtype value: list[~azure.mgmt.logic.models.WorkflowRun] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class WorkflowRunTrigger(_serialization.Model): # pylint: disable=too-many-instance-attributes """The workflow run trigger. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: Gets the name. :vartype name: str :ivar inputs: Gets the inputs. :vartype inputs: JSON :ivar inputs_link: Gets the link to inputs. :vartype inputs_link: ~azure.mgmt.logic.models.ContentLink :ivar outputs: Gets the outputs. :vartype outputs: JSON :ivar outputs_link: Gets the link to outputs. :vartype outputs_link: ~azure.mgmt.logic.models.ContentLink :ivar scheduled_time: Gets the scheduled time. :vartype scheduled_time: ~datetime.datetime :ivar start_time: Gets the start time. :vartype start_time: ~datetime.datetime :ivar end_time: Gets the end time. :vartype end_time: ~datetime.datetime :ivar tracking_id: Gets the tracking id. :vartype tracking_id: str :ivar correlation: The run correlation. :vartype correlation: ~azure.mgmt.logic.models.Correlation :ivar code: Gets the code. :vartype code: str :ivar status: Gets the status. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar error: Gets the error. :vartype error: JSON :ivar tracked_properties: Gets the tracked properties. 
:vartype tracked_properties: JSON """ _validation = { "name": {"readonly": True}, "inputs": {"readonly": True}, "inputs_link": {"readonly": True}, "outputs": {"readonly": True}, "outputs_link": {"readonly": True}, "scheduled_time": {"readonly": True}, "start_time": {"readonly": True}, "end_time": {"readonly": True}, "tracking_id": {"readonly": True}, "code": {"readonly": True}, "status": {"readonly": True}, "error": {"readonly": True}, "tracked_properties": {"readonly": True}, } _attribute_map = { "name": {"key": "name", "type": "str"}, "inputs": {"key": "inputs", "type": "object"}, "inputs_link": {"key": "inputsLink", "type": "ContentLink"}, "outputs": {"key": "outputs", "type": "object"}, "outputs_link": {"key": "outputsLink", "type": "ContentLink"}, "scheduled_time": {"key": "scheduledTime", "type": "iso-8601"}, "start_time": {"key": "startTime", "type": "iso-8601"}, "end_time": {"key": "endTime", "type": "iso-8601"}, "tracking_id": {"key": "trackingId", "type": "str"}, "correlation": {"key": "correlation", "type": "Correlation"}, "code": {"key": "code", "type": "str"}, "status": {"key": "status", "type": "str"}, "error": {"key": "error", "type": "object"}, "tracked_properties": {"key": "trackedProperties", "type": "object"}, } def __init__(self, *, correlation: Optional["_models.Correlation"] = None, **kwargs): """ :keyword correlation: The run correlation. :paramtype correlation: ~azure.mgmt.logic.models.Correlation """ super().__init__(**kwargs) self.name = None self.inputs = None self.inputs_link = None self.outputs = None self.outputs_link = None self.scheduled_time = None self.start_time = None self.end_time = None self.tracking_id = None self.correlation = correlation self.code = None self.status = None self.error = None self.tracked_properties = None class WorkflowTrigger(SubResource): # pylint: disable=too-many-instance-attributes """The workflow trigger. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the workflow trigger name. :vartype name: str :ivar type: Gets the workflow trigger type. :vartype type: str :ivar provisioning_state: Gets the provisioning state. Known values are: "NotSpecified", "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled", "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering", "Unregistered", and "Completed". :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowTriggerProvisioningState :ivar created_time: Gets the created time. :vartype created_time: ~datetime.datetime :ivar changed_time: Gets the changed time. :vartype changed_time: ~datetime.datetime :ivar state: Gets the state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". :vartype state: str or ~azure.mgmt.logic.models.WorkflowState :ivar status: Gets the status. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar last_execution_time: Gets the last execution time. :vartype last_execution_time: ~datetime.datetime :ivar next_execution_time: Gets the next execution time. :vartype next_execution_time: ~datetime.datetime :ivar recurrence: Gets the workflow trigger recurrence. 
:vartype recurrence: ~azure.mgmt.logic.models.WorkflowTriggerRecurrence :ivar workflow: Gets the reference to workflow. :vartype workflow: ~azure.mgmt.logic.models.ResourceReference """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "provisioning_state": {"readonly": True}, "created_time": {"readonly": True}, "changed_time": {"readonly": True}, "state": {"readonly": True}, "status": {"readonly": True}, "last_execution_time": {"readonly": True}, "next_execution_time": {"readonly": True}, "recurrence": {"readonly": True}, "workflow": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, "created_time": {"key": "properties.createdTime", "type": "iso-8601"}, "changed_time": {"key": "properties.changedTime", "type": "iso-8601"}, "state": {"key": "properties.state", "type": "str"}, "status": {"key": "properties.status", "type": "str"}, "last_execution_time": {"key": "properties.lastExecutionTime", "type": "iso-8601"}, "next_execution_time": {"key": "properties.nextExecutionTime", "type": "iso-8601"}, "recurrence": {"key": "properties.recurrence", "type": "WorkflowTriggerRecurrence"}, "workflow": {"key": "properties.workflow", "type": "ResourceReference"}, } def __init__(self, **kwargs): """ """ super().__init__(**kwargs) self.name = None self.type = None self.provisioning_state = None self.created_time = None self.changed_time = None self.state = None self.status = None self.last_execution_time = None self.next_execution_time = None self.recurrence = None self.workflow = None class WorkflowTriggerCallbackUrl(_serialization.Model): """The workflow trigger callback URL. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: Gets the workflow trigger callback URL. :vartype value: str :ivar method: Gets the workflow trigger callback URL HTTP method. :vartype method: str :ivar base_path: Gets the workflow trigger callback URL base path. :vartype base_path: str :ivar relative_path: Gets the workflow trigger callback URL relative path. :vartype relative_path: str :ivar relative_path_parameters: Gets the workflow trigger callback URL relative path parameters. :vartype relative_path_parameters: list[str] :ivar queries: Gets the workflow trigger callback URL query parameters. :vartype queries: ~azure.mgmt.logic.models.WorkflowTriggerListCallbackUrlQueries """ _validation = { "value": {"readonly": True}, "method": {"readonly": True}, "base_path": {"readonly": True}, "relative_path": {"readonly": True}, } _attribute_map = { "value": {"key": "value", "type": "str"}, "method": {"key": "method", "type": "str"}, "base_path": {"key": "basePath", "type": "str"}, "relative_path": {"key": "relativePath", "type": "str"}, "relative_path_parameters": {"key": "relativePathParameters", "type": "[str]"}, "queries": {"key": "queries", "type": "WorkflowTriggerListCallbackUrlQueries"}, } def __init__( self, *, relative_path_parameters: Optional[List[str]] = None, queries: Optional["_models.WorkflowTriggerListCallbackUrlQueries"] = None, **kwargs ): """ :keyword relative_path_parameters: Gets the workflow trigger callback URL relative path parameters. :paramtype relative_path_parameters: list[str] :keyword queries: Gets the workflow trigger callback URL query parameters. 
:paramtype queries: ~azure.mgmt.logic.models.WorkflowTriggerListCallbackUrlQueries """ super().__init__(**kwargs) self.value = None self.method = None self.base_path = None self.relative_path = None self.relative_path_parameters = relative_path_parameters self.queries = queries class WorkflowTriggerFilter(_serialization.Model): """The workflow trigger filter. :ivar state: The state of workflow trigger. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". :vartype state: str or ~azure.mgmt.logic.models.WorkflowState """ _attribute_map = { "state": {"key": "state", "type": "str"}, } def __init__(self, *, state: Optional[Union[str, "_models.WorkflowState"]] = None, **kwargs): """ :keyword state: The state of workflow trigger. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled", "Deleted", and "Suspended". :paramtype state: str or ~azure.mgmt.logic.models.WorkflowState """ super().__init__(**kwargs) self.state = state class WorkflowTriggerHistory(SubResource): # pylint: disable=too-many-instance-attributes """The workflow trigger history. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The resource id. :vartype id: str :ivar name: Gets the workflow trigger history name. :vartype name: str :ivar type: Gets the workflow trigger history type. :vartype type: str :ivar start_time: Gets the start time. :vartype start_time: ~datetime.datetime :ivar end_time: Gets the end time. :vartype end_time: ~datetime.datetime :ivar scheduled_time: The scheduled time. :vartype scheduled_time: ~datetime.datetime :ivar status: Gets the status. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus :ivar code: Gets the code. :vartype code: str :ivar error: Gets the error. :vartype error: JSON :ivar tracking_id: Gets the tracking id. :vartype tracking_id: str :ivar correlation: The run correlation. :vartype correlation: ~azure.mgmt.logic.models.Correlation :ivar inputs_link: Gets the link to input parameters. :vartype inputs_link: ~azure.mgmt.logic.models.ContentLink :ivar outputs_link: Gets the link to output parameters. :vartype outputs_link: ~azure.mgmt.logic.models.ContentLink :ivar fired: The value indicating whether trigger was fired. :vartype fired: bool :ivar run: Gets the reference to workflow run. 
:vartype run: ~azure.mgmt.logic.models.ResourceReference """ _validation = { "id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}, "start_time": {"readonly": True}, "end_time": {"readonly": True}, "scheduled_time": {"readonly": True}, "status": {"readonly": True}, "code": {"readonly": True}, "error": {"readonly": True}, "tracking_id": {"readonly": True}, "inputs_link": {"readonly": True}, "outputs_link": {"readonly": True}, "fired": {"readonly": True}, "run": {"readonly": True}, } _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "start_time": {"key": "properties.startTime", "type": "iso-8601"}, "end_time": {"key": "properties.endTime", "type": "iso-8601"}, "scheduled_time": {"key": "properties.scheduledTime", "type": "iso-8601"}, "status": {"key": "properties.status", "type": "str"}, "code": {"key": "properties.code", "type": "str"}, "error": {"key": "properties.error", "type": "object"}, "tracking_id": {"key": "properties.trackingId", "type": "str"}, "correlation": {"key": "properties.correlation", "type": "Correlation"}, "inputs_link": {"key": "properties.inputsLink", "type": "ContentLink"}, "outputs_link": {"key": "properties.outputsLink", "type": "ContentLink"}, "fired": {"key": "properties.fired", "type": "bool"}, "run": {"key": "properties.run", "type": "ResourceReference"}, } def __init__(self, *, correlation: Optional["_models.Correlation"] = None, **kwargs): """ :keyword correlation: The run correlation. :paramtype correlation: ~azure.mgmt.logic.models.Correlation """ super().__init__(**kwargs) self.name = None self.type = None self.start_time = None self.end_time = None self.scheduled_time = None self.status = None self.code = None self.error = None self.tracking_id = None self.correlation = correlation self.inputs_link = None self.outputs_link = None self.fired = None self.run = None class WorkflowTriggerHistoryFilter(_serialization.Model): """The workflow trigger history filter. :ivar status: The status of workflow trigger history. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :vartype status: str or ~azure.mgmt.logic.models.WorkflowStatus """ _attribute_map = { "status": {"key": "status", "type": "str"}, } def __init__(self, *, status: Optional[Union[str, "_models.WorkflowStatus"]] = None, **kwargs): """ :keyword status: The status of workflow trigger history. Known values are: "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended", "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", and "Ignored". :paramtype status: str or ~azure.mgmt.logic.models.WorkflowStatus """ super().__init__(**kwargs) self.status = status class WorkflowTriggerHistoryListResult(_serialization.Model): """The list of workflow trigger histories. :ivar value: A list of workflow trigger histories. :vartype value: list[~azure.mgmt.logic.models.WorkflowTriggerHistory] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[WorkflowTriggerHistory]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.WorkflowTriggerHistory"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: A list of workflow trigger histories. 
:paramtype value: list[~azure.mgmt.logic.models.WorkflowTriggerHistory] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class WorkflowTriggerListCallbackUrlQueries(_serialization.Model): """Gets the workflow trigger callback URL query parameters. :ivar api_version: The api version. :vartype api_version: str :ivar sp: The SAS permissions. :vartype sp: str :ivar sv: The SAS version. :vartype sv: str :ivar sig: The SAS signature. :vartype sig: str :ivar se: The SAS timestamp. :vartype se: str """ _attribute_map = { "api_version": {"key": "api-version", "type": "str"}, "sp": {"key": "sp", "type": "str"}, "sv": {"key": "sv", "type": "str"}, "sig": {"key": "sig", "type": "str"}, "se": {"key": "se", "type": "str"}, } def __init__( self, *, api_version: Optional[str] = None, sp: Optional[str] = None, sv: Optional[str] = None, sig: Optional[str] = None, se: Optional[str] = None, **kwargs ): """ :keyword api_version: The api version. :paramtype api_version: str :keyword sp: The SAS permissions. :paramtype sp: str :keyword sv: The SAS version. :paramtype sv: str :keyword sig: The SAS signature. :paramtype sig: str :keyword se: The SAS timestamp. :paramtype se: str """ super().__init__(**kwargs) self.api_version = api_version self.sp = sp self.sv = sv self.sig = sig self.se = se class WorkflowTriggerListResult(_serialization.Model): """The list of workflow triggers. :ivar value: A list of workflow triggers. :vartype value: list[~azure.mgmt.logic.models.WorkflowTrigger] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _attribute_map = { "value": {"key": "value", "type": "[WorkflowTrigger]"}, "next_link": {"key": "nextLink", "type": "str"}, } def __init__( self, *, value: Optional[List["_models.WorkflowTrigger"]] = None, next_link: Optional[str] = None, **kwargs ): """ :keyword value: A list of workflow triggers. :paramtype value: list[~azure.mgmt.logic.models.WorkflowTrigger] :keyword next_link: The URL to get the next set of results. :paramtype next_link: str """ super().__init__(**kwargs) self.value = value self.next_link = next_link class WorkflowTriggerRecurrence(_serialization.Model): """The workflow trigger recurrence. :ivar frequency: The frequency. Known values are: "NotSpecified", "Second", "Minute", "Hour", "Day", "Week", "Month", and "Year". :vartype frequency: str or ~azure.mgmt.logic.models.RecurrenceFrequency :ivar interval: The interval. :vartype interval: int :ivar start_time: The start time. :vartype start_time: str :ivar end_time: The end time. :vartype end_time: str :ivar time_zone: The time zone. :vartype time_zone: str :ivar schedule: The recurrence schedule. :vartype schedule: ~azure.mgmt.logic.models.RecurrenceSchedule """ _attribute_map = { "frequency": {"key": "frequency", "type": "str"}, "interval": {"key": "interval", "type": "int"}, "start_time": {"key": "startTime", "type": "str"}, "end_time": {"key": "endTime", "type": "str"}, "time_zone": {"key": "timeZone", "type": "str"}, "schedule": {"key": "schedule", "type": "RecurrenceSchedule"}, } def __init__( self, *, frequency: Optional[Union[str, "_models.RecurrenceFrequency"]] = None, interval: Optional[int] = None, start_time: Optional[str] = None, end_time: Optional[str] = None, time_zone: Optional[str] = None, schedule: Optional["_models.RecurrenceSchedule"] = None, **kwargs ): """ :keyword frequency: The frequency. 
class WorkflowTriggerRecurrence(_serialization.Model):
    """The workflow trigger recurrence.

    :ivar frequency: The frequency. Known values are: "NotSpecified", "Second", "Minute", "Hour",
     "Day", "Week", "Month", and "Year".
    :vartype frequency: str or ~azure.mgmt.logic.models.RecurrenceFrequency
    :ivar interval: The interval.
    :vartype interval: int
    :ivar start_time: The start time.
    :vartype start_time: str
    :ivar end_time: The end time.
    :vartype end_time: str
    :ivar time_zone: The time zone.
    :vartype time_zone: str
    :ivar schedule: The recurrence schedule.
    :vartype schedule: ~azure.mgmt.logic.models.RecurrenceSchedule
    """

    _attribute_map = {
        "frequency": {"key": "frequency", "type": "str"},
        "interval": {"key": "interval", "type": "int"},
        "start_time": {"key": "startTime", "type": "str"},
        "end_time": {"key": "endTime", "type": "str"},
        "time_zone": {"key": "timeZone", "type": "str"},
        "schedule": {"key": "schedule", "type": "RecurrenceSchedule"},
    }

    def __init__(
        self,
        *,
        frequency: Optional[Union[str, "_models.RecurrenceFrequency"]] = None,
        interval: Optional[int] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        time_zone: Optional[str] = None,
        schedule: Optional["_models.RecurrenceSchedule"] = None,
        **kwargs
    ):
        """
        :keyword frequency: The frequency. Known values are: "NotSpecified", "Second", "Minute",
         "Hour", "Day", "Week", "Month", and "Year".
        :paramtype frequency: str or ~azure.mgmt.logic.models.RecurrenceFrequency
        :keyword interval: The interval.
        :paramtype interval: int
        :keyword start_time: The start time.
        :paramtype start_time: str
        :keyword end_time: The end time.
        :paramtype end_time: str
        :keyword time_zone: The time zone.
        :paramtype time_zone: str
        :keyword schedule: The recurrence schedule.
        :paramtype schedule: ~azure.mgmt.logic.models.RecurrenceSchedule
        """
        super().__init__(**kwargs)
        self.frequency = frequency
        self.interval = interval
        self.start_time = start_time
        self.end_time = end_time
        self.time_zone = time_zone
        self.schedule = schedule


class WorkflowTriggerReference(ResourceReference):
    """The workflow trigger reference.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar flow_name: The workflow name.
    :vartype flow_name: str
    :ivar trigger_name: The workflow trigger name.
    :vartype trigger_name: str
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "flow_name": {"key": "flowName", "type": "str"},
        "trigger_name": {"key": "triggerName", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        flow_name: Optional[str] = None,
        trigger_name: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword id: The resource id.
        :paramtype id: str
        :keyword flow_name: The workflow name.
        :paramtype flow_name: str
        :keyword trigger_name: The workflow trigger name.
        :paramtype trigger_name: str
        """
        super().__init__(id=id, **kwargs)
        self.flow_name = flow_name
        self.trigger_name = trigger_name
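
# Illustrative sketch, not part of the generated model file: a recurrence that
# fires every three hours in a bounded window. Per the model, start_time and
# end_time are plain strings; all values here are examples only.
_example_recurrence = WorkflowTriggerRecurrence(
    frequency="Hour",
    interval=3,
    start_time="2023-01-01T00:00:00Z",
    end_time="2023-12-31T00:00:00Z",
    time_zone="UTC",
)
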
class WorkflowVersion(Resource):  # pylint: disable=too-many-instance-attributes
    """The workflow version.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The resource id.
    :vartype id: str
    :ivar name: Gets the resource name.
    :vartype name: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :ivar location: The resource location.
    :vartype location: str
    :ivar tags: The resource tags.
    :vartype tags: dict[str, str]
    :ivar provisioning_state: The provisioning state. Known values are: "NotSpecified",
     "Accepted", "Running", "Ready", "Creating", "Created", "Deleting", "Deleted", "Canceled",
     "Failed", "Succeeded", "Moving", "Updating", "Registering", "Registered", "Unregistering",
     "Unregistered", "Completed", "Renewing", "Pending", "Waiting", and "InProgress".
    :vartype provisioning_state: str or ~azure.mgmt.logic.models.WorkflowProvisioningState
    :ivar created_time: Gets the created time.
    :vartype created_time: ~datetime.datetime
    :ivar changed_time: Gets the changed time.
    :vartype changed_time: ~datetime.datetime
    :ivar state: The state. Known values are: "NotSpecified", "Completed", "Enabled", "Disabled",
     "Deleted", and "Suspended".
    :vartype state: str or ~azure.mgmt.logic.models.WorkflowState
    :ivar version: Gets the version.
    :vartype version: str
    :ivar access_endpoint: Gets the access endpoint.
    :vartype access_endpoint: str
    :ivar endpoints_configuration: The endpoints configuration.
    :vartype endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
    :ivar access_control: The access control configuration.
    :vartype access_control: ~azure.mgmt.logic.models.FlowAccessControlConfiguration
    :ivar sku: The sku.
    :vartype sku: ~azure.mgmt.logic.models.Sku
    :ivar integration_account: The integration account.
    :vartype integration_account: ~azure.mgmt.logic.models.ResourceReference
    :ivar definition: The definition.
    :vartype definition: JSON
    :ivar parameters: The parameters.
    :vartype parameters: dict[str, ~azure.mgmt.logic.models.WorkflowParameter]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "created_time": {"readonly": True},
        "changed_time": {"readonly": True},
        "version": {"readonly": True},
        "access_endpoint": {"readonly": True},
        "sku": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "created_time": {"key": "properties.createdTime", "type": "iso-8601"},
        "changed_time": {"key": "properties.changedTime", "type": "iso-8601"},
        "state": {"key": "properties.state", "type": "str"},
        "version": {"key": "properties.version", "type": "str"},
        "access_endpoint": {"key": "properties.accessEndpoint", "type": "str"},
        "endpoints_configuration": {"key": "properties.endpointsConfiguration", "type": "FlowEndpointsConfiguration"},
        "access_control": {"key": "properties.accessControl", "type": "FlowAccessControlConfiguration"},
        "sku": {"key": "properties.sku", "type": "Sku"},
        "integration_account": {"key": "properties.integrationAccount", "type": "ResourceReference"},
        "definition": {"key": "properties.definition", "type": "object"},
        "parameters": {"key": "properties.parameters", "type": "{WorkflowParameter}"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        state: Optional[Union[str, "_models.WorkflowState"]] = None,
        endpoints_configuration: Optional["_models.FlowEndpointsConfiguration"] = None,
        access_control: Optional["_models.FlowAccessControlConfiguration"] = None,
        integration_account: Optional["_models.ResourceReference"] = None,
        definition: Optional[JSON] = None,
        parameters: Optional[Dict[str, "_models.WorkflowParameter"]] = None,
        **kwargs
    ):
        """
        :keyword location: The resource location.
        :paramtype location: str
        :keyword tags: The resource tags.
        :paramtype tags: dict[str, str]
        :keyword state: The state. Known values are: "NotSpecified", "Completed", "Enabled",
         "Disabled", "Deleted", and "Suspended".
        :paramtype state: str or ~azure.mgmt.logic.models.WorkflowState
        :keyword endpoints_configuration: The endpoints configuration.
        :paramtype endpoints_configuration: ~azure.mgmt.logic.models.FlowEndpointsConfiguration
        :keyword access_control: The access control configuration.
        :paramtype access_control: ~azure.mgmt.logic.models.FlowAccessControlConfiguration
        :keyword integration_account: The integration account.
        :paramtype integration_account: ~azure.mgmt.logic.models.ResourceReference
        :keyword definition: The definition.
        :paramtype definition: JSON
        :keyword parameters: The parameters.
        :paramtype parameters: dict[str, ~azure.mgmt.logic.models.WorkflowParameter]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.provisioning_state = None
        self.created_time = None
        self.changed_time = None
        self.state = state
        self.version = None
        self.access_endpoint = None
        self.endpoints_configuration = endpoints_configuration
        self.access_control = access_control
        self.sku = None
        self.integration_account = integration_account
        self.definition = definition
        self.parameters = parameters


class WorkflowVersionListResult(_serialization.Model):
    """The list of workflow versions.

    :ivar value: A list of workflow versions.
    :vartype value: list[~azure.mgmt.logic.models.WorkflowVersion]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[WorkflowVersion]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.WorkflowVersion"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: A list of workflow versions.
        :paramtype value: list[~azure.mgmt.logic.models.WorkflowVersion]
        :keyword next_link: The URL to get the next set of results.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link


class WsdlService(_serialization.Model):
    """The WSDL service.

    :ivar qualified_name: The qualified name.
    :vartype qualified_name: str
    :ivar endpoint_qualified_names: The list of endpoints' qualified names.
    :vartype endpoint_qualified_names: list[str]
    """

    _attribute_map = {
        "qualified_name": {"key": "qualifiedName", "type": "str"},
        "endpoint_qualified_names": {"key": "EndpointQualifiedNames", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        qualified_name: Optional[str] = None,
        endpoint_qualified_names: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword qualified_name: The qualified name.
        :paramtype qualified_name: str
        :keyword endpoint_qualified_names: The list of endpoints' qualified names.
        :paramtype endpoint_qualified_names: list[str]
        """
        super().__init__(**kwargs)
        self.qualified_name = qualified_name
        self.endpoint_qualified_names = endpoint_qualified_names
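
# Illustrative sketch, not part of the generated model file: a workflow version
# payload carrying a minimal, hypothetical Logic Apps definition document. Only
# writable fields are set; server-populated fields stay None until a response
# is deserialized into this model.
_example_workflow_version = WorkflowVersion(
    location="westus",
    tags={"env": "dev"},
    state="Enabled",
    definition={
        "$schema": "https://schema.management.azure.com/providers/"
        "Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
        "contentVersion": "1.0.0.0",
        "triggers": {},
        "actions": {},
    },
)
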
    :vartype implementation_acknowledgement_version: str
    :ivar batch_implementation_acknowledgements: The value indicating whether to batch
     implementation acknowledgements. Required.
    :vartype batch_implementation_acknowledgements: bool
    :ivar need_loop_for_valid_messages: The value indicating whether a loop is needed for valid
     messages. Required.
    :vartype need_loop_for_valid_messages: bool
    :ivar send_synchronous_acknowledgement: The value indicating whether to send synchronous
     acknowledgement. Required.
    :vartype send_synchronous_acknowledgement: bool
    :ivar acknowledgement_control_number_prefix: The acknowledgement control number prefix.
    :vartype acknowledgement_control_number_prefix: str
    :ivar acknowledgement_control_number_suffix: The acknowledgement control number suffix.
    :vartype acknowledgement_control_number_suffix: str
    :ivar acknowledgement_control_number_lower_bound: The acknowledgement control number lower
     bound. Required.
    :vartype acknowledgement_control_number_lower_bound: int
    :ivar acknowledgement_control_number_upper_bound: The acknowledgement control number upper
     bound. Required.
    :vartype acknowledgement_control_number_upper_bound: int
    :ivar rollover_acknowledgement_control_number: The value indicating whether to rollover
     acknowledgement control number. Required.
    :vartype rollover_acknowledgement_control_number: bool
    """

    _validation = {
        "need_technical_acknowledgement": {"required": True},
        "batch_technical_acknowledgements": {"required": True},
        "need_functional_acknowledgement": {"required": True},
        "batch_functional_acknowledgements": {"required": True},
        "need_implementation_acknowledgement": {"required": True},
        "batch_implementation_acknowledgements": {"required": True},
        "need_loop_for_valid_messages": {"required": True},
        "send_synchronous_acknowledgement": {"required": True},
        "acknowledgement_control_number_lower_bound": {"required": True},
        "acknowledgement_control_number_upper_bound": {"required": True},
        "rollover_acknowledgement_control_number": {"required": True},
    }

    _attribute_map = {
        "need_technical_acknowledgement": {"key": "needTechnicalAcknowledgement", "type": "bool"},
        "batch_technical_acknowledgements": {"key": "batchTechnicalAcknowledgements", "type": "bool"},
        "need_functional_acknowledgement": {"key": "needFunctionalAcknowledgement", "type": "bool"},
        "functional_acknowledgement_version": {"key": "functionalAcknowledgementVersion", "type": "str"},
        "batch_functional_acknowledgements": {"key": "batchFunctionalAcknowledgements", "type": "bool"},
        "need_implementation_acknowledgement": {"key": "needImplementationAcknowledgement", "type": "bool"},
        "implementation_acknowledgement_version": {"key": "implementationAcknowledgementVersion", "type": "str"},
        "batch_implementation_acknowledgements": {"key": "batchImplementationAcknowledgements", "type": "bool"},
        "need_loop_for_valid_messages": {"key": "needLoopForValidMessages", "type": "bool"},
        "send_synchronous_acknowledgement": {"key": "sendSynchronousAcknowledgement", "type": "bool"},
        "acknowledgement_control_number_prefix": {"key": "acknowledgementControlNumberPrefix", "type": "str"},
        "acknowledgement_control_number_suffix": {"key": "acknowledgementControlNumberSuffix", "type": "str"},
        "acknowledgement_control_number_lower_bound": {"key": "acknowledgementControlNumberLowerBound", "type": "int"},
        "acknowledgement_control_number_upper_bound": {"key": "acknowledgementControlNumberUpperBound", "type": "int"},
        "rollover_acknowledgement_control_number": {"key": "rolloverAcknowledgementControlNumber", "type": "bool"},
    }

    def __init__(
        self,
        *,
        need_technical_acknowledgement: bool,
        batch_technical_acknowledgements: bool,
        need_functional_acknowledgement: bool,
        batch_functional_acknowledgements: bool,
        need_implementation_acknowledgement: bool,
        batch_implementation_acknowledgements: bool,
        need_loop_for_valid_messages: bool,
        send_synchronous_acknowledgement: bool,
        acknowledgement_control_number_lower_bound: int,
        acknowledgement_control_number_upper_bound: int,
        rollover_acknowledgement_control_number: bool,
        functional_acknowledgement_version: Optional[str] = None,
        implementation_acknowledgement_version: Optional[str] = None,
        acknowledgement_control_number_prefix: Optional[str] = None,
        acknowledgement_control_number_suffix: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword need_technical_acknowledgement: The value indicating whether technical
         acknowledgement is needed. Required.
        :paramtype need_technical_acknowledgement: bool
        :keyword batch_technical_acknowledgements: The value indicating whether to batch the
         technical acknowledgements. Required.
        :paramtype batch_technical_acknowledgements: bool
        :keyword need_functional_acknowledgement: The value indicating whether functional
         acknowledgement is needed. Required.
        :paramtype need_functional_acknowledgement: bool
        :keyword functional_acknowledgement_version: The functional acknowledgement version.
        :paramtype functional_acknowledgement_version: str
        :keyword batch_functional_acknowledgements: The value indicating whether to batch
         functional acknowledgements. Required.
        :paramtype batch_functional_acknowledgements: bool
        :keyword need_implementation_acknowledgement: The value indicating whether implementation
         acknowledgement is needed. Required.
        :paramtype need_implementation_acknowledgement: bool
        :keyword implementation_acknowledgement_version: The implementation acknowledgement
         version.
        :paramtype implementation_acknowledgement_version: str
        :keyword batch_implementation_acknowledgements: The value indicating whether to batch
         implementation acknowledgements. Required.
        :paramtype batch_implementation_acknowledgements: bool
        :keyword need_loop_for_valid_messages: The value indicating whether a loop is needed for
         valid messages. Required.
        :paramtype need_loop_for_valid_messages: bool
        :keyword send_synchronous_acknowledgement: The value indicating whether to send
         synchronous acknowledgement. Required.
        :paramtype send_synchronous_acknowledgement: bool
        :keyword acknowledgement_control_number_prefix: The acknowledgement control number prefix.
        :paramtype acknowledgement_control_number_prefix: str
        :keyword acknowledgement_control_number_suffix: The acknowledgement control number suffix.
        :paramtype acknowledgement_control_number_suffix: str
        :keyword acknowledgement_control_number_lower_bound: The acknowledgement control number
         lower bound. Required.
        :paramtype acknowledgement_control_number_lower_bound: int
        :keyword acknowledgement_control_number_upper_bound: The acknowledgement control number
         upper bound. Required.
        :paramtype acknowledgement_control_number_upper_bound: int
        :keyword rollover_acknowledgement_control_number: The value indicating whether to rollover
         acknowledgement control number. Required.
        :paramtype rollover_acknowledgement_control_number: bool
        """
        super().__init__(**kwargs)
        self.need_technical_acknowledgement = need_technical_acknowledgement
        self.batch_technical_acknowledgements = batch_technical_acknowledgements
        self.need_functional_acknowledgement = need_functional_acknowledgement
        self.functional_acknowledgement_version = functional_acknowledgement_version
        self.batch_functional_acknowledgements = batch_functional_acknowledgements
        self.need_implementation_acknowledgement = need_implementation_acknowledgement
        self.implementation_acknowledgement_version = implementation_acknowledgement_version
        self.batch_implementation_acknowledgements = batch_implementation_acknowledgements
        self.need_loop_for_valid_messages = need_loop_for_valid_messages
        self.send_synchronous_acknowledgement = send_synchronous_acknowledgement
        self.acknowledgement_control_number_prefix = acknowledgement_control_number_prefix
        self.acknowledgement_control_number_suffix = acknowledgement_control_number_suffix
        self.acknowledgement_control_number_lower_bound = acknowledgement_control_number_lower_bound
        self.acknowledgement_control_number_upper_bound = acknowledgement_control_number_upper_bound
        self.rollover_acknowledgement_control_number = rollover_acknowledgement_control_number


class X12AgreementContent(_serialization.Model):
    """The X12 agreement content.

    All required parameters must be populated in order to send to Azure.

    :ivar receive_agreement: The X12 one-way receive agreement. Required.
    :vartype receive_agreement: ~azure.mgmt.logic.models.X12OneWayAgreement
    :ivar send_agreement: The X12 one-way send agreement. Required.
    :vartype send_agreement: ~azure.mgmt.logic.models.X12OneWayAgreement
    """

    _validation = {
        "receive_agreement": {"required": True},
        "send_agreement": {"required": True},
    }

    _attribute_map = {
        "receive_agreement": {"key": "receiveAgreement", "type": "X12OneWayAgreement"},
        "send_agreement": {"key": "sendAgreement", "type": "X12OneWayAgreement"},
    }

    def __init__(
        self,
        *,
        receive_agreement: "_models.X12OneWayAgreement",
        send_agreement: "_models.X12OneWayAgreement",
        **kwargs
    ):
        """
        :keyword receive_agreement: The X12 one-way receive agreement. Required.
        :paramtype receive_agreement: ~azure.mgmt.logic.models.X12OneWayAgreement
        :keyword send_agreement: The X12 one-way send agreement. Required.
        :paramtype send_agreement: ~azure.mgmt.logic.models.X12OneWayAgreement
        """
        super().__init__(**kwargs)
        self.receive_agreement = receive_agreement
        self.send_agreement = send_agreement
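
# Illustrative sketch, not part of the generated model file: acknowledgement
# settings requesting functional (997) acknowledgements but no technical (TA1)
# or implementation (999) acknowledgements; all bounds and flags are examples.
_example_ack_settings = X12AcknowledgementSettings(
    need_technical_acknowledgement=False,
    batch_technical_acknowledgements=True,
    need_functional_acknowledgement=True,
    batch_functional_acknowledgements=True,
    need_implementation_acknowledgement=False,
    batch_implementation_acknowledgements=True,
    need_loop_for_valid_messages=False,
    send_synchronous_acknowledgement=True,
    acknowledgement_control_number_lower_bound=1,
    acknowledgement_control_number_upper_bound=999999999,
    rollover_acknowledgement_control_number=True,
)
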
class X12DelimiterOverrides(_serialization.Model):
    """The X12 delimiter override settings.

    All required parameters must be populated in order to send to Azure.

    :ivar protocol_version: The protocol version.
    :vartype protocol_version: str
    :ivar message_id: The message id.
    :vartype message_id: str
    :ivar data_element_separator: The data element separator. Required.
    :vartype data_element_separator: int
    :ivar component_separator: The component separator. Required.
    :vartype component_separator: int
    :ivar segment_terminator: The segment terminator. Required.
    :vartype segment_terminator: int
    :ivar segment_terminator_suffix: The segment terminator suffix. Required. Known values are:
     "NotSpecified", "None", "CR", "LF", and "CRLF".
    :vartype segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
    :ivar replace_character: The replacement character. Required.
    :vartype replace_character: int
    :ivar replace_separators_in_payload: The value indicating whether to replace separators in
     payload. Required.
    :vartype replace_separators_in_payload: bool
    :ivar target_namespace: The target namespace on which this delimiter settings has to be
     applied.
    :vartype target_namespace: str
    """

    _validation = {
        "data_element_separator": {"required": True},
        "component_separator": {"required": True},
        "segment_terminator": {"required": True},
        "segment_terminator_suffix": {"required": True},
        "replace_character": {"required": True},
        "replace_separators_in_payload": {"required": True},
    }

    _attribute_map = {
        "protocol_version": {"key": "protocolVersion", "type": "str"},
        "message_id": {"key": "messageId", "type": "str"},
        "data_element_separator": {"key": "dataElementSeparator", "type": "int"},
        "component_separator": {"key": "componentSeparator", "type": "int"},
        "segment_terminator": {"key": "segmentTerminator", "type": "int"},
        "segment_terminator_suffix": {"key": "segmentTerminatorSuffix", "type": "str"},
        "replace_character": {"key": "replaceCharacter", "type": "int"},
        "replace_separators_in_payload": {"key": "replaceSeparatorsInPayload", "type": "bool"},
        "target_namespace": {"key": "targetNamespace", "type": "str"},
    }

    def __init__(
        self,
        *,
        data_element_separator: int,
        component_separator: int,
        segment_terminator: int,
        segment_terminator_suffix: Union[str, "_models.SegmentTerminatorSuffix"],
        replace_character: int,
        replace_separators_in_payload: bool,
        protocol_version: Optional[str] = None,
        message_id: Optional[str] = None,
        target_namespace: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword protocol_version: The protocol version.
        :paramtype protocol_version: str
        :keyword message_id: The message id.
        :paramtype message_id: str
        :keyword data_element_separator: The data element separator. Required.
        :paramtype data_element_separator: int
        :keyword component_separator: The component separator. Required.
        :paramtype component_separator: int
        :keyword segment_terminator: The segment terminator. Required.
        :paramtype segment_terminator: int
        :keyword segment_terminator_suffix: The segment terminator suffix. Required. Known values
         are: "NotSpecified", "None", "CR", "LF", and "CRLF".
        :paramtype segment_terminator_suffix: str or
         ~azure.mgmt.logic.models.SegmentTerminatorSuffix
        :keyword replace_character: The replacement character. Required.
        :paramtype replace_character: int
        :keyword replace_separators_in_payload: The value indicating whether to replace separators
         in payload. Required.
        :paramtype replace_separators_in_payload: bool
        :keyword target_namespace: The target namespace on which this delimiter settings has to be
         applied.
        :paramtype target_namespace: str
        """
        super().__init__(**kwargs)
        self.protocol_version = protocol_version
        self.message_id = message_id
        self.data_element_separator = data_element_separator
        self.component_separator = component_separator
        self.segment_terminator = segment_terminator
        self.segment_terminator_suffix = segment_terminator_suffix
        self.replace_character = replace_character
        self.replace_separators_in_payload = replace_separators_in_payload
        self.target_namespace = target_namespace
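
# Illustrative sketch, not part of the generated model file: delimiters are
# expressed as ASCII codes -- 42 '*', 58 ':', and 126 '~' are the conventional
# X12 separators; the message id and target namespace are hypothetical.
_example_delimiter_override = X12DelimiterOverrides(
    data_element_separator=42,
    component_separator=58,
    segment_terminator=126,
    segment_terminator_suffix="CR",
    replace_character=36,
    replace_separators_in_payload=False,
    message_id="850",
    target_namespace="http://contoso.example/x12/850",
)
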
    :vartype message_id: str
    :ivar responsible_agency_code: The responsible agency code. Required.
    :vartype responsible_agency_code: str
    :ivar header_version: The header version. Required.
    :vartype header_version: str
    :ivar sender_application_id: The sender application id. Required.
    :vartype sender_application_id: str
    :ivar receiver_application_id: The receiver application id. Required.
    :vartype receiver_application_id: str
    :ivar functional_identifier_code: The functional identifier code.
    :vartype functional_identifier_code: str
    :ivar date_format: The date format. Required. Known values are: "NotSpecified", "CCYYMMDD",
     and "YYMMDD".
    :vartype date_format: str or ~azure.mgmt.logic.models.X12DateFormat
    :ivar time_format: The time format. Required. Known values are: "NotSpecified", "HHMM",
     "HHMMSS", "HHMMSSdd", and "HHMMSSd".
    :vartype time_format: str or ~azure.mgmt.logic.models.X12TimeFormat
    """

    _validation = {
        "target_namespace": {"required": True},
        "protocol_version": {"required": True},
        "message_id": {"required": True},
        "responsible_agency_code": {"required": True},
        "header_version": {"required": True},
        "sender_application_id": {"required": True},
        "receiver_application_id": {"required": True},
        "date_format": {"required": True},
        "time_format": {"required": True},
    }

    _attribute_map = {
        "target_namespace": {"key": "targetNamespace", "type": "str"},
        "protocol_version": {"key": "protocolVersion", "type": "str"},
        "message_id": {"key": "messageId", "type": "str"},
        "responsible_agency_code": {"key": "responsibleAgencyCode", "type": "str"},
        "header_version": {"key": "headerVersion", "type": "str"},
        "sender_application_id": {"key": "senderApplicationId", "type": "str"},
        "receiver_application_id": {"key": "receiverApplicationId", "type": "str"},
        "functional_identifier_code": {"key": "functionalIdentifierCode", "type": "str"},
        "date_format": {"key": "dateFormat", "type": "str"},
        "time_format": {"key": "timeFormat", "type": "str"},
    }

    def __init__(
        self,
        *,
        target_namespace: str,
        protocol_version: str,
        message_id: str,
        responsible_agency_code: str,
        header_version: str,
        sender_application_id: str,
        receiver_application_id: str,
        date_format: Union[str, "_models.X12DateFormat"],
        time_format: Union[str, "_models.X12TimeFormat"],
        functional_identifier_code: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword target_namespace: The target namespace on which this envelope settings has to be
         applied. Required.
        :paramtype target_namespace: str
        :keyword protocol_version: The protocol version on which this envelope settings has to be
         applied. Required.
        :paramtype protocol_version: str
        :keyword message_id: The message id on which this envelope settings has to be applied.
         Required.
        :paramtype message_id: str
        :keyword responsible_agency_code: The responsible agency code. Required.
        :paramtype responsible_agency_code: str
        :keyword header_version: The header version. Required.
        :paramtype header_version: str
        :keyword sender_application_id: The sender application id. Required.
        :paramtype sender_application_id: str
        :keyword receiver_application_id: The receiver application id. Required.
        :paramtype receiver_application_id: str
        :keyword functional_identifier_code: The functional identifier code.
        :paramtype functional_identifier_code: str
        :keyword date_format: The date format. Required. Known values are: "NotSpecified",
         "CCYYMMDD", and "YYMMDD".
        :paramtype date_format: str or ~azure.mgmt.logic.models.X12DateFormat
        :keyword time_format: The time format. Required. Known values are: "NotSpecified", "HHMM",
         "HHMMSS", "HHMMSSdd", and "HHMMSSd".
        :paramtype time_format: str or ~azure.mgmt.logic.models.X12TimeFormat
        """
        super().__init__(**kwargs)
        self.target_namespace = target_namespace
        self.protocol_version = protocol_version
        self.message_id = message_id
        self.responsible_agency_code = responsible_agency_code
        self.header_version = header_version
        self.sender_application_id = sender_application_id
        self.receiver_application_id = receiver_application_id
        self.functional_identifier_code = functional_identifier_code
        self.date_format = date_format
        self.time_format = time_format
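
# Illustrative sketch, not part of the generated model file: an envelope
# override for purchase orders (message id 850); every identifier and version
# string below is an example value, not a service default.
_example_envelope_override = X12EnvelopeOverride(
    target_namespace="http://contoso.example/x12/850",
    protocol_version="00401",
    message_id="850",
    responsible_agency_code="X",
    header_version="00401",
    sender_application_id="CONTOSO",
    receiver_application_id="FABRIKAM",
    date_format="CCYYMMDD",
    time_format="HHMM",
)
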
    :vartype transaction_set_control_number_suffix: str
    :ivar overwrite_existing_transaction_set_control_number: The value indicating whether to
     overwrite existing transaction set control number. Required.
    :vartype overwrite_existing_transaction_set_control_number: bool
    :ivar group_header_date_format: The group header date format. Required. Known values are:
     "NotSpecified", "CCYYMMDD", and "YYMMDD".
    :vartype group_header_date_format: str or ~azure.mgmt.logic.models.X12DateFormat
    :ivar group_header_time_format: The group header time format. Required. Known values are:
     "NotSpecified", "HHMM", "HHMMSS", "HHMMSSdd", and "HHMMSSd".
    :vartype group_header_time_format: str or ~azure.mgmt.logic.models.X12TimeFormat
    :ivar usage_indicator: The usage indicator. Required. Known values are: "NotSpecified",
     "Test", "Information", and "Production".
    :vartype usage_indicator: str or ~azure.mgmt.logic.models.UsageIndicator
    """

    _validation = {
        "control_standards_id": {"required": True},
        "use_control_standards_id_as_repetition_character": {"required": True},
        "sender_application_id": {"required": True},
        "receiver_application_id": {"required": True},
        "control_version_number": {"required": True},
        "interchange_control_number_lower_bound": {"required": True},
        "interchange_control_number_upper_bound": {"required": True},
        "rollover_interchange_control_number": {"required": True},
        "enable_default_group_headers": {"required": True},
        "group_control_number_lower_bound": {"required": True},
        "group_control_number_upper_bound": {"required": True},
        "rollover_group_control_number": {"required": True},
        "group_header_agency_code": {"required": True},
        "group_header_version": {"required": True},
        "transaction_set_control_number_lower_bound": {"required": True},
        "transaction_set_control_number_upper_bound": {"required": True},
        "rollover_transaction_set_control_number": {"required": True},
        "overwrite_existing_transaction_set_control_number": {"required": True},
        "group_header_date_format": {"required": True},
        "group_header_time_format": {"required": True},
        "usage_indicator": {"required": True},
    }

    _attribute_map = {
        "control_standards_id": {"key": "controlStandardsId", "type": "int"},
        "use_control_standards_id_as_repetition_character": {
            "key": "useControlStandardsIdAsRepetitionCharacter",
            "type": "bool",
        },
        "sender_application_id": {"key": "senderApplicationId", "type": "str"},
        "receiver_application_id": {"key": "receiverApplicationId", "type": "str"},
        "control_version_number": {"key": "controlVersionNumber", "type": "str"},
        "interchange_control_number_lower_bound": {"key": "interchangeControlNumberLowerBound", "type": "int"},
        "interchange_control_number_upper_bound": {"key": "interchangeControlNumberUpperBound", "type": "int"},
        "rollover_interchange_control_number": {"key": "rolloverInterchangeControlNumber", "type": "bool"},
        "enable_default_group_headers": {"key": "enableDefaultGroupHeaders", "type": "bool"},
        "functional_group_id": {"key": "functionalGroupId", "type": "str"},
        "group_control_number_lower_bound": {"key": "groupControlNumberLowerBound", "type": "int"},
        "group_control_number_upper_bound": {"key": "groupControlNumberUpperBound", "type": "int"},
        "rollover_group_control_number": {"key": "rolloverGroupControlNumber", "type": "bool"},
        "group_header_agency_code": {"key": "groupHeaderAgencyCode", "type": "str"},
        "group_header_version": {"key": "groupHeaderVersion", "type": "str"},
        "transaction_set_control_number_lower_bound": {"key": "transactionSetControlNumberLowerBound", "type": "int"},
        "transaction_set_control_number_upper_bound": {"key": "transactionSetControlNumberUpperBound", "type": "int"},
        "rollover_transaction_set_control_number": {"key": "rolloverTransactionSetControlNumber", "type": "bool"},
        "transaction_set_control_number_prefix": {"key": "transactionSetControlNumberPrefix", "type": "str"},
        "transaction_set_control_number_suffix": {"key": "transactionSetControlNumberSuffix", "type": "str"},
        "overwrite_existing_transaction_set_control_number": {
            "key": "overwriteExistingTransactionSetControlNumber",
            "type": "bool",
        },
        "group_header_date_format": {"key": "groupHeaderDateFormat", "type": "str"},
        "group_header_time_format": {"key": "groupHeaderTimeFormat", "type": "str"},
        "usage_indicator": {"key": "usageIndicator", "type": "str"},
    }

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        control_standards_id: int,
        use_control_standards_id_as_repetition_character: bool,
        sender_application_id: str,
        receiver_application_id: str,
        control_version_number: str,
        interchange_control_number_lower_bound: int,
        interchange_control_number_upper_bound: int,
        rollover_interchange_control_number: bool,
        enable_default_group_headers: bool,
        group_control_number_lower_bound: int,
        group_control_number_upper_bound: int,
        rollover_group_control_number: bool,
        group_header_agency_code: str,
        group_header_version: str,
        transaction_set_control_number_lower_bound: int,
        transaction_set_control_number_upper_bound: int,
        rollover_transaction_set_control_number: bool,
        overwrite_existing_transaction_set_control_number: bool,
        group_header_date_format: Union[str, "_models.X12DateFormat"],
        group_header_time_format: Union[str, "_models.X12TimeFormat"],
        usage_indicator: Union[str, "_models.UsageIndicator"],
        functional_group_id: Optional[str] = None,
        transaction_set_control_number_prefix: Optional[str] = None,
        transaction_set_control_number_suffix: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword control_standards_id: The control standards id. Required.
        :paramtype control_standards_id: int
        :keyword use_control_standards_id_as_repetition_character: The value indicating whether to
         use control standards id as repetition character. Required.
        :paramtype use_control_standards_id_as_repetition_character: bool
        :keyword sender_application_id: The sender application id. Required.
        :paramtype sender_application_id: str
        :keyword receiver_application_id: The receiver application id. Required.
        :paramtype receiver_application_id: str
        :keyword control_version_number: The control version number. Required.
        :paramtype control_version_number: str
        :keyword interchange_control_number_lower_bound: The interchange control number lower
         bound. Required.
        :paramtype interchange_control_number_lower_bound: int
        :keyword interchange_control_number_upper_bound: The interchange control number upper
         bound. Required.
        :paramtype interchange_control_number_upper_bound: int
        :keyword rollover_interchange_control_number: The value indicating whether to rollover
         interchange control number. Required.
        :paramtype rollover_interchange_control_number: bool
        :keyword enable_default_group_headers: The value indicating whether to enable default
         group headers. Required.
        :paramtype enable_default_group_headers: bool
        :keyword functional_group_id: The functional group id.
        :paramtype functional_group_id: str
        :keyword group_control_number_lower_bound: The group control number lower bound. Required.
        :paramtype group_control_number_lower_bound: int
        :keyword group_control_number_upper_bound: The group control number upper bound. Required.
        :paramtype group_control_number_upper_bound: int
        :keyword rollover_group_control_number: The value indicating whether to rollover group
         control number. Required.
        :paramtype rollover_group_control_number: bool
        :keyword group_header_agency_code: The group header agency code. Required.
        :paramtype group_header_agency_code: str
        :keyword group_header_version: The group header version. Required.
        :paramtype group_header_version: str
        :keyword transaction_set_control_number_lower_bound: The transaction set control number
         lower bound. Required.
        :paramtype transaction_set_control_number_lower_bound: int
        :keyword transaction_set_control_number_upper_bound: The transaction set control number
         upper bound. Required.
        :paramtype transaction_set_control_number_upper_bound: int
        :keyword rollover_transaction_set_control_number: The value indicating whether to rollover
         transaction set control number. Required.
        :paramtype rollover_transaction_set_control_number: bool
        :keyword transaction_set_control_number_prefix: The transaction set control number prefix.
        :paramtype transaction_set_control_number_prefix: str
        :keyword transaction_set_control_number_suffix: The transaction set control number suffix.
        :paramtype transaction_set_control_number_suffix: str
        :keyword overwrite_existing_transaction_set_control_number: The value indicating whether
         to overwrite existing transaction set control number. Required.
        :paramtype overwrite_existing_transaction_set_control_number: bool
        :keyword group_header_date_format: The group header date format. Required. Known values
         are: "NotSpecified", "CCYYMMDD", and "YYMMDD".
        :paramtype group_header_date_format: str or ~azure.mgmt.logic.models.X12DateFormat
        :keyword group_header_time_format: The group header time format. Required. Known values
         are: "NotSpecified", "HHMM", "HHMMSS", "HHMMSSdd", and "HHMMSSd".
        :paramtype group_header_time_format: str or ~azure.mgmt.logic.models.X12TimeFormat
        :keyword usage_indicator: The usage indicator. Required. Known values are: "NotSpecified",
         "Test", "Information", and "Production".
        :paramtype usage_indicator: str or ~azure.mgmt.logic.models.UsageIndicator
        """
        super().__init__(**kwargs)
        self.control_standards_id = control_standards_id
        self.use_control_standards_id_as_repetition_character = use_control_standards_id_as_repetition_character
        self.sender_application_id = sender_application_id
        self.receiver_application_id = receiver_application_id
        self.control_version_number = control_version_number
        self.interchange_control_number_lower_bound = interchange_control_number_lower_bound
        self.interchange_control_number_upper_bound = interchange_control_number_upper_bound
        self.rollover_interchange_control_number = rollover_interchange_control_number
        self.enable_default_group_headers = enable_default_group_headers
        self.functional_group_id = functional_group_id
        self.group_control_number_lower_bound = group_control_number_lower_bound
        self.group_control_number_upper_bound = group_control_number_upper_bound
        self.rollover_group_control_number = rollover_group_control_number
        self.group_header_agency_code = group_header_agency_code
        self.group_header_version = group_header_version
        self.transaction_set_control_number_lower_bound = transaction_set_control_number_lower_bound
        self.transaction_set_control_number_upper_bound = transaction_set_control_number_upper_bound
        self.rollover_transaction_set_control_number = rollover_transaction_set_control_number
        self.transaction_set_control_number_prefix = transaction_set_control_number_prefix
        self.transaction_set_control_number_suffix = transaction_set_control_number_suffix
        self.overwrite_existing_transaction_set_control_number = overwrite_existing_transaction_set_control_number
        self.group_header_date_format = group_header_date_format
        self.group_header_time_format = group_header_time_format
        self.usage_indicator = usage_indicator
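
# Illustrative sketch, not part of the generated model file: envelope settings
# for a test partner exchange; 85 is ASCII 'U' for the control standards id,
# and every id, version, and bound below is an example value only.
_example_envelope_settings = X12EnvelopeSettings(
    control_standards_id=85,
    use_control_standards_id_as_repetition_character=False,
    sender_application_id="CONTOSO",
    receiver_application_id="FABRIKAM",
    control_version_number="00401",
    interchange_control_number_lower_bound=1,
    interchange_control_number_upper_bound=999999999,
    rollover_interchange_control_number=True,
    enable_default_group_headers=True,
    group_control_number_lower_bound=1,
    group_control_number_upper_bound=999999999,
    rollover_group_control_number=True,
    group_header_agency_code="T",
    group_header_version="00401",
    transaction_set_control_number_lower_bound=1,
    transaction_set_control_number_upper_bound=999999999,
    rollover_transaction_set_control_number=True,
    overwrite_existing_transaction_set_control_number=True,
    group_header_date_format="CCYYMMDD",
    group_header_time_format="HHMM",
    usage_indicator="Test",
)
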
    :vartype segment_terminator_suffix: str or ~azure.mgmt.logic.models.SegmentTerminatorSuffix
    """

    _validation = {
        "data_element_separator": {"required": True},
        "component_separator": {"required": True},
        "replace_separators_in_payload": {"required": True},
        "replace_character": {"required": True},
        "segment_terminator": {"required": True},
        "character_set": {"required": True},
        "segment_terminator_suffix": {"required": True},
    }

    _attribute_map = {
        "data_element_separator": {"key": "dataElementSeparator", "type": "int"},
        "component_separator": {"key": "componentSeparator", "type": "int"},
        "replace_separators_in_payload": {"key": "replaceSeparatorsInPayload", "type": "bool"},
        "replace_character": {"key": "replaceCharacter", "type": "int"},
        "segment_terminator": {"key": "segmentTerminator", "type": "int"},
        "character_set": {"key": "characterSet", "type": "str"},
        "segment_terminator_suffix": {"key": "segmentTerminatorSuffix", "type": "str"},
    }

    def __init__(
        self,
        *,
        data_element_separator: int,
        component_separator: int,
        replace_separators_in_payload: bool,
        replace_character: int,
        segment_terminator: int,
        character_set: Union[str, "_models.X12CharacterSet"],
        segment_terminator_suffix: Union[str, "_models.SegmentTerminatorSuffix"],
        **kwargs
    ):
        """
        :keyword data_element_separator: The data element separator. Required.
        :paramtype data_element_separator: int
        :keyword component_separator: The component separator. Required.
        :paramtype component_separator: int
        :keyword replace_separators_in_payload: The value indicating whether to replace separators
         in payload. Required.
        :paramtype replace_separators_in_payload: bool
        :keyword replace_character: The replacement character. Required.
        :paramtype replace_character: int
        :keyword segment_terminator: The segment terminator. Required.
        :paramtype segment_terminator: int
        :keyword character_set: The X12 character set. Required. Known values are: "NotSpecified",
         "Basic", "Extended", and "UTF8".
        :paramtype character_set: str or ~azure.mgmt.logic.models.X12CharacterSet
        :keyword segment_terminator_suffix: The segment terminator suffix. Required. Known values
         are: "NotSpecified", "None", "CR", "LF", and "CRLF".
        :paramtype segment_terminator_suffix: str or
         ~azure.mgmt.logic.models.SegmentTerminatorSuffix
        """
        super().__init__(**kwargs)
        self.data_element_separator = data_element_separator
        self.component_separator = component_separator
        self.replace_separators_in_payload = replace_separators_in_payload
        self.replace_character = replace_character
        self.segment_terminator = segment_terminator
        self.character_set = character_set
        self.segment_terminator_suffix = segment_terminator_suffix
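
# Illustrative sketch, not part of the generated model file: framing with the
# usual X12 separators ('*' 42, ':' 58, '~' 126) and the basic character set.
_example_framing_settings = X12FramingSettings(
    data_element_separator=42,
    component_separator=58,
    replace_separators_in_payload=False,
    replace_character=36,
    segment_terminator=126,
    character_set="Basic",
    segment_terminator_suffix="None",
)
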
        :paramtype message_filter_type: str or ~azure.mgmt.logic.models.MessageFilterType
        """
        super().__init__(**kwargs)
        self.message_filter_type = message_filter_type


class X12MessageIdentifier(_serialization.Model):
    """The X12 message identifier.

    All required parameters must be populated in order to send to Azure.

    :ivar message_id: The message id. Required.
    :vartype message_id: str
    """

    _validation = {
        "message_id": {"required": True},
    }

    _attribute_map = {
        "message_id": {"key": "messageId", "type": "str"},
    }

    def __init__(self, *, message_id: str, **kwargs):
        """
        :keyword message_id: The message id. Required.
        :paramtype message_id: str
        """
        super().__init__(**kwargs)
        self.message_id = message_id


class X12OneWayAgreement(_serialization.Model):
    """The X12 one-way agreement.

    All required parameters must be populated in order to send to Azure.

    :ivar sender_business_identity: The sender business identity. Required.
    :vartype sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
    :ivar receiver_business_identity: The receiver business identity. Required.
    :vartype receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
    :ivar protocol_settings: The X12 protocol settings. Required.
    :vartype protocol_settings: ~azure.mgmt.logic.models.X12ProtocolSettings
    """

    _validation = {
        "sender_business_identity": {"required": True},
        "receiver_business_identity": {"required": True},
        "protocol_settings": {"required": True},
    }

    _attribute_map = {
        "sender_business_identity": {"key": "senderBusinessIdentity", "type": "BusinessIdentity"},
        "receiver_business_identity": {"key": "receiverBusinessIdentity", "type": "BusinessIdentity"},
        "protocol_settings": {"key": "protocolSettings", "type": "X12ProtocolSettings"},
    }

    def __init__(
        self,
        *,
        sender_business_identity: "_models.BusinessIdentity",
        receiver_business_identity: "_models.BusinessIdentity",
        protocol_settings: "_models.X12ProtocolSettings",
        **kwargs
    ):
        """
        :keyword sender_business_identity: The sender business identity. Required.
        :paramtype sender_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
        :keyword receiver_business_identity: The receiver business identity. Required.
        :paramtype receiver_business_identity: ~azure.mgmt.logic.models.BusinessIdentity
        :keyword protocol_settings: The X12 protocol settings. Required.
        :paramtype protocol_settings: ~azure.mgmt.logic.models.X12ProtocolSettings
        """
        super().__init__(**kwargs)
        self.sender_business_identity = sender_business_identity
        self.receiver_business_identity = receiver_business_identity
        self.protocol_settings = protocol_settings
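
# Illustrative sketch, not part of the generated model file: an include-only
# filter paired with the message ids it applies to; a protocol settings object
# carries both (see X12ProtocolSettings below). "850" is an example id.
_example_message_filter = X12MessageFilter(message_filter_type="Include")
_example_message_filter_list = [X12MessageIdentifier(message_id="850")]
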
    :vartype create_empty_xml_tags_for_trailing_separators: bool
    :ivar use_dot_as_decimal_separator: The value indicating whether to use dot as decimal
     separator. Required.
    :vartype use_dot_as_decimal_separator: bool
    """

    _validation = {
        "mask_security_info": {"required": True},
        "convert_implied_decimal": {"required": True},
        "preserve_interchange": {"required": True},
        "suspend_interchange_on_error": {"required": True},
        "create_empty_xml_tags_for_trailing_separators": {"required": True},
        "use_dot_as_decimal_separator": {"required": True},
    }

    _attribute_map = {
        "mask_security_info": {"key": "maskSecurityInfo", "type": "bool"},
        "convert_implied_decimal": {"key": "convertImpliedDecimal", "type": "bool"},
        "preserve_interchange": {"key": "preserveInterchange", "type": "bool"},
        "suspend_interchange_on_error": {"key": "suspendInterchangeOnError", "type": "bool"},
        "create_empty_xml_tags_for_trailing_separators": {
            "key": "createEmptyXmlTagsForTrailingSeparators",
            "type": "bool",
        },
        "use_dot_as_decimal_separator": {"key": "useDotAsDecimalSeparator", "type": "bool"},
    }

    def __init__(
        self,
        *,
        mask_security_info: bool,
        convert_implied_decimal: bool,
        preserve_interchange: bool,
        suspend_interchange_on_error: bool,
        create_empty_xml_tags_for_trailing_separators: bool,
        use_dot_as_decimal_separator: bool,
        **kwargs
    ):
        """
        :keyword mask_security_info: The value indicating whether to mask security information.
         Required.
        :paramtype mask_security_info: bool
        :keyword convert_implied_decimal: The value indicating whether to convert numerical type
         to implied decimal. Required.
        :paramtype convert_implied_decimal: bool
        :keyword preserve_interchange: The value indicating whether to preserve interchange.
         Required.
        :paramtype preserve_interchange: bool
        :keyword suspend_interchange_on_error: The value indicating whether to suspend interchange
         on error. Required.
        :paramtype suspend_interchange_on_error: bool
        :keyword create_empty_xml_tags_for_trailing_separators: The value indicating whether to
         create empty xml tags for trailing separators. Required.
        :paramtype create_empty_xml_tags_for_trailing_separators: bool
        :keyword use_dot_as_decimal_separator: The value indicating whether to use dot as decimal
         separator. Required.
        :paramtype use_dot_as_decimal_separator: bool
        """
        super().__init__(**kwargs)
        self.mask_security_info = mask_security_info
        self.convert_implied_decimal = convert_implied_decimal
        self.preserve_interchange = preserve_interchange
        self.suspend_interchange_on_error = suspend_interchange_on_error
        self.create_empty_xml_tags_for_trailing_separators = create_empty_xml_tags_for_trailing_separators
        self.use_dot_as_decimal_separator = use_dot_as_decimal_separator
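
# Illustrative sketch, not part of the generated model file: a commonly paired
# set of processing flags; every value here is an example choice, not a
# service default.
_example_processing_settings = X12ProcessingSettings(
    mask_security_info=True,
    convert_implied_decimal=True,
    preserve_interchange=False,
    suspend_interchange_on_error=False,
    create_empty_xml_tags_for_trailing_separators=True,
    use_dot_as_decimal_separator=True,
)
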
    :vartype message_filter: ~azure.mgmt.logic.models.X12MessageFilter
    :ivar security_settings: The X12 security settings. Required.
    :vartype security_settings: ~azure.mgmt.logic.models.X12SecuritySettings
    :ivar processing_settings: The X12 processing settings. Required.
    :vartype processing_settings: ~azure.mgmt.logic.models.X12ProcessingSettings
    :ivar envelope_overrides: The X12 envelope override settings.
    :vartype envelope_overrides: list[~azure.mgmt.logic.models.X12EnvelopeOverride]
    :ivar validation_overrides: The X12 validation override settings.
    :vartype validation_overrides: list[~azure.mgmt.logic.models.X12ValidationOverride]
    :ivar message_filter_list: The X12 message filter list.
    :vartype message_filter_list: list[~azure.mgmt.logic.models.X12MessageIdentifier]
    :ivar schema_references: The X12 schema references. Required.
    :vartype schema_references: list[~azure.mgmt.logic.models.X12SchemaReference]
    :ivar x12_delimiter_overrides: The X12 delimiter override settings.
    :vartype x12_delimiter_overrides: list[~azure.mgmt.logic.models.X12DelimiterOverrides]
    """

    _validation = {
        "validation_settings": {"required": True},
        "framing_settings": {"required": True},
        "envelope_settings": {"required": True},
        "acknowledgement_settings": {"required": True},
        "message_filter": {"required": True},
        "security_settings": {"required": True},
        "processing_settings": {"required": True},
        "schema_references": {"required": True},
    }

    _attribute_map = {
        "validation_settings": {"key": "validationSettings", "type": "X12ValidationSettings"},
        "framing_settings": {"key": "framingSettings", "type": "X12FramingSettings"},
        "envelope_settings": {"key": "envelopeSettings", "type": "X12EnvelopeSettings"},
        "acknowledgement_settings": {"key": "acknowledgementSettings", "type": "X12AcknowledgementSettings"},
        "message_filter": {"key": "messageFilter", "type": "X12MessageFilter"},
        "security_settings": {"key": "securitySettings", "type": "X12SecuritySettings"},
        "processing_settings": {"key": "processingSettings", "type": "X12ProcessingSettings"},
        "envelope_overrides": {"key": "envelopeOverrides", "type": "[X12EnvelopeOverride]"},
        "validation_overrides": {"key": "validationOverrides", "type": "[X12ValidationOverride]"},
        "message_filter_list": {"key": "messageFilterList", "type": "[X12MessageIdentifier]"},
        "schema_references": {"key": "schemaReferences", "type": "[X12SchemaReference]"},
        "x12_delimiter_overrides": {"key": "x12DelimiterOverrides", "type": "[X12DelimiterOverrides]"},
    }

    def __init__(
        self,
        *,
        validation_settings: "_models.X12ValidationSettings",
        framing_settings: "_models.X12FramingSettings",
        envelope_settings: "_models.X12EnvelopeSettings",
        acknowledgement_settings: "_models.X12AcknowledgementSettings",
        message_filter: "_models.X12MessageFilter",
        security_settings: "_models.X12SecuritySettings",
        processing_settings: "_models.X12ProcessingSettings",
        schema_references: List["_models.X12SchemaReference"],
        envelope_overrides: Optional[List["_models.X12EnvelopeOverride"]] = None,
        validation_overrides: Optional[List["_models.X12ValidationOverride"]] = None,
        message_filter_list: Optional[List["_models.X12MessageIdentifier"]] = None,
        x12_delimiter_overrides: Optional[List["_models.X12DelimiterOverrides"]] = None,
        **kwargs
    ):
        """
        :keyword validation_settings: The X12 validation settings. Required.
        :paramtype validation_settings: ~azure.mgmt.logic.models.X12ValidationSettings
        :keyword framing_settings: The X12 framing settings. Required.
        :paramtype framing_settings: ~azure.mgmt.logic.models.X12FramingSettings
        :keyword envelope_settings: The X12 envelope settings. Required.
        :paramtype envelope_settings: ~azure.mgmt.logic.models.X12EnvelopeSettings
        :keyword acknowledgement_settings: The X12 acknowledgment settings. Required.
        :paramtype acknowledgement_settings: ~azure.mgmt.logic.models.X12AcknowledgementSettings
        :keyword message_filter: The X12 message filter. Required.
        :paramtype message_filter: ~azure.mgmt.logic.models.X12MessageFilter
        :keyword security_settings: The X12 security settings. Required.
        :paramtype security_settings: ~azure.mgmt.logic.models.X12SecuritySettings
        :keyword processing_settings: The X12 processing settings. Required.
        :paramtype processing_settings: ~azure.mgmt.logic.models.X12ProcessingSettings
        :keyword envelope_overrides: The X12 envelope override settings.
        :paramtype envelope_overrides: list[~azure.mgmt.logic.models.X12EnvelopeOverride]
        :keyword validation_overrides: The X12 validation override settings.
        :paramtype validation_overrides: list[~azure.mgmt.logic.models.X12ValidationOverride]
        :keyword message_filter_list: The X12 message filter list.
        :paramtype message_filter_list: list[~azure.mgmt.logic.models.X12MessageIdentifier]
        :keyword schema_references: The X12 schema references. Required.
        :paramtype schema_references: list[~azure.mgmt.logic.models.X12SchemaReference]
        :keyword x12_delimiter_overrides: The X12 delimiter override settings.
        :paramtype x12_delimiter_overrides: list[~azure.mgmt.logic.models.X12DelimiterOverrides]
        """
        super().__init__(**kwargs)
        self.validation_settings = validation_settings
        self.framing_settings = framing_settings
        self.envelope_settings = envelope_settings
        self.acknowledgement_settings = acknowledgement_settings
        self.message_filter = message_filter
        self.security_settings = security_settings
        self.processing_settings = processing_settings
        self.envelope_overrides = envelope_overrides
        self.validation_overrides = validation_overrides
        self.message_filter_list = message_filter_list
        self.schema_references = schema_references
        self.x12_delimiter_overrides = x12_delimiter_overrides
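
# Illustrative sketch, not part of the generated model file: composing the
# example pieces built above into one X12ProtocolSettings. It is written as a
# function so that X12SecuritySettings and X12SchemaReference, which are
# defined later in this module, resolve at call time. Both parameters are
# placeholders the caller supplies; X12ValidationSettings construction is not
# shown in this excerpt, and the "00" qualifiers are example values only.
def _example_build_x12_protocol_settings(validation_settings, schema_references):
    return X12ProtocolSettings(
        validation_settings=validation_settings,
        framing_settings=_example_framing_settings,
        envelope_settings=_example_envelope_settings,
        acknowledgement_settings=_example_ack_settings,
        message_filter=_example_message_filter,
        security_settings=X12SecuritySettings(
            authorization_qualifier="00", security_qualifier="00"
        ),
        processing_settings=_example_processing_settings,
        message_filter_list=_example_message_filter_list,
        schema_references=schema_references,
    )
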
        :paramtype schema_name: str
        """
        super().__init__(**kwargs)
        self.message_id = message_id
        self.sender_application_id = sender_application_id
        self.schema_version = schema_version
        self.schema_name = schema_name


class X12SecuritySettings(_serialization.Model):
    """The X12 agreement security settings.

    All required parameters must be populated in order to send to Azure.

    :ivar authorization_qualifier: The authorization qualifier. Required.
    :vartype authorization_qualifier: str
    :ivar authorization_value: The authorization value.
    :vartype authorization_value: str
    :ivar security_qualifier: The security qualifier. Required.
    :vartype security_qualifier: str
    :ivar password_value: The password value.
    :vartype password_value: str
    """

    _validation = {
        "authorization_qualifier": {"required": True},
        "security_qualifier": {"required": True},
    }

    _attribute_map = {
        "authorization_qualifier": {"key": "authorizationQualifier", "type": "str"},
        "authorization_value": {"key": "authorizationValue", "type": "str"},
        "security_qualifier": {"key": "securityQualifier", "type": "str"},
        "password_value": {"key": "passwordValue", "type": "str"},
    }

    def __init__(
        self,
        *,
        authorization_qualifier: str,
        security_qualifier: str,
        authorization_value: Optional[str] = None,
        password_value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword authorization_qualifier: The authorization qualifier. Required.
        :paramtype authorization_qualifier: str
        :keyword authorization_value: The authorization value.
        :paramtype authorization_value: str
        :keyword security_qualifier: The security qualifier. Required.
        :paramtype security_qualifier: str
        :keyword password_value: The password value.
        :paramtype password_value: str
        """
        super().__init__(**kwargs)
        self.authorization_qualifier = authorization_qualifier
        self.authorization_value = authorization_value
        self.security_qualifier = security_qualifier
        self.password_value = password_value


class X12ValidationOverride(_serialization.Model):
    """The X12 validation override settings.

    All required parameters must be populated in order to send to Azure.

    :ivar message_id: The message id on which the validation settings have to be applied.
     Required.
    :vartype message_id: str
    :ivar validate_edi_types: The value indicating whether to validate EDI types. Required.
    :vartype validate_edi_types: bool
    :ivar validate_xsd_types: The value indicating whether to validate XSD types. Required.
    :vartype validate_xsd_types: bool
    :ivar allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow
     leading and trailing spaces and zeroes. Required.
    :vartype allow_leading_and_trailing_spaces_and_zeroes: bool
    :ivar validate_character_set: The value indicating whether to validate character set.
     Required.
    :vartype validate_character_set: bool
    :ivar trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim
     leading and trailing spaces and zeroes. Required.
    :vartype trim_leading_and_trailing_spaces_and_zeroes: bool
    :ivar trailing_separator_policy: The trailing separator policy. Required. Known values are:
     "NotSpecified", "NotAllowed", "Optional", and "Mandatory".
    :vartype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy
    """

    _validation = {
        "message_id": {"required": True},
        "validate_edi_types": {"required": True},
        "validate_xsd_types": {"required": True},
        "allow_leading_and_trailing_spaces_and_zeroes": {"required": True},
        "validate_character_set": {"required": True},
        "trim_leading_and_trailing_spaces_and_zeroes": {"required": True},
        "trailing_separator_policy": {"required": True},
    }

    _attribute_map = {
        "message_id": {"key": "messageId", "type": "str"},
        "validate_edi_types": {"key": "validateEDITypes", "type": "bool"},
        "validate_xsd_types": {"key": "validateXSDTypes", "type": "bool"},
        "allow_leading_and_trailing_spaces_and_zeroes": {
            "key": "allowLeadingAndTrailingSpacesAndZeroes",
            "type": "bool",
        },
        "validate_character_set": {"key": "validateCharacterSet", "type": "bool"},
        "trim_leading_and_trailing_spaces_and_zeroes": {"key": "trimLeadingAndTrailingSpacesAndZeroes", "type": "bool"},
        "trailing_separator_policy": {"key": "trailingSeparatorPolicy", "type": "str"},
    }

    def __init__(
        self,
        *,
        message_id: str,
        validate_edi_types: bool,
        validate_xsd_types: bool,
        allow_leading_and_trailing_spaces_and_zeroes: bool,
        validate_character_set: bool,
        trim_leading_and_trailing_spaces_and_zeroes: bool,
        trailing_separator_policy: Union[str, "_models.TrailingSeparatorPolicy"],
        **kwargs
    ):
        """
        :keyword message_id: The message id on which the validation settings have to be applied.
         Required.
        :paramtype message_id: str
        :keyword validate_edi_types: The value indicating whether to validate EDI types. Required.
        :paramtype validate_edi_types: bool
        :keyword validate_xsd_types: The value indicating whether to validate XSD types. Required.
        :paramtype validate_xsd_types: bool
        :keyword allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to
         allow leading and trailing spaces and zeroes. Required.
        :paramtype allow_leading_and_trailing_spaces_and_zeroes: bool
        :keyword validate_character_set: The value indicating whether to validate character set.
         Required.
        :paramtype validate_character_set: bool
        :keyword trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim
         leading and trailing spaces and zeroes. Required.
        :paramtype trim_leading_and_trailing_spaces_and_zeroes: bool
        :keyword trailing_separator_policy: The trailing separator policy. Required. Known values
         are: "NotSpecified", "NotAllowed", "Optional", and "Mandatory".
        :paramtype trailing_separator_policy: str or
         ~azure.mgmt.logic.models.TrailingSeparatorPolicy
        """
        super().__init__(**kwargs)
        self.message_id = message_id
        self.validate_edi_types = validate_edi_types
        self.validate_xsd_types = validate_xsd_types
        self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes
        self.validate_character_set = validate_character_set
        self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes
        self.trailing_separator_policy = trailing_separator_policy


class X12ValidationSettings(_serialization.Model):
    """The X12 agreement validation settings.

    All required parameters must be populated in order to send to Azure.

    :ivar validate_character_set: The value indicating whether to validate character set in the
     message. Required.
    :vartype validate_character_set: bool
    :ivar check_duplicate_interchange_control_number: The value indicating whether to check for
     duplicate interchange control number. Required.
    :vartype check_duplicate_interchange_control_number: bool
    :ivar interchange_control_number_validity_days: The validity period of interchange control
     number. Required.
    :vartype interchange_control_number_validity_days: int
    :ivar check_duplicate_group_control_number: The value indicating whether to check for
     duplicate group control number. Required.
    :vartype check_duplicate_group_control_number: bool
    :ivar check_duplicate_transaction_set_control_number: The value indicating whether to check
     for duplicate transaction set control number. Required.
    :vartype check_duplicate_transaction_set_control_number: bool
    :ivar validate_edi_types: The value indicating whether to validate EDI types. Required.
    :vartype validate_edi_types: bool
    :ivar validate_xsd_types: The value indicating whether to validate XSD types. Required.
    :vartype validate_xsd_types: bool
    :ivar allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow
     leading and trailing spaces and zeroes. Required.
    :vartype allow_leading_and_trailing_spaces_and_zeroes: bool
    :ivar trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim
     leading and trailing spaces and zeroes. Required.
    :vartype trim_leading_and_trailing_spaces_and_zeroes: bool
    :ivar trailing_separator_policy: The trailing separator policy. Required. Known values are:
     "NotSpecified", "NotAllowed", "Optional", and "Mandatory".
    :vartype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy
    """

    _validation = {
        "validate_character_set": {"required": True},
        "check_duplicate_interchange_control_number": {"required": True},
        "interchange_control_number_validity_days": {"required": True},
        "check_duplicate_group_control_number": {"required": True},
        "check_duplicate_transaction_set_control_number": {"required": True},
        "validate_edi_types": {"required": True},
        "validate_xsd_types": {"required": True},
        "allow_leading_and_trailing_spaces_and_zeroes": {"required": True},
        "trim_leading_and_trailing_spaces_and_zeroes": {"required": True},
        "trailing_separator_policy": {"required": True},
    }

    _attribute_map = {
        "validate_character_set": {"key": "validateCharacterSet", "type": "bool"},
        "check_duplicate_interchange_control_number": {"key": "checkDuplicateInterchangeControlNumber", "type": "bool"},
        "interchange_control_number_validity_days": {"key": "interchangeControlNumberValidityDays", "type": "int"},
        "check_duplicate_group_control_number": {"key": "checkDuplicateGroupControlNumber", "type": "bool"},
        "check_duplicate_transaction_set_control_number": {
            "key": "checkDuplicateTransactionSetControlNumber",
            "type": "bool",
        },
        "validate_edi_types": {"key": "validateEDITypes", "type": "bool"},
        "validate_xsd_types": {"key": "validateXSDTypes", "type": "bool"},
        "allow_leading_and_trailing_spaces_and_zeroes": {
            "key": "allowLeadingAndTrailingSpacesAndZeroes",
            "type": "bool",
        },
        "trim_leading_and_trailing_spaces_and_zeroes": {"key": "trimLeadingAndTrailingSpacesAndZeroes", "type": "bool"},
        "trailing_separator_policy": {"key": "trailingSeparatorPolicy", "type": "str"},
    }

    def __init__(
        self,
        *,
        validate_character_set: bool,
        check_duplicate_interchange_control_number: bool,
        interchange_control_number_validity_days: int,
        check_duplicate_group_control_number: bool,
        check_duplicate_transaction_set_control_number: bool,
        validate_edi_types: bool,
        validate_xsd_types: bool,
        allow_leading_and_trailing_spaces_and_zeroes: bool,
        trim_leading_and_trailing_spaces_and_zeroes: bool,
        trailing_separator_policy: Union[str,
"_models.TrailingSeparatorPolicy"], **kwargs ): """ :keyword validate_character_set: The value indicating whether to validate character set in the message. Required. :paramtype validate_character_set: bool :keyword check_duplicate_interchange_control_number: The value indicating whether to check for duplicate interchange control number. Required. :paramtype check_duplicate_interchange_control_number: bool :keyword interchange_control_number_validity_days: The validity period of interchange control number. Required. :paramtype interchange_control_number_validity_days: int :keyword check_duplicate_group_control_number: The value indicating whether to check for duplicate group control number. Required. :paramtype check_duplicate_group_control_number: bool :keyword check_duplicate_transaction_set_control_number: The value indicating whether to check for duplicate transaction set control number. Required. :paramtype check_duplicate_transaction_set_control_number: bool :keyword validate_edi_types: The value indicating whether to Whether to validate EDI types. Required. :paramtype validate_edi_types: bool :keyword validate_xsd_types: The value indicating whether to Whether to validate XSD types. Required. :paramtype validate_xsd_types: bool :keyword allow_leading_and_trailing_spaces_and_zeroes: The value indicating whether to allow leading and trailing spaces and zeroes. Required. :paramtype allow_leading_and_trailing_spaces_and_zeroes: bool :keyword trim_leading_and_trailing_spaces_and_zeroes: The value indicating whether to trim leading and trailing spaces and zeroes. Required. :paramtype trim_leading_and_trailing_spaces_and_zeroes: bool :keyword trailing_separator_policy: The trailing separator policy. Required. Known values are: "NotSpecified", "NotAllowed", "Optional", and "Mandatory". :paramtype trailing_separator_policy: str or ~azure.mgmt.logic.models.TrailingSeparatorPolicy """ super().__init__(**kwargs) self.validate_character_set = validate_character_set self.check_duplicate_interchange_control_number = check_duplicate_interchange_control_number self.interchange_control_number_validity_days = interchange_control_number_validity_days self.check_duplicate_group_control_number = check_duplicate_group_control_number self.check_duplicate_transaction_set_control_number = check_duplicate_transaction_set_control_number self.validate_edi_types = validate_edi_types self.validate_xsd_types = validate_xsd_types self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes self.trailing_separator_policy = trailing_separator_policy
{ "content_hash": "cc6149df8b05f1ac7859c932396a678f", "timestamp": "", "source": "github", "line_count": 11359, "max_line_length": 151, "avg_line_length": 43.69944537371247, "alnum_prop": 0.6537827721391992, "repo_name": "Azure/azure-sdk-for-python", "id": "03479a0e8329fd7fcc6b0b59ac60f5a1d6d7058e", "size": "496883", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/_models_py3.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation setOutDir(__name__) import sys, string from xml.dom import minidom from xml.sax._exceptions import SAXReaderNotAvailable import unittest from reportlab.graphics.shapes import * from reportlab.graphics import renderSVG def warnIgnoredRestofTest(): "Raise a warning (if possible) about a not fully completed test." version = sys.version_info[:2] msg = "XML parser not found - consider installing expat! Rest of test(s) ignored!" if version >= (2, 1): import warnings warnings.warn(msg) else: # should better also be printed only once... print msg # Check if we have a default XML parser available or not. try: import xml from xml.sax import make_parser p = xml.sax.make_parser() HAVE_XML_PARSER = 1 except SAXReaderNotAvailable: HAVE_XML_PARSER = 0 def load(path): "Helper function to read the generated SVG again." doc = minidom.parse(path) doc.normalize() return doc.documentElement class RenderSvgSimpleTestCase(unittest.TestCase): "Testing renderSVG module." def test0(self): "Test two strings in drawing." path = outputfile("test_renderSVG_simple_test0.svg") d = Drawing(200, 100) d.add(String(0, 0, "foo")) d.add(String(100, 0, "bar")) renderSVG.drawToFile(d, path) if not HAVE_XML_PARSER: warnIgnoredRestofTest() return svg = load(path) fg = svg.getElementsByTagName('g')[0] # flipping group dg = fg.getElementsByTagName('g')[0] # diagram group textChildren = dg.getElementsByTagName('text') # text nodes t0 = string.strip(textChildren[0].childNodes[0].nodeValue) t1 = string.strip(textChildren[1].childNodes[0].nodeValue) assert t0 == 'foo' assert t1 == 'bar' def test1(self): "Test two strings in group in drawing." path = outputfile("test_renderSVG_simple_test1.svg") d = Drawing(200, 100) g = Group() g.add(String(0, 0, "foo")) g.add(String(100, 0, "bar")) d.add(g) renderSVG.drawToFile(d, path) if not HAVE_XML_PARSER: warnIgnoredRestofTest() return svg = load(path) fg = svg.getElementsByTagName('g')[0] # flipping group dg = fg.getElementsByTagName('g')[0] # diagram group g = dg.getElementsByTagName('g')[0] # custom group textChildren = g.getElementsByTagName('text') # text nodes t0 = string.strip(textChildren[0].childNodes[0].nodeValue) t1 = string.strip(textChildren[1].childNodes[0].nodeValue) assert t0 == 'foo' assert t1 == 'bar' def test2(self): "Test two strings in transformed group in drawing." 
path = outputfile("test_renderSVG_simple_test2.svg") d = Drawing(200, 100) g = Group() g.add(String(0, 0, "foo")) g.add(String(100, 0, "bar")) g.scale(1.5, 1.2) g.translate(50, 0) d.add(g) renderSVG.drawToFile(d, path) if not HAVE_XML_PARSER: warnIgnoredRestofTest() return svg = load(path) fg = svg.getElementsByTagName('g')[0] # flipping group dg = fg.getElementsByTagName('g')[0] # diagram group g = dg.getElementsByTagName('g')[0] # custom group textChildren = g.getElementsByTagName('text') # text nodes t0 = string.strip(textChildren[0].childNodes[0].nodeValue) t1 = string.strip(textChildren[1].childNodes[0].nodeValue) assert t0 == 'foo' assert t1 == 'bar' def test3(self): from reportlab.lib.units import cm from reportlab.lib import colors width=300 height=60 #Create fairly simple drawing object, drawing=Drawing(width, height) p=ArcPath(strokeColor=colors.darkgreen, fillColor=colors.green, hrefURL="http://en.wikipedia.org/wiki/Vector_path", hrefTitle="This big letter C is actually a closed vector path.", strokewidth=0) p.addArc(1*cm, 1*cm, 0.8*cm, 20, 340, moveTo=True) p.addArc(1*cm, 1*cm, 0.9*cm, 20, 340, reverse=True) p.closePath() drawing.add(p) drawing.add(Rect(2.25*cm, 0.1*cm, 1.5*cm, 0.8*cm, rx=0.25*cm, ry=0.25*cm, hrefURL="http://en.wikipedia.org/wiki/Rounded_rectangle", hrefTitle="Rounded Rectangle", strokeColor=colors.red, fillColor=colors.yellow)) drawing.add(String(1*cm, 1*cm, "Hello World!", hrefURL="http://en.wikipedia.org/wiki/Hello_world", hrefTitle="Why 'Hello World'?", fillColor=colors.darkgreen)) drawing.add(Rect(4.5*cm, 0.5*cm, 5*cm, 1*cm, hrefURL="http://en.wikipedia.org/wiki/Rectangle", hrefTitle="Wikipedia page on rectangles", strokeColor=colors.blue, fillColor=colors.red)) drawing.add(Ellipse(7*cm, 1*cm, 2*cm, 0.95*cm, hrefURL="http://en.wikipedia.org/wiki/Ellipse", strokeColor=colors.black, fillColor=colors.yellow)) drawing.add(Circle(7*cm, 1*cm, 0.9*cm, hrefURL="http://en.wikipedia.org/wiki/Circle", strokeColor=colors.black, fillColor=colors.brown)) drawing.add(Ellipse(7*cm, 1*cm, 0.5*cm, 0.9*cm, hrefTitle="Tooltip with no link?", strokeColor=colors.black, fillColor=colors.black)) drawing.add(Polygon([4.5*cm, 1.25*cm, 5*cm, 0.1*cm, 4*cm, 0.1*cm], hrefURL="http://en.wikipedia.org/wiki/Polygon", hrefTitle="This triangle is a simple polygon.", strokeColor=colors.darkgreen, fillColor=colors.green)) renderSVG.drawToFile(drawing, outputfile("test_renderSVG_simple_test3.svg")) def test4(self): "Test character encoding." path = outputfile("test_renderSVG_simple_test4.svg") specialChar = u'\u2019' d = Drawing(200, 100) d.add(String(0, 0, "foo"+specialChar)) d.add(String(100, 0, "bar")) renderSVG.drawToFile(d, path) if not HAVE_XML_PARSER: warnIgnoredRestofTest() return svg = load(path) fg = svg.getElementsByTagName('g')[0] # flipping group dg = fg.getElementsByTagName('g')[0] # diagram group textChildren = dg.getElementsByTagName('text') # text nodes t0 = string.strip(textChildren[0].childNodes[0].nodeValue) t1 = string.strip(textChildren[1].childNodes[0].nodeValue) assert t0 == 'foo'+specialChar assert t1 == 'bar' def tearDown(self): "When finished, make a little index page to view them in situ" body = """<html> <head><title>renderSVG test output</title></head> <body> <h1>renderSVG test output in a web page</h1> <p>We have four SVG diagrams embedded in this page. Each is within a cyan-coloured div. The first 3 have a native size of 400x200, thus consume a height of 200 pixels on the page. 
The last is 300x60.</p>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test0.svg" type="image/svg+xml" />
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test1.svg" type="image/svg+xml" />
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test2.svg" type="image/svg+xml" />
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" />
</div>
<hr/>
<p>Test of resizing: the ones below are sized 50%, 100%, 150%.
We did this by explicitly setting the width and height in the
<code>embed</code> tag.</p>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" width="150" height="45"/>
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" width="300" height="60"/>
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" width="450" height="90"/>
</div>
<hr/>
<p>Test of resizing again: the ones below are sized 50%, 100%, 150%
by setting width only.</p>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" width="150"/>
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" width="300"/>
</div>
<hr/>
<div style="background-color:cyan">
<embed src="test_renderSVG_simple_test3.svg" type="image/svg+xml" width="450"/>
</div>
<hr/>
</body>
</html>
"""
        open('test_renderSVG_output.html', 'w').write(body)


class RenderSvgAxesTestCase(unittest.TestCase):
    "Testing renderSVG module on Axes widgets."

    def test0(self):
        "Test the XCategoryAxis demo drawing."
        path = outputfile("axestest0.svg")
        from reportlab.graphics.charts.axes import XCategoryAxis
        d = XCategoryAxis().demo()
        renderSVG.drawToFile(d, path)


def makeSuite():
    return makeSuiteForClasses(RenderSvgSimpleTestCase, RenderSvgAxesTestCase)


#noruntests
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
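
# Distilled from the tests above: the minimal pattern for producing an SVG with
# reportlab (assumes reportlab is installed; the output path is arbitrary).
from reportlab.graphics.shapes import Drawing, String
from reportlab.graphics import renderSVG

d = Drawing(200, 100)
d.add(String(0, 0, "hello"))
renderSVG.drawToFile(d, "hello.svg")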
{ "content_hash": "bd7e72b496605d547cee6aac36b11ab6", "timestamp": "", "source": "github", "line_count": 283, "max_line_length": 105, "avg_line_length": 37.024734982332156, "alnum_prop": 0.5623210536361901, "repo_name": "MatthewWilkes/reportlab", "id": "c9b9587e8386cbb2565d7df2946f32815fec777c", "size": "10500", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_renderSVG.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "740949" }, { "name": "C++", "bytes": "14870" }, { "name": "CSS", "bytes": "16419" }, { "name": "Java", "bytes": "6333" }, { "name": "Python", "bytes": "3186241" }, { "name": "Shell", "bytes": "4864" } ], "symlink_target": "" }
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. """ import collections as _collections from google.protobuf import text_format as _text_format from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library def bytes_limit(name=None): r"""TODO: add doc. Args: name: A name for the operation (optional). Returns: A `Tensor` of type `int64`. """ result = _op_def_lib.apply_op("BytesLimit", name=name) return result _ops.RegisterShape("BytesLimit")(None) def max_bytes_in_use(name=None): r"""TODO: add doc. Args: name: A name for the operation (optional). Returns: A `Tensor` of type `int64`. """ result = _op_def_lib.apply_op("MaxBytesInUse", name=name) return result _ops.RegisterShape("MaxBytesInUse")(None) def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "BytesLimit" output_arg { name: "out" type: DT_INT64 } is_stateful: true } op { name: "MaxBytesInUse" output_arg { name: "out" type: DT_INT64 } is_stateful: true } """ _op_def_lib = _InitOpDefLibrary()
{ "content_hash": "4f56d938dcb8904dbf3e2ab80c558956", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 75, "avg_line_length": 22.17105263157895, "alnum_prop": 0.6991097922848665, "repo_name": "ryfeus/lambda-packs", "id": "ef59a4488e5bfce020c44f9d358fd68f01273d6a", "size": "1685", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "Tensorflow_LightGBM_Scipy_nightly/source/tensorflow/contrib/memory_stats/ops/gen_memory_stats_ops.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "9768343" }, { "name": "C++", "bytes": "76566960" }, { "name": "CMake", "bytes": "191097" }, { "name": "CSS", "bytes": "153538" }, { "name": "Cuda", "bytes": "61768" }, { "name": "Cython", "bytes": "3110222" }, { "name": "Fortran", "bytes": "110284" }, { "name": "HTML", "bytes": "248658" }, { "name": "JavaScript", "bytes": "62920" }, { "name": "MATLAB", "bytes": "17384" }, { "name": "Makefile", "bytes": "152150" }, { "name": "Python", "bytes": "549307737" }, { "name": "Roff", "bytes": "26398" }, { "name": "SWIG", "bytes": "142" }, { "name": "Shell", "bytes": "7790" }, { "name": "Smarty", "bytes": "4090" }, { "name": "TeX", "bytes": "152062" }, { "name": "XSLT", "bytes": "305540" } ], "symlink_target": "" }
from tempest_lib import decorators import testtools from tempest.api.telemetry import base from tempest import config from tempest import test CONF = config.CONF class TelemetryNotificationAPITestJSON(base.BaseTelemetryTest): @classmethod def skip_checks(cls): super(TelemetryNotificationAPITestJSON, cls).skip_checks() if CONF.telemetry.too_slow_to_test: raise cls.skipException("Ceilometer feature for fast work mysql " "is disabled") @test.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caad') @testtools.skipIf(not CONF.service_available.nova, "Nova is not available.") def test_check_nova_notification(self): body = self.create_server() query = ('resource', 'eq', body['id']) for metric in self.nova_notifications: self.await_samples(metric, query) @test.attr(type="smoke") @test.idempotent_id('04b10bfe-a5dc-47af-b22f-0460426bf498') @test.services("image") @testtools.skipIf(not CONF.image_feature_enabled.api_v1, "Glance api v1 is disabled") @decorators.skip_because(bug='1351627') def test_check_glance_v1_notifications(self): body = self.create_image(self.image_client) self.image_client.update_image(body['id'], data='data') query = 'resource', 'eq', body['id'] self.image_client.delete_image(body['id']) for metric in self.glance_notifications: self.await_samples(metric, query) @test.attr(type="smoke") @test.idempotent_id('c240457d-d943-439b-8aea-85e26d64fe8e') @test.services("image") @testtools.skipIf(not CONF.image_feature_enabled.api_v2, "Glance api v2 is disabled") @decorators.skip_because(bug='1351627') def test_check_glance_v2_notifications(self): body = self.create_image(self.image_client_v2) self.image_client_v2.store_image(body['id'], "file") self.image_client_v2.get_image_file(body['id']) query = 'resource', 'eq', body['id'] for metric in self.glance_v2_notifications: self.await_samples(metric, query)
{ "content_hash": "fa3a40a723215928b7142d8d28922b68", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 77, "avg_line_length": 34.203125, "alnum_prop": 0.6473275468250342, "repo_name": "danielmellado/tempest", "id": "73a5fd4d87b0441ba0e058a26a1731063317f99f", "size": "2762", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tempest/api/telemetry/test_telemetry_notification_api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2724850" }, { "name": "Shell", "bytes": "8560" } ], "symlink_target": "" }
__author__ = 'greg' from cassandra.cluster import Cluster import cassandra import pymongo import uuid import json from cassandra.concurrent import execute_concurrent cluster = Cluster() cassandra_session = cluster.connect('serengeti') # try: # cassandra_session.execute("drop table classifications") # print "table dropped" # except cassandra.InvalidRequest: # print "table did not exist" # pass # cassandra_session.execute("CREATE TABLE classifications(id int, created_at timestamp,zooniverse_id text,annotations text,user_name text, user_ip inet, PRIMARY KEY(id, created_at,user_ip)) WITH CLUSTERING ORDER BY (created_at ASC, user_ip ASC);") cassandra_session.execute("CREATE TABLE ip_classifications (id int, created_at timestamp,zooniverse_id text,annotations text,user_name text, user_ip inet, PRIMARY KEY(id, user_ip,created_at)) WITH CLUSTERING ORDER BY (user_ip ASC,created_at ASC);") # connect to the mongo server client = pymongo.MongoClient() db = client['serengeti_2015-02-22'] classification_collection = db["serengeti_classifications"] subject_collection = db["serengeti_subjects"] user_collection = db["serengeti_users"] insert_statement = cassandra_session.prepare("""insert into ip_classifications (id,created_at, zooniverse_id,annotations, user_name,user_ip) values (?,?,?,?,?,?)""") statements_and_params = [] for ii,classification in enumerate(classification_collection.find()): created_at = classification["created_at"] if "user_name" in classification: user_name = classification["user_name"] else: user_name = "" user_ip = classification["user_ip"] annotations = classification["annotations"] id = uuid.uuid1() zooniverse_id = classification["subjects"][0]["zooniverse_id"] params = (1,created_at,zooniverse_id,json.dumps(annotations),user_name,user_ip) statements_and_params.append((insert_statement, params)) if (ii > 0) and (ii % 50000 == 0): print ii r = execute_concurrent(cassandra_session, statements_and_params, raise_on_first_error=True) statements_and_params = [] r = execute_concurrent(cassandra_session, statements_and_params, raise_on_first_error=True)
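
# A hedged read-back sketch: once the concurrent inserts above finish, a few
# rows can be sampled from the new table (column names match the CREATE TABLE
# statement above; the driver exposes them as row attributes).
rows = cassandra_session.execute(
    "SELECT zooniverse_id, user_ip, created_at FROM ip_classifications LIMIT 5")
for row in rows:
    print row.zooniverse_id, row.user_ip, row.created_at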
{ "content_hash": "a7918b39cef8b3691cbda0b832b12043", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 248, "avg_line_length": 40.96296296296296, "alnum_prop": 0.7246835443037974, "repo_name": "camallen/aggregation", "id": "0e2c8839e63e9aa64a9e6e66663e17d327bd9c70", "size": "2234", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "algorithms/serengeti_blank/serengeti_cass.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "723" }, { "name": "Python", "bytes": "1676640" }, { "name": "Scala", "bytes": "629" }, { "name": "Shell", "bytes": "95" } ], "symlink_target": "" }
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer import json class Api(object): def __init__(self, options): pass def start(self): pass def halt(self): pass class HttpRequestHandler(BaseHTTPRequestHandler): def do_OPTIONS(self): self.send_response(204) self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Access-Control-Allow-Headers', 'Content-Type') self.end_headers() def do_GET(self): import traceback if not hasattr(self, "method"): self.method = "GET" self.send_response(200) path_parts = filter(len, self.path.split("/")) try: response = self.get_response(path_parts) except Exception as ex: print(traceback.format_exc()) response = { "error": str(ex), } json_response = json.dumps(response) self.send_header('Content-Type', 'application/json') self.send_header('Content-Length', len(json_response)) self.send_header('Access-Control-Allow-Origin', '*') self.end_headers() self.wfile.write(json_response) self.wfile.close() def do_POST(self): self.method = "POST" return self.do_GET() def get_response(self, path): if not path or path[0] != "api": return {} if len(path) == 1: return self.handle_api() if len(path) == 2 and path[1] == "robots": return self.handle_robots() if len(path) == 3 and path[1] == "robots": return self.handle_robot(path[2]) if len(path) == 4 and path[1] == "robots": if path[3] == "devices": return self.handle_robot_devices(path[2]) if len(path) == 5 and path[1] == "robots": if path[3] == "devices": return self.handle_robot_device(path[2], path[4]) if len(path) == 6 and path[1] == "robots": if path[3] == "devices": if path[5] == "commands": return self.handle_robot_device_commands(path[2], path[4]) if path[5] == "events": return self.handle_robot_device_events(path[2], path[4]) if len(path) == 7 and path[1] == "robots": if path[3] == "devices": if path[5] == "commands": return self.handle_robot_device_command( path[2], path[4], path[6] ) return {} def handle_api(self): response = { "commands": [], "events": [], } response.update(self.handle_robots()) return { "MCP": response, } def handle_robots(self): from zorg import main robots = main.robots response = { "robots": [], } for robot_name, robot in robots.items(): serialized = robot.serialize() response["robots"].append(serialized) return response def handle_robot(self, name): from zorg import main robots = main.robots robot = robots[name] response = { "robot": robot.serialize(), } return response def handle_robot_devices(self, name): from zorg import main robots = main.robots robot = robots[name] response = { "devices": robot.serialize_devices(), } return response def handle_robot_device(self, robot_name, device_name): from zorg import main robots = main.robots robot = robots[robot_name] device = getattr(robot.helper, device_name) return { "device": device.serialize() } def handle_robot_device_commands(self, robot_name, device_name): from zorg import main robots = main.robots robot = robots[robot_name] device = getattr(robot.helper, device_name) return { "commands": device.commands, } def handle_robot_device_events(self, robot_name, device_name): from zorg import main robots = main.robots robot = robots[robot_name] device = getattr(robot.helper, device_name) return { "events": device.events, } def handle_robot_device_command( self, robot_name, device_name, command_name): from zorg import main robots = main.robots robot = robots[robot_name] device = getattr(robot.helper, device_name) command = getattr(device, command_name) if self.method == "GET": request_body = "" else: request_body = 
self.rfile.read( int(self.headers.getheader('content-length')) ) if request_body: args = json.loads(request_body) else: args = {} result = command(**args) return { "result": result, } class Http(Api): def start(self): server = HTTPServer(('0.0.0.0', 8000), HttpRequestHandler) server.serve_forever()
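
# A hedged client-side sketch: Http({}).start() above binds 0.0.0.0:8000 and
# blocks, so a separate process could query the MCP endpoint like this
# (Python 2, matching the BaseHTTPServer import above).
import json
import urllib2

response = urllib2.urlopen("http://localhost:8000/api")
print json.loads(response.read())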
{ "content_hash": "d3f2c722cdd42cc2de0ec938cb392bd5", "timestamp": "", "source": "github", "line_count": 220, "max_line_length": 78, "avg_line_length": 23.87272727272727, "alnum_prop": 0.524942878903275, "repo_name": "zorg/zorg", "id": "404d5af1b1e9ce45a602769f28b4509cc60f3b4f", "size": "5252", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "zorg/api.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13855" } ], "symlink_target": "" }
from Crypto.Cipher import AES from cryptopals.set1.challenge_02 import fixed_xor import struct def block_iterator(text): return (text[i: i + 16] for i in xrange(0, len(text), 16)) def _aes_ctr_xor(text, key, nonce): # Set up the cipher cipher = AES.new(key, AES.MODE_ECB) # Start the counter counter = 0 output = '' for block in block_iterator(text): keystream = cipher.encrypt(struct.pack('<8sQ', nonce, counter)) output += fixed_xor(block, keystream) counter += 1 return output def encrypt_aes_ctr(plaintext, key, nonce): """Encrypt the given plaintex with AES using CTR mode.""" return _aes_ctr_xor(plaintext, key, nonce) def decrypt_aes_ctr(ciphertext, key, nonce): """Decrypt the given ciphertext with AES using CTR mode.""" return _aes_ctr_xor(ciphertext, key, nonce) def test(key, nonce, ciphertext, plaintext): assert decrypt_aes_ctr(ciphertext, key, nonce) == plaintext assert encrypt_aes_ctr(plaintext, key, nonce) == ciphertext
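
# A round-trip sketch using the helpers above: AES needs a 16/24/32-byte key,
# and struct.pack('<8sQ', nonce, counter) expects an (up to) 8-byte nonce.
# The key and plaintext here are arbitrary 16-byte examples.
key = 'YELLOW SUBMARINE'
nonce = '\x00' * 8
plaintext = 'attack at dawn!!'
ciphertext = encrypt_aes_ctr(plaintext, key, nonce)
assert decrypt_aes_ctr(ciphertext, key, nonce) == plaintext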
{ "content_hash": "b4448eb447eb8de05fcd1795676933b4", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 71, "avg_line_length": 25.85, "alnum_prop": 0.6711798839458414, "repo_name": "ericnorris/cryptopals-solutions", "id": "d20b6072f43f59e0e9fe0989530e21897d57919c", "size": "1034", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cryptopals/set3/challenge_18.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "34169" } ], "symlink_target": "" }
from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError, Codes from synapse.types import RoomAlias from .base import ClientV1RestServlet, client_path_pattern import simplejson as json import logging logger = logging.getLogger(__name__) def register_servlets(hs, http_server): ClientDirectoryServer(hs).register(http_server) class ClientDirectoryServer(ClientV1RestServlet): PATTERN = client_path_pattern("/directory/room/(?P<room_alias>[^/]*)$") @defer.inlineCallbacks def on_GET(self, request, room_alias): room_alias = RoomAlias.from_string(room_alias) dir_handler = self.handlers.directory_handler res = yield dir_handler.get_association(room_alias) defer.returnValue((200, res)) @defer.inlineCallbacks def on_PUT(self, request, room_alias): content = _parse_json(request) if "room_id" not in content: raise SynapseError(400, "Missing room_id key", errcode=Codes.BAD_JSON) logger.debug("Got content: %s", content) room_alias = RoomAlias.from_string(room_alias) logger.debug("Got room name: %s", room_alias.to_string()) room_id = content["room_id"] servers = content["servers"] if "servers" in content else None logger.debug("Got room_id: %s", room_id) logger.debug("Got servers: %s", servers) # TODO(erikj): Check types. # TODO(erikj): Check that room exists dir_handler = self.handlers.directory_handler try: # try to auth as a user user, client = yield self.auth.get_user_by_req(request) try: user_id = user.to_string() yield dir_handler.create_association( user_id, room_alias, room_id, servers ) yield dir_handler.send_room_alias_update_event(user_id, room_id) except SynapseError as e: raise e except: logger.exception("Failed to create association") raise except AuthError: # try to auth as an application service service = yield self.auth.get_appservice_by_req(request) yield dir_handler.create_appservice_association( service, room_alias, room_id, servers ) logger.info( "Application service at %s created alias %s pointing to %s", service.url, room_alias.to_string(), room_id ) defer.returnValue((200, {})) @defer.inlineCallbacks def on_DELETE(self, request, room_alias): dir_handler = self.handlers.directory_handler try: service = yield self.auth.get_appservice_by_req(request) room_alias = RoomAlias.from_string(room_alias) yield dir_handler.delete_appservice_association( service, room_alias ) logger.info( "Application service at %s deleted alias %s", service.url, room_alias.to_string() ) defer.returnValue((200, {})) except AuthError: # fallback to default user behaviour if they aren't an AS pass user, client = yield self.auth.get_user_by_req(request) is_admin = yield self.auth.is_server_admin(user) if not is_admin: raise AuthError(403, "You need to be a server admin") room_alias = RoomAlias.from_string(room_alias) yield dir_handler.delete_association( user.to_string(), room_alias ) logger.info( "User %s deleted alias %s", user.to_string(), room_alias.to_string() ) defer.returnValue((200, {})) def _parse_json(request): try: content = json.loads(request.content.read()) if type(content) != dict: raise SynapseError(400, "Content must be a JSON object.", errcode=Codes.NOT_JSON) return content except ValueError: raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
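
# A hedged sketch of the JSON body on_PUT expects: room_id is required
# (omitting it raises SynapseError above) and servers is optional. Both
# values here are hypothetical.
body = {
    "room_id": "!abcdefgh:example.com",
    "servers": ["example.com"],
}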
{ "content_hash": "d093e88fb06530f0f4f9f5a7c79f6115", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 80, "avg_line_length": 32.48091603053435, "alnum_prop": 0.581433607520564, "repo_name": "howethomas/synapse", "id": "6758a888b32b01f43485f727eec52868004eac67", "size": "4865", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "synapse/rest/client/v1/directory.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "1020" }, { "name": "HTML", "bytes": "1223" }, { "name": "JavaScript", "bytes": "172643" }, { "name": "Perl", "bytes": "31842" }, { "name": "Python", "bytes": "1571632" }, { "name": "Shell", "bytes": "3281" } ], "symlink_target": "" }
__all__ = ['skymodel_arlexecute']
{ "content_hash": "ca875841e48e07b89c8842ff26cc82ee", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 33, "avg_line_length": 33, "alnum_prop": 0.6363636363636364, "repo_name": "SKA-ScienceDataProcessor/algorithm-reference-library", "id": "fccd544da2866928a269e1a4b6b690ee6675d369", "size": "33", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "workflows/arlexecute/skymodel/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "171056" }, { "name": "C++", "bytes": "520" }, { "name": "Dockerfile", "bytes": "4686" }, { "name": "Java", "bytes": "748" }, { "name": "Jupyter Notebook", "bytes": "8158663" }, { "name": "Makefile", "bytes": "19263" }, { "name": "Nix", "bytes": "3599" }, { "name": "Python", "bytes": "1854561" }, { "name": "Shell", "bytes": "73453" }, { "name": "Smarty", "bytes": "1057" } ], "symlink_target": "" }
import os import sys current_dir = os.path.dirname(os.path.abspath(__file__)) test_dir = os.path.join(current_dir, '..') project_dir = os.path.join(test_dir, '..', '..') sys.path.insert(0, project_dir)
{ "content_hash": "1c146819ad1550a796dc76e8fd745f31", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 56, "avg_line_length": 25.5, "alnum_prop": 0.6617647058823529, "repo_name": "RaviSoji/probabilistic_LDA", "id": "f0f246f2240481539e9fcaa9238deacb33cdaed0", "size": "883", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_optimizer/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "114184" } ], "symlink_target": "" }
''' Created by auto_sdk on 2015.06.23 ''' from aliyun.api.base import RestApi class Mts20140618DeleteTemplateRequest(RestApi): def __init__(self,domain='mts.aliyuncs.com',port=80): RestApi.__init__(self,domain, port) self.TemplateId = None def getapiname(self): return 'mts.aliyuncs.com.DeleteTemplate.2014-06-18'
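
# A hedged usage sketch: populate the request's single parameter; actually
# dispatching it is left to the RestApi base class, whose call interface is
# not shown here. The template id is a hypothetical placeholder.
req = Mts20140618DeleteTemplateRequest()
req.TemplateId = 'example-template-id'
print req.getapiname()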
{ "content_hash": "7f0ceadea2d21446ddd49b0f049c9128", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 54, "avg_line_length": 29.363636363636363, "alnum_prop": 0.739938080495356, "repo_name": "francisar/rds_manager", "id": "3a2d6ee453be85f8180042bc6344b09be984e2b4", "size": "323", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "aliyun/api/rest/Mts20140618DeleteTemplateRequest.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "259509" }, { "name": "Shell", "bytes": "1481" } ], "symlink_target": "" }
import os def ruler(char): """ Dynamically generates a string containing a 'horizontal line' or 'ruler' sized to fit the current width of the tty. Takes one argument, a string containing the desired ruler char, and returns a string. Examples: Arg "*" will return "****...<tty width>...****" Arg "=" will return "====...<tty width>...====" """ rows, columns = os.popen('stty size', 'r').read().split() # use stty output to generate a tuple denoting size string = [] # list used to build return string for i in range(int(columns)): # for every column string.append(char.rstrip()) # append a char to the list, stripping newlines return "".join(string) # turn the list into a string def get_port_mapping_dicts(): with open('etc/juniper_port_nicknames', 'r') as f: data = f.read() dict={} rev_dict={} for item in data.split(): name, number = item.split(",") dict[name] = number # key is name, value is number rev_dict[number] = name return dict, rev_dict
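
# A hedged usage sketch: ruler() shells out to `stty size`, so it needs a real
# tty; get_port_mapping_dicts() reads etc/juniper_port_nicknames relative to
# the working directory. The port nickname below is hypothetical.
print ruler('=')
name_to_number, number_to_name = get_port_mapping_dicts()
print name_to_number.get('uplink1')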
{ "content_hash": "d18428ca277e64ee41c1835cfda65106", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 117, "avg_line_length": 34.03333333333333, "alnum_prop": 0.643486777668952, "repo_name": "ndonaghy/hephaestus", "id": "b3ecfd8ba6aff8f162b62897e5f8645f2f9faee1", "size": "1021", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/hephaestus_utils.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "45575" }, { "name": "Shell", "bytes": "131" } ], "symlink_target": "" }
import os import uuid from oslo_config import cfg import six from sahara import conductor as c from sahara import context from sahara import exceptions as e from sahara.i18n import _ from sahara.service.edp import base_engine from sahara.service.edp.binary_retrievers import dispatch from sahara.service.edp import hdfs_helper as h from sahara.service.edp import job_utils from sahara.service.validations.edp import job_execution as j from sahara.swift import swift_helper as sw from sahara.swift import utils as su from sahara.utils import cluster as c_u from sahara.utils import edp from sahara.utils import files from sahara.utils import remote from sahara.utils import xmlutils conductor = c.API CONF = cfg.CONF class SparkJobEngine(base_engine.JobEngine): def __init__(self, cluster): self.cluster = cluster # We'll always run the driver program on the master self.master = None # These parameters depend on engine that is used self.plugin_params = {"master": "", "spark-user": "", "deploy-mode": "", "spark-submit": "", "driver-class-path": "", } def _get_pid_and_inst_id(self, job_id): try: pid, inst_id = job_id.split("@", 1) if pid and inst_id: return (pid, inst_id) except Exception: pass return "", "" def _get_instance_if_running(self, job_execution): pid, inst_id = self._get_pid_and_inst_id(job_execution.oozie_job_id) if not pid or not inst_id or ( job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED): return None, None # TODO(tmckay): well, if there is a list index out of range # error here it probably means that the instance is gone. If we # have a job execution that is not terminated, and the instance # is gone, we should probably change the status somehow. # For now, do nothing. try: instance = c_u.get_instances(self.cluster, [inst_id])[0] except Exception: instance = None return pid, instance def _get_result_file(self, r, job_execution): result = os.path.join(job_execution.extra['spark-path'], "result") return r.execute_command("cat %s" % result, raise_when_error=False) def _check_pid(self, r, pid): ret, stdout = r.execute_command("ps hp %s" % pid, raise_when_error=False) return ret def _get_job_status_from_remote(self, r, pid, job_execution): # If the pid is there, it's still running if self._check_pid(r, pid) == 0: return {"status": edp.JOB_STATUS_RUNNING} # The process ended. 
Look in the result file to get the exit status ret, stdout = self._get_result_file(r, job_execution) if ret == 0: exit_status = stdout.strip() if exit_status == "0": return {"status": edp.JOB_STATUS_SUCCEEDED} # SIGINT will yield either -2 or 130 elif exit_status in ["-2", "130"]: return {"status": edp.JOB_STATUS_KILLED} # Well, process is done and result is missing or unexpected return {"status": edp.JOB_STATUS_DONEWITHERROR} def _job_script(self): path = "service/edp/resources/launch_command.py" return files.get_file_text(path) def _upload_wrapper_xml(self, where, job_dir, job_configs): xml_name = 'spark.xml' proxy_configs = job_configs.get('proxy_configs') configs = {} if proxy_configs: configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get( 'proxy_username') configs[sw.HADOOP_SWIFT_PASSWORD] = proxy_configs.get( 'proxy_password') configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get( 'proxy_trust_id') configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name else: cfgs = job_configs.get('configs', {}) targets = [sw.HADOOP_SWIFT_USERNAME, sw.HADOOP_SWIFT_PASSWORD] configs = {k: cfgs[k] for k in targets if k in cfgs} content = xmlutils.create_hadoop_xml(configs) with remote.get_remote(where) as r: dst = os.path.join(job_dir, xml_name) r.write_file_to(dst, content) return xml_name def _upload_job_files(self, where, job_dir, job, job_configs): def upload(r, dir, job_file, proxy_configs): dst = os.path.join(dir, job_file.name) raw_data = dispatch.get_raw_binary( job_file, proxy_configs=proxy_configs, remote=r) if isinstance(raw_data, dict) and raw_data["type"] == "path": dst = raw_data['path'] else: r.write_file_to(dst, raw_data) return dst def upload_builtin(r, dir, builtin): dst = os.path.join(dir, builtin['name']) r.write_file_to(dst, builtin['raw']) return dst builtin_libs = [] if edp.is_adapt_spark_for_swift_enabled( job_configs.get('configs', {})): path = 'service/edp/resources/edp-spark-wrapper.jar' name = 'builtin-%s.jar' % six.text_type(uuid.uuid4()) builtin_libs = [{'raw': files.get_file_text(path), 'name': name}] uploaded_paths = [] builtin_paths = [] with remote.get_remote(where) as r: mains = list(job.mains) if job.mains else [] libs = list(job.libs) if job.libs else [] for job_file in mains+libs: uploaded_paths.append( upload(r, job_dir, job_file, job_configs.get('proxy_configs'))) for builtin in builtin_libs: builtin_paths.append( upload_builtin(r, job_dir, builtin)) return uploaded_paths, builtin_paths def _check_driver_class_path(self, param_dict): cp = param_dict['driver-class-path'] or "" if param_dict['deploy-mode'] == 'client' and not ( cp.startswith(":") or cp.endswith(":")): cp += ":" param_dict['driver-class-path'] = " --driver-class-path " + cp def cancel_job(self, job_execution): pid, instance = self._get_instance_if_running(job_execution) if instance is not None: with remote.get_remote(instance) as r: ret, stdout = r.execute_command("kill -SIGINT %s" % pid, raise_when_error=False) if ret == 0: # We had some effect, check the status return self._get_job_status_from_remote(r, pid, job_execution) def get_job_status(self, job_execution): pid, instance = self._get_instance_if_running(job_execution) if instance is not None: with remote.get_remote(instance) as r: return self._get_job_status_from_remote(r, pid, job_execution) def _build_command(self, wf_dir, paths, builtin_paths, updated_job_configs): indep_params = {} # TODO(tmckay): for now, paths[0] is always assumed to be the app # jar and we generate paths in order (mains, then libs). 
# When we have a Spark job type, we can require a "main" and set # the app jar explicitly to be "main" indep_params["app_jar"] = paths.pop(0) indep_params["job_class"] = ( updated_job_configs["configs"]["edp.java.main_class"]) # If we uploaded builtins then we are using a wrapper jar. It will # be the first one on the builtin list and the original app_jar needs # to be added to the 'additional' jars if builtin_paths: indep_params["wrapper_jar"] = builtin_paths.pop(0) indep_params["wrapper_class"] = ( 'org.openstack.sahara.edp.SparkWrapper') wrapper_xml = self._upload_wrapper_xml(self.master, wf_dir, updated_job_configs) indep_params["wrapper_args"] = "%s %s" % ( wrapper_xml, indep_params["job_class"]) indep_params["addnl_files"] = wrapper_xml indep_params["addnl_jars"] = ",".join( [indep_params["wrapper_jar"]] + paths + builtin_paths) else: indep_params["addnl_jars"] = ",".join(paths) # All additional jars are passed with the --jars option if indep_params["addnl_jars"]: indep_params["addnl_jars"] = ( " --jars " + indep_params["addnl_jars"]) # Launch the spark job using spark-submit and deploy_mode = client # TODO(tmckay): we need to clean up wf_dirs on long running clusters # TODO(tmckay): probably allow for general options to spark-submit indep_params["args"] = updated_job_configs.get('args', []) indep_params["args"] = " ".join([su.inject_swift_url_suffix(arg) for arg in indep_params["args"]]) if indep_params.get("args"): indep_params["args"] = (" " + indep_params["args"]) mutual_dict = self.plugin_params.copy() mutual_dict.update(indep_params) # Handle driver classpath. Because of the way the hadoop # configuration is handled in the wrapper class, using # wrapper_xml, the working directory must be on the classpath self._check_driver_class_path(mutual_dict) if mutual_dict.get("wrapper_jar"): # Substrings which may be empty have spaces # embedded if they are non-empty cmd = ( '%(spark-user)s%(spark-submit)s%(driver-class-path)s' ' --files %(addnl_files)s' ' --class %(wrapper_class)s%(addnl_jars)s' ' --master %(master)s' ' --deploy-mode %(deploy-mode)s' ' %(app_jar)s %(wrapper_args)s%(args)s') % dict( mutual_dict) else: cmd = ( '%(spark-user)s%(spark-submit)s' ' --class %(job_class)s%(addnl_jars)s' ' --master %(master)s' ' --deploy-mode %(deploy-mode)s' ' %(app_jar)s%(args)s') % dict( mutual_dict) return cmd def run_job(self, job_execution): ctx = context.ctx() job = conductor.job_get(ctx, job_execution.job_id) # This will be a dictionary of tuples, (native_url, runtime_url) # keyed by data_source id data_source_urls = {} additional_sources, updated_job_configs = ( job_utils.resolve_data_source_references(job_execution.job_configs, job_execution.id, data_source_urls, self.cluster) ) job_execution = conductor.job_execution_update( ctx, job_execution, {"data_source_urls": job_utils.to_url_dict(data_source_urls)}) # Now that we've recorded the native urls, we can switch to the # runtime urls data_source_urls = job_utils.to_url_dict(data_source_urls, runtime=True) for data_source in additional_sources: if data_source and data_source.type == 'hdfs': h.configure_cluster_for_hdfs(self.cluster, data_source) break # It is needed in case we are working with Spark plugin self.plugin_params['master'] = ( self.plugin_params['master'] % {'host': self.master.hostname()}) # TODO(tmckay): wf_dir should probably be configurable. 
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(self.master, '/tmp/spark-edp',
                                               job, job_execution.id, "700")
        paths, builtin_paths = self._upload_job_files(
            self.master, wf_dir, job, updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) if p.startswith(wf_dir) else p
                 for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        cmd = self._build_command(wf_dir, paths, builtin_paths,
                                  updated_job_configs)

        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with remote.get_remote(self.master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod u+rwx,g+rx,o+rx %s" % wf_dir)
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # pid@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (stdout.strip() + "@" + self.master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'spark-path': wf_dir})

        # Hmm, no exception but something failed.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(_("Spark job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})

    def validate_job_execution(self, cluster, job, data):
        j.check_main_class_present(data, job)

    @staticmethod
    def get_possible_job_config(job_type):
        return {'job_config': {'configs': [], 'args': []}}

    @staticmethod
    def get_supported_job_types():
        return [edp.JOB_TYPE_SPARK]


class SparkShellJobEngine(SparkJobEngine):
    def _build_command(self, wf_dir, paths, builtin_paths,
                       updated_job_configs):
        main_script = paths.pop(0)
        args = " ".join(updated_job_configs.get('args', []))

        env_params = ""
        params = updated_job_configs.get('params', {})
        for key, value in params.items():
            env_params += "{key}={value} ".format(key=key, value=value)

        cmd = ("{env_params}{cmd} {main_script} {args}".format(
            cmd='/bin/sh', main_script=main_script, env_params=env_params,
            args=args))

        return cmd

    def validate_job_execution(self, cluster, job, data):
        # Shell job doesn't require any special validation
        pass

    @staticmethod
    def get_possible_job_config(job_type):
        return {'job_config': {'configs': {}, 'args': [], 'params': {}}}

    @staticmethod
    def get_supported_job_types():
        return [edp.JOB_TYPE_SHELL]
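
# A hedged illustration of the command string _build_command produces in the
# simple (non-wrapper) case; every value below is a hypothetical placeholder.
params = {
    'spark-user': '',
    'spark-submit': 'spark-submit',
    'job_class': 'org.example.Main',
    'addnl_jars': ' --jars dep.jar',
    'master': 'spark://master:7077',
    'deploy-mode': 'client',
    'app_jar': 'app.jar',
    'args': ' input output',
}
cmd = ('%(spark-user)s%(spark-submit)s'
       ' --class %(job_class)s%(addnl_jars)s'
       ' --master %(master)s'
       ' --deploy-mode %(deploy-mode)s'
       ' %(app_jar)s%(args)s') % params
# cmd is now:
#   spark-submit --class org.example.Main --jars dep.jar
#   --master spark://master:7077 --deploy-mode client app.jar input output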
{ "content_hash": "be90d9d4f3ef0d449d29cb1513eca9d4", "timestamp": "", "source": "github", "line_count": 380, "max_line_length": 79, "avg_line_length": 41.50526315789474, "alnum_prop": 0.5536393608927213, "repo_name": "zhangjunli177/sahara", "id": "f614fe25022fa2a76028ca10dccf601909fc5426", "size": "16390", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sahara/service/edp/spark/engine.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "3609" }, { "name": "Mako", "bytes": "29432" }, { "name": "PigLatin", "bytes": "792" }, { "name": "Python", "bytes": "3131969" }, { "name": "Shell", "bytes": "60900" } ], "symlink_target": "" }
"""dump_timeseries.py: Dumps the displacements as a timeseries in a data frame""" from argparse import ArgumentParser import logging import sys import os from os import path from time import time from glob import glob import pickle import pandas as pd import numpy as np import more_itertools from scipy.interpolate import interp1d from scipy.interpolate import UnivariateSpline from joblib import Parallel, delayed import psutil from multiprocessing import cpu_count p = psutil.Process(os.getpid()) p.set_cpu_affinity(list(range(cpu_count()))) __author__ = "Vivek Kulkarni" __email__ = "viveksck@gmail.com" LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s" def interpolate(x, xinter, values, finter): # Find all the points which we need to interpolate xmissing = [xm for xm in xinter if xm not in x] # Interpolate the function value at those points yintervalues = finter(xmissing) # Original points and values pairs orig_pairs = zip(x, values) # Interpolated points and values pairs interp_pairs = zip(xmissing, yintervalues) # Find the final values assert(len(orig_pairs) + len(interp_pairs) == len(xinter)) final_pairs = sorted(orig_pairs + interp_pairs) return final_pairs def create_word_time_series(old_df, new_df, w, sourcexinter, destxinter, metric_name="", interpolate=False): """ Create the time series for a word. """ sourcex = np.asarray(old_df[old_df.word == w].s.values, dtype=int) destx = np.asarray(new_df[new_df.word == w].s.values, dtype=int) old_values = old_df[old_df.word == w][metric_name].values new_values = new_df[new_df.word == w][metric_name].values try: fold = interp1d(sourcex, old_values, bounds_error=False) fnew = interp1d(destx, new_values, bounds_error=False) except: print "Failed to interpolate", w return None, None if interpolate: final_old_pairs = interpolate(sourcex, sourcexinter, old_values, fold) final_new_pairs = interpolate(destx, destxinter, new_values, fnew) xinterold, yinterold = zip(*final_old_pairs) xinternew, yinternew = zip(*final_new_pairs) else: yinterold = old_values yinternew = new_values OL = [w] NL = [w] OL.extend(yinterold) NL.extend(yinternew) return (OL, NL) def process_chunk(chunk, func, olddf, newdf, sourcexinter, destxinter, metric_name, interpolate): """ Process each chunk. """ results = [func(olddf, newdf, e, sourcexinter, destxinter, metric_name, interpolate) for e in chunk] return results def main(args): # get the arguments method = args.method win_size = args.win_size step = args.step metric_name = args.metric_name n_jobs = args.workers # Load the data. 
    L, H, olddf, newdf = pickle.load(open(args.filename))
    words = pd.Series(olddf.word.values.ravel()).unique()

    oldrows = []
    newrows = []
    sourcexrange = np.arange(args.mint, args.maxt, step)
    destxrange = np.arange(args.mint, args.maxt, step)
    if method == 'win':
        sourcexrange = sourcexrange[win_size:]
        destxrange = destxrange[:-win_size]

    if args.interpolate:
        sourcexinter = np.arange(sourcexrange[0], sourcexrange[-1] + 1, 1)
        destxinter = np.arange(destxrange[0], destxrange[-1] + 1, 1)
    else:
        sourcexinter = sourcexrange
        destxinter = destxrange

    # Construct the series
    assert(len(sourcexinter) == len(destxinter))
    # more_itertools.chunked expects an integer chunk size, so round up and
    # cast explicitly.
    chunk_sz = int(np.ceil(len(words) / float(n_jobs)))
    words_chunks = more_itertools.chunked(words, chunk_sz)
    timeseries_chunks = Parallel(n_jobs=n_jobs, verbose=20)(
        delayed(process_chunk)(chunk, create_word_time_series, olddf, newdf,
                               sourcexinter, destxinter,
                               metric_name=metric_name,
                               interpolate=args.interpolate)
        for chunk in words_chunks)

    timeseries = list(more_itertools.flatten(timeseries_chunks))

    # Dump the data frame
    for orow, newrow in timeseries:
        if orow and newrow:
            oldrows.append(orow)
            newrows.append(newrow)

    oldtimeseries = pd.DataFrame()
    newtimeseries = pd.DataFrame()
    header = ['word']
    header.extend(sourcexinter)
    newheader = ['word']
    newheader.extend(destxinter)
    oldtimeseries = oldtimeseries.from_records(oldrows, columns=header)
    oldtimeseries = oldtimeseries.fillna(method='backfill', axis=1)
    newtimeseries = newtimeseries.from_records(newrows, columns=newheader)
    newtimeseries = newtimeseries.fillna(method='backfill', axis=1)
    oldtimeseries.to_csv(args.sourcetimef, encoding='utf-8')
    newtimeseries.to_csv(args.endtimef, encoding='utf-8')


def debug(type_, value, tb):
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # we are in interactive mode or we don't have a tty-like device, so we
        # call the default hook
        sys.__excepthook__(type_, value, tb)
    else:
        import traceback
        import pdb
        # we are NOT in interactive mode, print the exception...
        traceback.print_exception(type_, value, tb)
        print("\n")
        # ...then start the debugger in post-mortem mode.
        pdb.pm()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-f", "--file", dest="filename", help="Input file")
    parser.add_argument("-i", "--interpolate", dest="interpolate",
                        help="interpolate", action='store_true', default=False)
    parser.add_argument("-s", "--sfile", dest="sourcetimef",
                        help="Output file for the source time series")
    parser.add_argument("-e", "--efile", dest="endtimef",
                        help="Output file for the destination time series")
    parser.add_argument("-l", "--log", dest="log", help="log verbosity level",
                        default="INFO")
    parser.add_argument("-m", "--min", dest="mint", help="starting time point",
                        default=1900, type=int)
    parser.add_argument("-n", "--max", dest="maxt",
                        help="ending timepoint(not included)", default=2010,
                        type=int)
    parser.add_argument("-st", "--step", dest="step", help="stepsize",
                        default=5, type=int)
    parser.add_argument("-me", "--method", dest="method", default="polar",
                        help="Method to use")
    parser.add_argument("-metric", "--metric_name", dest="metric_name",
                        default="cosine", help="Metric name to use")
    parser.add_argument("-w", "--win_size", dest="win_size", default=-1,
                        help="Window size to use if not polar", type=int)
    parser.add_argument("-workers", "--workers", dest="workers", default=1,
                        help="Maximum number of workers", type=int)

    args = parser.parse_args()
    if args.log == 'DEBUG':
        sys.excepthook = debug
    numeric_level = getattr(logging, args.log.upper(), None)
    logging.basicConfig(level=numeric_level, format=LOGFORMAT)
    main(args)
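As a quick illustration of the gap-filling step, here is a minimal, self-contained sketch of the same interp1d pattern on synthetic data; the yearly grid and the values are made up, only the mechanics mirror the script above.

import numpy as np
from scipy.interpolate import interp1d

# Synthetic example: values observed every 5 years, filled to a yearly grid.
x = np.array([1900, 1905, 1910])          # observed time points
values = np.array([0.2, 0.5, 0.3])        # observed metric values
f = interp1d(x, values, bounds_error=False)

xinter = np.arange(1900, 1911, 1)         # dense yearly grid
xmissing = [xm for xm in xinter if xm not in x]
filled = list(zip(x, values)) + list(zip(xmissing, f(xmissing)))
series = [y for _, y in sorted(filled)]   # one value per year, 1900..1910
assert len(series) == len(xinter)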
{ "content_hash": "cd304a49b009dfe6ef7d6171016c0229", "timestamp": "", "source": "github", "line_count": 176, "max_line_length": 135, "avg_line_length": 39.40340909090909, "alnum_prop": 0.6462869502523432, "repo_name": "viveksck/langchangetrack", "id": "16b81cb6976031f79d155ea40622fb0d05c7d311", "size": "6982", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "langchangetrack/tsconstruction/dump_timeseries.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1531" }, { "name": "Python", "bytes": "107572" }, { "name": "Shell", "bytes": "4295" } ], "symlink_target": "" }
import os from pybmpdb import summary def test_run(): report = Report() report.makeInputFiles(report.std_tables) class Report: def __init__(self): self.std_tables = ["bacteria", "metals"] self.std_docs = ["Bacteria", "Metals"] self.cbay_tables = [ "tss_cbay", "nutrients_cbay", "tss_noncbay", "nutrients_noncbay", ] self.cbay_docs = [ "Total Suspended Solids in Chesapeake Bay", "Nutrients in Chesapeake Bay", "Total Suspended Solids outside of Chesapeake Bay", "Nutrients outside of Chesapeake Bay", ] self.md_tables = ["metals_md", "tss_md", "nutrients_md"] self.md_docs = [ "Metals (Manufactured devices only)", "TSS (Manufactured devices only)", "Nutrients (Manufactured devices only)", ] self.all_tables = [ "bacteria", "metals", "tss", "nutrients", "tss_cbay", "nutrients_cbay", "tss_noncbay", "nutrients_noncbay", "metals_md", "tss_md", "nutrients_md", ] self.all_docs = [ "Bacteria", "Metals", "Total Suspended Solids", "Nutrients", "Total Suspended Solids in Chesapeake Bay", "Nutrients in Chesapeake Bay", "Total Suspended Solids outside of Chesapeake Bay", "Nutrients outside of Chesapeake Bay", "Metals (Manufactured devices only)", "TSS (Manufactured devices only)", "Nutrients (Manufactured devices only)", ] self.sbpat_tables = ["bacteria_sbpat", "tss", "nutrients", "metals"] def makeSBPAT_tables(self): for t in self.sbpat_tables: print("\n\nsummarizing %s for SBPAT" % t) summary.sbpat_stats(t) def makeBoxplots(self, tables): for t in tables: print("\n\nboxplot summaries for %s" % t) summary.paramBoxplots(t) def makeInputFiles(self, tables): for t in tables: print("\n\nmaking input files for %s" % t) summary.latexInputFile(t, regenFigs=True) def makeReports(self, tables, docs): versions = ["draft", "final"] for t, d in zip(tables, docs): for v in versions: print("\n\nsummarizing %s" % t) summary.latexReport(t, d, template=v) def compileReport(self, docs, version="draft"): os.chdir("bmp/tex") for d in docs: filename = "%s_%s.tex" % (version, d.replace(" ", "")) print("Compiling report %s" % filename) os.system("pdflatex -quiet %s" % filename) print("Updating references in %s" % filename) os.system("pdflatex -quiet %s" % filename) os.chdir("../..") def makeTables(self, tables): for t in tables: print("\n\nsummary table for %s" % t) summary.paramTables(t) def dumpData(self, tables): for t in tables: print("\n\ndumping %s table" % t) summary.dataDump(t) def fullSuite(self, tables, docs, version): self.dumpData(tables) self.makeTables(tables) self.makeBoxplots(tables) self.makeReports(tables, docs) self.makeInputFiles(tables) self.compileReport(docs, version=version)
{ "content_hash": "f7fb4e21b62bc5736f3d23629c34c003", "timestamp": "", "source": "github", "line_count": 115, "max_line_length": 76, "avg_line_length": 30.669565217391305, "alnum_prop": 0.5318967961440317, "repo_name": "phobson/pybmpdb", "id": "5788f7ad32d3f834cbd9b191c29afee4becf7b36", "size": "3527", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pybmpdb/reports.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "223253" }, { "name": "TSQL", "bytes": "1575" }, { "name": "TeX", "bytes": "17939" } ], "symlink_target": "" }
"""Base Estimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import os import tempfile import time import six from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import layers from tensorflow.contrib import losses from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import tensor_signature from tensorflow.contrib.learn.python.learn.graph_actions import evaluate from tensorflow.contrib.learn.python.learn.graph_actions import infer from tensorflow.contrib.learn.python.learn.graph_actions import train from tensorflow.contrib.learn.python.learn.io import data_feeder from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import device_setter from tensorflow.python.training import saver # Default metrics for evaluation. _EVAL_METRICS = { 'regression': { 'mean_squared_error': losses.sum_of_squares, }, 'classification': { 'logistic': losses.sigmoid_cross_entropy, },} class ModeKeys(object): """Standard names for model modes. The following standard keys are defined: * `TRAIN`: training mode. * `EVAL`: evaluation mode. * `INFER`: inference mode. """ TRAIN = 'train' EVAL = 'eval' INFER = 'infer' def _get_input_fn(x, y, batch_size): # TODO(ipoloshukin): Remove this when refactor of data_feeder is done if hasattr(x, 'create_graph') and hasattr(y, 'create_graph'): def input_fn(): return x.create_graph(), y.create_graph() return input_fn, None df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size) return df.input_builder, df.get_feed_dict_fn() def _get_predict_input_fn(x, batch_size): # TODO(ipoloshukin): Remove this when refactor of data_feeder is done if hasattr(x, 'create_graph'): def input_fn(): return x.create_graph() return input_fn, None df = data_feeder.setup_train_data_feeder(x, None, n_classes=None, batch_size=batch_size) return df.input_builder, df.get_feed_dict_fn() class BaseEstimator(sklearn.BaseEstimator): """Abstract BaseEstimator class to train and evaluate TensorFlow models. Concrete implementation of this class should provide following functions: * _get_train_ops * _get_eval_ops * _get_predict_ops It may override _get_default_metric_functions. `Estimator` implemented below is a good example of how to use this class. Parameters: model_dir: Directory to save model parameters, graph and etc. """ __metaclass__ = abc.ABCMeta # TODO(wicke): Remove this once launcher takes over config functionality _Config = run_config.RunConfig # pylint: disable=invalid-name def __init__(self, model_dir=None): # Model directory. self._model_dir = model_dir if self._model_dir is None: self._model_dir = tempfile.mkdtemp() logging.info('Using temporary folder as model directory: %s', self._model_dir) # Create a run configuration self._config = BaseEstimator._Config() # Set device function depending if there are replicas or not. if self._config.num_ps_replicas > 0: ps_ops = ['Variable', 'AutoReloadVariable'] self._device_fn = device_setter.replica_device_setter( ps_tasks=self._config.num_ps_replicas, merge_devices=False, ps_ops=ps_ops) else: self._device_fn = None # Features and targets TensorSingature objects. 
    self._features_info = None
    self._targets_info = None

  @abc.abstractmethod
  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Expected to be overridden by sub-classes that require custom support.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    pass

  @abc.abstractmethod
  def _get_predict_ops(self, features):
    """Method that builds model graph and returns prediction ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      predictions: `Tensor` or `dict` of `Tensor` objects.
    """
    pass

  def _get_eval_ops(self, features, targets, metrics):
    """Method that builds model graph and returns evaluation ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
      metrics: `dict` of functions that take predictions and targets.

    Returns:
      metrics: `dict` of `Tensor` objects.
    """
    predictions = self._get_predict_ops(features)
    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(predictions, targets)
    return result

  def _get_feature_ops_from_example(self, examples_batch):
    """Method that returns features given the batch of examples.

    This method will be used to export model into a server.

    Args:
      examples_batch: batch of tf.Example

    Returns:
      features: `Tensor` or `dict` of `Tensor` objects.
    """
    raise NotImplementedError('_get_feature_ops_from_example not implemented '
                              'in BaseEstimator')

  def _get_default_metric_functions(self):
    """Method that provides default metric operations.

    This function is intended to be overridden by sub-classes.

    Returns:
      `dict` of functions that take predictions and targets `Tensor` objects
      and return `Tensor`.
    """
    return {}

  def fit(self, x, y, steps, batch_size=32, monitor=None):
    """Trains a model given training data x and y.

    Args:
      x: matrix or tensor of shape [n_samples, n_features...]. Can be
         iterator that returns arrays of features. The training input
         samples for fitting the model.
      y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
         iterator that returns array of targets. The training target values
         (class labels in classification, real numbers in regression).
      steps: number of steps to train model for.
      batch_size: minibatch size to use on the input, defaults to 32.
      monitor: monitor object to print training progress and invoke early
               stopping.

    Returns:
      Returns self.
    """
    input_fn, feed_fn = _get_input_fn(x, y, batch_size)
    return self._train_model(input_fn=input_fn,
                             feed_fn=feed_fn,
                             steps=steps,
                             monitor=monitor)

  def train(self, input_fn, steps, monitor=None):
    """Trains a model given input builder function.

    Args:
      input_fn: Input builder function, returns tuple of dicts or
                dict and Tensor.
      steps: number of steps to train model for.
      monitor: monitor object to print training progress and invoke early
               stopping.

    Returns:
      Returns self.
    """
    return self._train_model(input_fn=input_fn, steps=steps, monitor=monitor)

  def partial_fit(self, x, y, steps=1, batch_size=32, monitor=None):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This either can
    implement iterative training or out-of-core/online training.

    This is especially useful when the whole dataset is too big to
    fit in memory at the same time. Or when model is taking long time
    to converge, and you want to split up training into subparts.
    Args:
      x: matrix or tensor of shape [n_samples, n_features...]. Can be
         iterator that returns arrays of features. The training input
         samples for fitting the model.
      y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
         iterator that returns array of targets. The training target values
         (class label in classification, real numbers in regression).
      steps: number of steps to train model for.
      batch_size: minibatch size to use on the input, defaults to 32.
      monitor: Monitor object to print training progress and invoke early
               stopping.

    Returns:
      Returns self.
    """
    input_fn, feed_fn = _get_input_fn(x, y, batch_size)
    return self._train_model(input_fn=input_fn,
                             feed_fn=feed_fn,
                             steps=steps,
                             monitor=monitor)

  def evaluate(self,
               x=None,
               y=None,
               input_fn=None,
               feed_fn=None,
               batch_size=32,
               steps=100,
               metrics=None):
    """Evaluates given model with provided evaluation data.

    Args:
      x: features.
      y: targets.
      input_fn: Input function. If set, x and y must be None.
      feed_fn: Function creating a feed dict every time it is called. Called
        once per iteration.
      batch_size: minibatch size to use on the input, defaults to 32. Ignored
        if input_fn is set.
      steps: Number of steps to evaluate for.
      metrics: Dict of metric ops to run.

    Returns:
      Returns `dict` with evaluation results.

    Raises:
      ValueError: If x or y are not None while input_fn or feed_fn is not
        None.
    """
    if (x is not None or y is not None) and input_fn is not None:
      raise ValueError('Either x and y or input_fn must be None.')
    if input_fn is None:
      assert x is not None
      input_fn, feed_fn = _get_input_fn(x, y, batch_size)
    return self._evaluate_model(input_fn=input_fn,
                                feed_fn=feed_fn,
                                steps=steps,
                                metrics=metrics)

  def predict(self, x, axis=None, batch_size=None):
    """Returns predictions for given features.

    Args:
      x: features.
      axis: Axis on which to argmax (for classification).
      batch_size: Override default batch size.

    Returns:
      Numpy array of predicted classes or regression values.
    """
    return self._infer_model(x=x, batch_size=batch_size, axis=axis)

  def predict_proba(self, x, batch_size=None):
    """Returns prediction probabilities for given features (classification).

    Args:
      x: features.
      batch_size: Override default batch size.

    Returns:
      Numpy array of predicted probabilities.
    """
    return self._infer_model(x=x, batch_size=batch_size, proba=True)

  def _check_inputs(self, features, targets):
    if self._features_info is not None:
      if not tensor_signature.tensors_compatible(features,
                                                 self._features_info):
        raise ValueError('Features are incompatible with given information. '
                         'Given features: %s, required signatures: %s.' %
                         (str(features), str(self._features_info)))
    else:
      self._features_info = tensor_signature.create_signatures(features)
    if self._targets_info is not None:
      if not tensor_signature.tensors_compatible(targets, self._targets_info):
        raise ValueError('Targets are incompatible with given information. '
                         'Given targets: %s, required signatures: %s.' %
                         (str(targets), str(self._targets_info)))
    else:
      self._targets_info = tensor_signature.create_signatures(targets)

  def _train_model(self,
                   input_fn,
                   steps,
                   feed_fn=None,
                   device_fn=None,
                   monitor=None,
                   log_every_steps=100,
                   fail_on_nan_loss=True):
    if self._config.execution_mode not in ('all', 'train'):
      return

    # Stagger startup of worker sessions based on task id.
    sleep_secs = min(self._config.training_worker_max_startup_secs,
                     self._config.task *
                     self._config.training_worker_session_startup_stagger_secs)
    if sleep_secs:
      logging.info('Waiting %d secs before starting task %d.', sleep_secs,
                   self._config.task)
      time.sleep(sleep_secs)

    # Device allocation
    device_fn = device_fn or self._device_fn

    with ops.Graph().as_default() as g, g.device(device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      train_op, loss_op = self._get_train_ops(features, targets)
      return train(
          graph=g,
          output_dir=self._model_dir,
          train_op=train_op,
          loss_op=loss_op,
          global_step_tensor=global_step,
          log_every_steps=log_every_steps,
          supervisor_is_chief=(self._config.task == 0),
          supervisor_master=self._config.master,
          feed_fn=feed_fn,
          max_steps=steps,
          fail_on_nan_loss=fail_on_nan_loss)

  def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):
    if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):
      return

    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    eval_dir = os.path.join(self._model_dir, 'eval')
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      eval_dict = self._get_eval_ops(features, targets,
                                     metrics or
                                     self._get_default_metric_functions())
      eval_results, _ = evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
          global_step_tensor=global_step,
          supervisor_master=self._config.master,
          feed_fn=feed_fn,
          max_steps=steps)
      return eval_results

  def _infer_model(self, x, batch_size=None, axis=None, proba=False):
    # Converts inputs into tf.DataFrame / tf.Series.
    batch_size = -1 if batch_size is None else batch_size
    input_fn, feed_fn = _get_predict_input_fn(x, batch_size)

    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      contrib_framework.create_global_step(g)
      features, _ = input_fn()
      feed_dict = feed_fn() if feed_fn is not None else None
      predictions = self._get_predict_ops(features)
      if not isinstance(predictions, dict):
        predictions = {'predictions': predictions}
      # TODO(ipolosukhin): Support batching
      return infer(checkpoint_path, predictions, feed_dict=feed_dict)


class Estimator(BaseEstimator):
  """Estimator class is the basic TensorFlow model trainer/evaluator.

  Parameters:
    model_fn: Model function, takes features and targets tensors or dicts of
              tensors and returns predictions and loss tensors.
              E.g. `(features, targets) -> (predictions, loss)`.
    model_dir: Directory to save model parameters, graph and etc.
    classification: boolean, true if classification problem.
    learning_rate: learning rate for the model.
    optimizer: optimizer for the model, can be:
               string: name of optimizer, like 'SGD', 'Adam', 'Adagrad',
                 'Ftrl', 'Momentum', 'RMSProp'. Full list in
                 contrib/layers/optimizers.py
               class: sub-class of Optimizer
                 (like tf.train.GradientDescentOptimizer).
    clip_gradients: clip_norm value for call to `clip_by_global_norm`. None
                    denotes no gradient clipping.
""" def __init__(self, model_fn=None, model_dir=None, classification=True, learning_rate=0.01, optimizer='SGD', clip_gradients=None): super(Estimator, self).__init__(model_dir=model_dir) self._model_fn = model_fn self._classification = classification if isinstance(optimizer, six.string_types): if optimizer not in layers.OPTIMIZER_CLS_NAMES: raise ValueError( 'Optimizer name should be one of [%s], you provided %s.' % (', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer)) self.optimizer = optimizer self.learning_rate = learning_rate self.clip_gradients = clip_gradients def _get_train_ops(self, features, targets): """Method that builds model graph and returns trainer ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. targets: `Tensor` or `dict` of `Tensor` objects. Returns: Tuple of train `Operation` and loss `Tensor`. """ _, loss = self._model_fn(features, targets, ModeKeys.TRAIN) train_op = layers.optimize_loss( loss, contrib_framework.get_global_step(), learning_rate=self.learning_rate, optimizer=self.optimizer, clip_gradients=self.clip_gradients) return train_op, loss def _get_eval_ops(self, features, targets, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. targets: `Tensor` or `dict` of `Tensor` objects. metrics: `dict` of functions that take predictions and targets. Returns: metrics: `dict` of `Tensor` objects. """ predictions, loss = self._model_fn(features, targets, ModeKeys.EVAL) result = {'loss': loss} if isinstance(targets, dict) and len(targets) == 1: # Unpack single target into just tensor. targets = targets[targets.keys()[0]] for name, metric in six.iteritems(metrics): # TODO(ipolosukhin): Add support for multi-head metrics. result[name] = metric(predictions, targets) return result def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: predictions: `Tensor` or `dict` of `Tensor` objects. """ targets = tensor_signature.create_placeholders_from_signatures( self._targets_info) predictions, _ = self._model_fn(features, targets, ModeKeys.INFER) return predictions def _get_default_metric_functions(self): """Method that provides default metric operations. Returns: a dictionary of metric operations. """ return _EVAL_METRICS[ 'classification' if self._classification else 'regression'] def _get_feature_ops_from_example(self, examples_batch): """Unimplemented. TODO(vihanjain): We need a way to parse tf.Example into features. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. Raises: Exception: Unimplemented """ raise NotImplementedError('_get_feature_ops_from_example not yet ' 'implemented')
{ "content_hash": "b9588f50551de669b601067318dc69eb", "timestamp": "", "source": "github", "line_count": 549, "max_line_length": 80, "avg_line_length": 36.032786885245905, "alnum_prop": 0.6480638964715398, "repo_name": "plowman/python-mcparseface", "id": "f752b2b3a9c27bd7e5fe5ddc77d33af8b95e02a2", "size": "20385", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "models/syntaxnet/tensorflow/tensorflow/contrib/learn/python/learn/estimators/estimator.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1130" }, { "name": "C", "bytes": "792202" }, { "name": "C#", "bytes": "1883817" }, { "name": "C++", "bytes": "17605262" }, { "name": "CMake", "bytes": "68613" }, { "name": "CSS", "bytes": "1297" }, { "name": "Emacs Lisp", "bytes": "7809" }, { "name": "GCC Machine Description", "bytes": "1" }, { "name": "Go", "bytes": "8549" }, { "name": "HTML", "bytes": "764474" }, { "name": "Java", "bytes": "2864887" }, { "name": "JavaScript", "bytes": "404087" }, { "name": "Jupyter Notebook", "bytes": "1772913" }, { "name": "M4", "bytes": "27350" }, { "name": "Makefile", "bytes": "122687" }, { "name": "Objective-C", "bytes": "2664448" }, { "name": "Objective-C++", "bytes": "2897" }, { "name": "Protocol Buffer", "bytes": "904354" }, { "name": "Python", "bytes": "7674638" }, { "name": "Ruby", "bytes": "83163" }, { "name": "Shell", "bytes": "277892" }, { "name": "Swift", "bytes": "20550" }, { "name": "TypeScript", "bytes": "403037" }, { "name": "VimL", "bytes": "3759" } ], "symlink_target": "" }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging
import pytest
import time

import ray
import ray.ray_constants as ray_constants
from ray.tests.cluster_utils import Cluster
from ray.tests.conftest import generate_internal_config_map

logger = logging.getLogger(__name__)


def test_cluster():
    """Basic test for adding and removing nodes in cluster."""
    g = Cluster(initialize_head=False)
    node = g.add_node()
    node2 = g.add_node()
    assert node.remaining_processes_alive()
    assert node2.remaining_processes_alive()
    g.remove_node(node2)
    g.remove_node(node)
    assert not any(n.any_processes_alive() for n in [node, node2])


def test_shutdown():
    g = Cluster(initialize_head=False)
    node = g.add_node()
    node2 = g.add_node()
    g.shutdown()
    assert not any(n.any_processes_alive() for n in [node, node2])


@pytest.mark.parametrize(
    "ray_start_cluster_head",
    [generate_internal_config_map(num_heartbeats_timeout=20)],
    indirect=True)
def test_internal_config(ray_start_cluster_head):
    """Checks that the internal configuration setting works.

    We set the cluster to timeout nodes after 2 seconds of no heartbeats.
    We then remove a node, wait for 1 second to check that the cluster is
    out of sync, then wait another 2 seconds (giving 1 second of leeway) to
    check that the client has timed out.
    """
    cluster = ray_start_cluster_head
    worker = cluster.add_node()
    cluster.wait_for_nodes()

    cluster.remove_node(worker)
    time.sleep(1)
    assert ray.cluster_resources()["CPU"] == 2

    time.sleep(2)
    assert ray.cluster_resources()["CPU"] == 1


def test_wait_for_nodes(ray_start_cluster_head):
    """Unit test for `Cluster.wait_for_nodes`.

    Adds 4 workers, waits, then removes 4 workers, waits,
    then adds 1 worker, waits, and removes 1 worker, waits.
    """
    cluster = ray_start_cluster_head
    workers = [cluster.add_node() for i in range(4)]
    cluster.wait_for_nodes()
    [cluster.remove_node(w) for w in workers]
    cluster.wait_for_nodes()

    assert ray.cluster_resources()["CPU"] == 1
    worker2 = cluster.add_node()
    cluster.wait_for_nodes()
    cluster.remove_node(worker2)
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 1


def test_worker_plasma_store_failure(ray_start_cluster_head):
    cluster = ray_start_cluster_head
    worker = cluster.add_node()
    cluster.wait_for_nodes()
    # Log monitor doesn't die for some reason
    worker.kill_log_monitor()
    worker.kill_reporter()
    worker.kill_plasma_store()
    worker.all_processes[ray_constants.PROCESS_TYPE_RAYLET][0].process.wait()
    assert not worker.any_processes_alive(), worker.live_processes()
{ "content_hash": "ed523f6a1edaa69ac8afdb8c8ef204b2", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 77, "avg_line_length": 30.955555555555556, "alnum_prop": 0.6920315865039484, "repo_name": "atumanov/ray", "id": "979f4728330f9d394ad6d1c73da768fa039ee36e", "size": "2786", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/ray/tests/test_multi_node_2.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "20715" }, { "name": "C++", "bytes": "1036803" }, { "name": "CSS", "bytes": "9262" }, { "name": "Dockerfile", "bytes": "3411" }, { "name": "HTML", "bytes": "32704" }, { "name": "Java", "bytes": "517715" }, { "name": "JavaScript", "bytes": "8178" }, { "name": "Jupyter Notebook", "bytes": "1610" }, { "name": "Python", "bytes": "3081422" }, { "name": "Ruby", "bytes": "956" }, { "name": "Shell", "bytes": "76928" }, { "name": "Smarty", "bytes": "955" } ], "symlink_target": "" }
import urllib import simplejson googleGeocodeUrl = 'http://maps.googleapis.com/maps/api/geocode/json?' def get_coordinates(query, from_sensor=False): query = query.encode('utf-8') params = { 'address': query, 'sensor': "true" if from_sensor else "false" } url = googleGeocodeUrl + urllib.urlencode(params) json_response = urllib.urlopen(url) response = simplejson.loads(json_response.read()) if response['results']: location = response['results'][0]['geometry']['location'] latitude, longitude = location['lat'], location['lng'] print query, latitude, longitude else: latitude, longitude = None, None print query, "<no results>" return latitude, longitude
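A hedged usage sketch: the address is arbitrary, and the keyless maps.googleapis.com endpoint used above may now reject unauthenticated requests.

# Illustrative only; results depend on the live Google geocoding service.
lat, lng = get_coordinates(u'Mountain View, CA')
if lat is not None:
    print 'coordinates: %s, %s' % (lat, lng)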
{ "content_hash": "dbc83f2c2bafcebc28271a103120e45b", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 70, "avg_line_length": 35, "alnum_prop": 0.6363636363636364, "repo_name": "ameybhole77/BE-project", "id": "e3976c8b27c51495caca3df93adf8cf374ede573", "size": "770", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Code/geo.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7642" }, { "name": "Shell", "bytes": "2348" } ], "symlink_target": "" }
import os from setuptools import setup def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name="Demo of launcher.", version="0.0.0", author="Matthew Goodman", author_email="meawoppl@gmail.com", description="An example of how to use the conda launcher.", license="BSD", url="http://www.github.com/meawoppl/hello-launcher", packages=['hello'], long_description=read('README.md') )
{ "content_hash": "43d3c8b2ff5807a2910a8e33ef4d9563", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 70, "avg_line_length": 25.666666666666668, "alnum_prop": 0.6645021645021645, "repo_name": "meawoppl/hello-launcher", "id": "c06d236c0d488d8e91eae9f46a8fe6356fbf774a", "size": "462", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "551" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations import django_mobile_app_distribution.models from django.conf import settings import django.core.files.storage class Migration(migrations.Migration): dependencies = [ ('auth', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='App', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=200, verbose_name='App name')), ('comment', models.CharField(max_length=200, null=True, verbose_name='Comment', blank=True)), ('version', models.CharField(max_length=200, verbose_name='Bundle version')), ('updatedAt', models.DateTimeField(auto_now=True, auto_now_add=True)), ('createdAt', models.DateTimeField(auto_now_add=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='AndroidApp', fields=[ ('app_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_mobile_app_distribution.App', on_delete=models.deletion.CASCADE)), ('operating_system', models.CharField(default=b'Android', verbose_name='Operating System', max_length=50, editable=False, choices=[(b'iOS', b'iOS'), (b'Android', b'Android')])), ('app_binary', models.FileField(upload_to=django_mobile_app_distribution.models.normalize_android_filename, storage=django.core.files.storage.FileSystemStorage(location=b'/Users/moritz/Alp-Phone/Projects/mobile_app_distribution/migrations_generator/migrations_generator/android'), verbose_name='APK file')), ], options={ 'ordering': ('name', 'operating_system', '-version', '-updatedAt'), 'verbose_name': 'Android app', 'verbose_name_plural': 'Android apps', }, bases=('django_mobile_app_distribution.app',), ), migrations.CreateModel( name='IosApp', fields=[ ('app_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_mobile_app_distribution.App', on_delete=models.deletion.CASCADE)), ('operating_system', models.CharField(default=b'iOS', verbose_name='Operating System', max_length=50, editable=False, choices=[(b'iOS', b'iOS'), (b'Android', b'Android')])), ('app_binary', models.FileField(upload_to=django_mobile_app_distribution.models.normalize_ios_filename, verbose_name='IPA file')), ('bundle_identifier', models.CharField(default=b'', help_text='e.g. 
org.example.app', max_length=200, verbose_name='Bundle identifier')), ], options={ 'ordering': ('name', 'operating_system', '-version', '-updatedAt'), 'verbose_name': 'iOS App', 'verbose_name_plural': 'iOS Apps', }, bases=('django_mobile_app_distribution.app',), ), migrations.CreateModel( name='UserInfo', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('language', models.CharField(default=b'en', max_length=20, choices=[(b'en', b'English'), (b'de', b'Deutsch')])), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE)), ], options={ 'verbose_name': 'Extended user info', 'verbose_name_plural': 'Extended user info', }, bases=(models.Model,), ), migrations.AddField( model_name='app', name='groups', field=models.ManyToManyField(related_name='apps', default=None, to='auth.Group', blank=True, null=True, verbose_name='Groups'), preserve_default=True, ), migrations.AddField( model_name='app', name='user', field=models.ForeignKey(related_name='apps', default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True, verbose_name='User', on_delete=models.deletion.CASCADE), preserve_default=True, ), ]
{ "content_hash": "ce1296f96a904c198a23edf459e454d5", "timestamp": "", "source": "github", "line_count": 85, "max_line_length": 323, "avg_line_length": 53.411764705882355, "alnum_prop": 0.5944933920704846, "repo_name": "Proper-Job/django-mobile-app-distribution", "id": "e9c085d3e472f549192ad360ef7bf52c0e96a34e", "size": "4564", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "django_mobile_app_distribution/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "340" }, { "name": "HTML", "bytes": "6329" }, { "name": "Python", "bytes": "38337" } ], "symlink_target": "" }
import os on_rtd = os.environ.get('READTHEDOCS') == 'True' import sys REPO_ROOT = os.path.abspath('..') sys.path.insert(0, REPO_ROOT) # Set $PYTHONPATH for nbsphinx to run notebooks pythonpath = os.environ.get('PYTHONPATH', '') if pythonpath: os.environ['PYTHONPATH'] = REPO_ROOT + ':' + pythonpath else: os.environ['PYTHONPATH'] = REPO_ROOT # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'nbsphinx', 'sphinx.ext.autodoc' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Magnics' copyright = '2017, Computational Modelling Group' author = 'Computational Modelling Group' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # if not on_rtd: html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'Magnicsdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, 'Magnics.tex', 'Magnics Documentation', 'Computational Modelling Group', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'magnics', 'Magnics Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Magnics', 'Magnics Documentation', author, 'Magnics', 'One line description of project.', 'Miscellaneous'), ]
{ "content_hash": "08b34c2359687a542ea696d4aef768cc", "timestamp": "", "source": "github", "line_count": 149, "max_line_length": 79, "avg_line_length": 29.86577181208054, "alnum_prop": 0.6631460674157303, "repo_name": "computationalmodelling/magnics", "id": "946a3c161d73accdef9fb8315048eb54c4c0f8e3", "size": "5133", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Jupyter Notebook", "bytes": "75731" }, { "name": "Python", "bytes": "17184" }, { "name": "Shell", "bytes": "130" } ], "symlink_target": "" }
from tensorforce.core import layer_modules, TensorDict, TensorsSpec, tf_function, tf_util from tensorforce.core.policies import ActionValue, ParametrizedPolicy class ParametrizedActionValue(ActionValue, ParametrizedPolicy): """ Policy which parametrizes an action-value function, conditioned on the output of a neural network processing the input state (specification key: `parametrized_action_value`). Args: network ('auto' | specification): Policy network configuration, see [networks](../modules/networks.html) (<span style="color:#00C000"><b>default</b></span>: 'auto', automatically configured network). device (string): Device name (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module). l2_regularization (float >= 0.0): Scalar controlling L2 regularization (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module). name (string): <span style="color:#0000C0"><b>internal use</b></span>. states_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>. auxiliaries_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>. internals_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>. actions_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>. """ # Network first def __init__( self, network='auto', *, device=None, l2_regularization=None, name=None, states_spec=None, auxiliaries_spec=None, internals_spec=None, actions_spec=None ): super().__init__( device=device, l2_regularization=l2_regularization, name=name, states_spec=states_spec, auxiliaries_spec=auxiliaries_spec, actions_spec=actions_spec ) inputs_spec = TensorsSpec() if self.states_spec.is_singleton(): inputs_spec['states'] = self.states_spec.singleton() else: inputs_spec['states'] = self.states_spec if self.actions_spec.is_singleton(): inputs_spec['actions'] = self.actions_spec.singleton() else: inputs_spec['actions'] = self.actions_spec ParametrizedPolicy.__init__(self=self, network=network, inputs_spec=inputs_spec) output_spec = self.network.output_spec() # Action value self.value = self.submodule( name='value', module='linear', modules=layer_modules, size=0, input_spec=output_spec ) def get_architecture(self): return 'Network: {}\nAction-value: {}'.format( self.network.get_architecture().replace('\n', '\n '), self.value.get_architecture().replace('\n', '\n ') ) @tf_function(num_args=5) def next_internals(self, *, states, horizons, internals, actions, deterministic, independent): inputs = TensorDict() if self.states_spec.is_singleton(): inputs['states'] = states.singleton() else: inputs['states'] = states if self.actions_spec.is_singleton(): inputs['actions'] = actions.singleton() else: inputs['actions'] = actions return super().next_internals( states=inputs, horizons=horizons, internals=internals, deterministic=deterministic, independent=independent ) @tf_function(num_args=5) def action_value(self, *, states, horizons, internals, auxiliaries, actions): inputs = TensorDict() if self.states_spec.is_singleton(): inputs['states'] = states.singleton() else: inputs['states'] = states if self.actions_spec.is_singleton(): inputs['actions'] = actions.singleton() else: inputs['actions'] = actions deterministic = tf_util.constant(value=True, dtype='bool') embedding, _ = self.network.apply( x=inputs, horizons=horizons, internals=internals, deterministic=deterministic, independent=True ) return self.value.apply(x=embedding)
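As a usage hint, hedged because the surrounding agent arguments vary across Tensorforce releases: the class is selected through the specification key documented in its docstring, typically as a policy spec embedded in an agent configuration.

# Illustrative policy specification only; the agent configuration it would be
# embedded in is omitted, since those arguments differ between versions.
policy_spec = dict(type='parametrized_action_value', network='auto')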
{ "content_hash": "22324025d5ed28d51c41f34ba36c2fc1", "timestamp": "", "source": "github", "line_count": 94, "max_line_length": 99, "avg_line_length": 44.3936170212766, "alnum_prop": 0.6240115025161754, "repo_name": "reinforceio/tensorforce", "id": "f2bcd074cf6a0835526bd10fc0137ecca3324820", "size": "4857", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorforce/core/policies/parametrized_action_value.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "869657" } ], "symlink_target": "" }
""" Common functions and regex expressions for github """ import re GITHUB_REGEX = re.compile( r'^(https://github.com/|\w+@github\.com:)(?P<repository>[a-zA-Z0-9_\-]+/[a-zA-Z0-9_\-]+)\.git$') GITHUB_ISSUE_REGEX = re.compile(r'^(([a-zA-Z0-9_\-]*|[a-zA-Z0-9_\-]+/[a-zA-Z0-9_\-]+)#|GH-)(?P<issue>\d+)$') def extract_repository_from_url(git_url): """ :param git_url: :return: :rtype: str | None >>> extract_repository_from_url('https://github.com/jmcs/turnstile.git') 'jmcs/turnstile' >>> extract_repository_from_url('git@github.com:jmcs/senza.git') 'jmcs/senza' >>> extract_repository_from_url('git@bitbucket.org:jmcs/somerepo.git') is None True """ match = GITHUB_REGEX.match(git_url) if match: return match.group('repository') else: return None def extract_issue_number(github_reference): """ Extracts the issue number from the github reference :param github_reference: :type github_reference: str :return: :rtype: int | None >>> extract_issue_number('#42') 42 >>> extract_issue_number('GH-24') 24 >>> extract_issue_number('jmcs#26') 26 >>> extract_issue_number('jmcs/turnstile#36') 36 >>> extract_issue_number('JIRA-1000') is None True """ match = GITHUB_ISSUE_REGEX.match(github_reference) if match: return int(match.group('issue')) else: return None
{ "content_hash": "cb1fa6735dd9cd77315efeb4d8d1f214", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 108, "avg_line_length": 23.95, "alnum_prop": 0.6040361864996521, "repo_name": "zalando/turnstile", "id": "42c82a6c7f9252021cd7aa0f772af75291a4da58", "size": "1437", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "turnstile/common/github.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "73441" } ], "symlink_target": "" }
from django import http
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import Context

import xhtml2pdf.pisa as pisa

try:
    import StringIO
    StringIO = StringIO.StringIO
except Exception:
    from io import StringIO

import cgi


def index(request):
    return http.HttpResponse("""
        <html><body>
            <h1>Example 1</h1>
            Please enter some HTML code:
            <form action="/download/" method="post" enctype="multipart/form-data">
                <textarea name="data">Hello <strong>World</strong></textarea>
                <br />
                <input type="submit" value="Convert HTML to PDF" />
            </form>
            <hr>
            <h1>Example 2</h1>
            <p><a href="ezpdf_sample">Example with template</a>
        </body></html>
    """)


def download(request):
    if request.POST:
        result = StringIO()
        pdf = pisa.CreatePDF(
            StringIO(request.POST["data"]),
            result
        )
        # ==============README===================
        # In Django < 1.7 the keyword argument is
        # `mimetype` instead of `content_type`.
        # =======================================
        if not pdf.err:
            return http.HttpResponse(
                result.getvalue(),
                content_type='application/pdf')
        return http.HttpResponse('We had some errors')


def render_to_pdf(template_src, context_dict):
    template = get_template(template_src)
    context = Context(context_dict)
    html = template.render(context)
    result = StringIO()

    pdf = pisa.pisa_document(StringIO(
        "{0}".format(html)
    ), result)

    if not pdf.err:
        # ==============README===================
        # In Django < 1.7 the keyword argument is
        # `mimetype` instead of `content_type`.
        # =======================================
        return http.HttpResponse(result.getvalue(),
                                 content_type='application/pdf')
    return http.HttpResponse('We had some errors<pre>%s</pre>'
                             % cgi.escape(html))


def ezpdf_sample(request):
    blog_entries = []
    for i in range(1, 10):
        blog_entries.append({
            'id': i,
            'title': 'Playing with pisa 3.0.16 and Django Template Engine',
            'body': 'This is a simple example..'
        })
    return render_to_pdf('entries.html', {
        'pagesize': 'A4',
        'title': 'My amazing blog',
        'blog_entries': blog_entries})
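For reference, the same pisa.CreatePDF entry point used in the download view can be exercised outside Django; a minimal sketch, where the output filename is arbitrary:

# Standalone sketch reusing the imports at the top of this module.
result = StringIO()
pdf = pisa.CreatePDF(StringIO('<h1>Hello <strong>World</strong></h1>'), result)
if not pdf.err:
    with open('hello.pdf', 'wb') as outf:  # hypothetical output path
        outf.write(result.getvalue())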
{ "content_hash": "bc80a8c0fbb7042e6ec3c3399df93276", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 83, "avg_line_length": 33.98571428571429, "alnum_prop": 0.5519125683060109, "repo_name": "zulumarketing/html2pdf", "id": "d59e870d1e3d0f995bda92f7d9d00256dcec49b3", "size": "2425", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "demo/djangoproject/views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "25356" }, { "name": "Genshi", "bytes": "7610" }, { "name": "HTML", "bytes": "469495" }, { "name": "Python", "bytes": "485798" } ], "symlink_target": "" }
import json import os from io import BytesIO from contextlib import closing, contextmanager from datetime import datetime from mox import Mox, IgnoreArg from shutil import rmtree from tempfile import mkdtemp from unittest import TestCase from jsonlog import ConcurrentModificationException from jsonlog import DataStore from jsonlog import DataStoreException from jsonlog import DataStoreFS from jsonlog import FileAlreadyExistsException from jsonlog.datastore import atomic_rename class TestAtomicRename(TestCase): def setUp(self): self.tempdir = mkdtemp() self.fs = DataStoreFS(self.tempdir) def tearDown(self): rmtree(self.tempdir) def test_atomic_rename_fails_when_file_exists(self): existing_filename = os.path.join(self.tempdir, 'not-to-be-overwritten') source_filename = os.path.join(self.tempdir, 'new-version') with open(existing_filename, 'w') as outf: outf.write('this must be preserved') with open(source_filename, 'w') as outf: outf.write('this is the new version') self.assertRaises( FileAlreadyExistsException, atomic_rename, source_filename, existing_filename ) class TestDataStoreFS(TestCase): def setUp(self): self.tempdir = mkdtemp() self.fs = DataStoreFS(self.tempdir) def tearDown(self): rmtree(self.tempdir) @contextmanager def assert_tempdir_stays_empty(self): yield files_in_temp = list(os.listdir(os.path.join(self.tempdir, '__temp'))) self.assertTrue( not files_in_temp, "we do not expect files in temp dir, found %s" % files_in_temp ) def create_file(self, path, content): with open(os.path.join(self.tempdir, path), mode='w') as outf: outf.write(content) def create_empty_file(self, path): self.create_file(path, '') def test_it_list_files(self): self.create_empty_file('b') self.create_empty_file('c') self.assertEquals(2, len(list(self.fs.listfiles('')))) def test_it_list_top_dirs(self): dirpath = os.path.join(self.tempdir, 'd') os.mkdir(dirpath) self.assertEquals(set([ os.path.join(self.tempdir, '__temp'), dirpath, ]), set(self.fs.listdirs())) def test_it_opens_file_for_reading(self): sentinel = 'abcdefg' self.create_file('b', sentinel) with self.fs.open_for_reading('b') as inf: self.assertEquals(sentinel, inf.read()) def test_it_creates_files(self): sentinel = 'abcdefg' with self.assert_tempdir_stays_empty(): with self.fs.open_new_file('c') as outf: outf.write(sentinel) with self.fs.open_for_reading('c') as inf: self.assertEquals(sentinel, inf.read()) def test_it_create_files_iff_they_dont_exist_yet(self): with self.assert_tempdir_stays_empty(): with self.fs.open_new_file('c') as outf: outf.write('A') has_entered = [] def create_again(): with self.fs.open_new_file('c') as outf: has_entered.append(True) with self.assert_tempdir_stays_empty(): self.assertRaises(FileAlreadyExistsException, create_again) self.assertTrue(has_entered) class TestDataStore(TestCase): def setUp(self): self.mox = Mox() def tearDown(self): self.mox.UnsetStubs() def test_it_queries_the_filesystem_for_the_latest_version(self): fs = self.mox.CreateMock(DataStoreFS) ds = DataStore(fs) pj = os.path.join fs.listfiles('my-id').AndReturn([ pj('my-id', 'item-0.json'), pj('my-id', 'item-15.json'), pj('my-id', 'item-1.json'), pj('my-id', 'item-6.json'), pj('my-id', 'readme.txt'), ]) fs.open_for_reading(os.path.join('my-id', 'item-15.json')).AndReturn( closing(BytesIO('{"name":"the_name"}')) ) self.mox.ReplayAll() item = ds.get('my-id') self.assertEquals(dict(name='the_name', version=15), item) self.mox.VerifyAll() def test_it_bails_out_when_no_item_is_found(self): fs = self.mox.CreateMock(DataStoreFS) ds = DataStore(fs) 
        fs.listfiles('my-id').AndReturn([
            os.path.join('my-id', 'readme.txt')
        ])

        self.mox.ReplayAll()
        self.assertRaises(DataStoreException, ds.get, 'my-id')
        self.mox.VerifyAll()

    def test_it_bails_out_when_item_cannot_be_read(self):
        fs = self.mox.CreateMock(DataStoreFS)
        ds = DataStore(fs)

        fs.listfiles('my-id').AndReturn([
            os.path.join('my-id', 'item-0.json')
        ])
        # The expectation must match the path DataStore actually opens,
        # i.e. the .json version file returned by listfiles above.
        fs.open_for_reading(os.path.join('my-id', 'item-0.json')).AndRaise(
            Exception()
        )

        self.mox.ReplayAll()
        self.assertRaises(DataStoreException, ds.get, 'my-id')
        self.mox.VerifyAll()

    def test_it_creates_a_new_version_on_put(self):
        fs = self.mox.CreateMock(DataStoreFS)

        def clock_now():
            return datetime(1970, 1, 1)

        ds = DataStore(fs, clock_now)

        first_version = os.path.join('my-id', 'item-0.json')
        fs.listfiles('my-id').AndReturn([
            first_version
        ])
        fs.open_for_reading(first_version).AndReturn(
            closing(BytesIO('{"name":"the name"}'))
        )

        new_version = os.path.join('my-id', 'item-1.json')
        new_content = BytesIO()

        @contextmanager
        def not_closing_content():
            yield new_content

        fs.open_new_file(new_version).AndReturn(
            not_closing_content()
        )

        self.mox.ReplayAll()
        item = ds.get('my-id')
        item['name'] = 'the new name'
        ds.put('item', 'my-id', item)

        self.assertEquals(
            json.loads(new_content.getvalue()),
            dict(
                name='the new name',
                version=1,
                creation_date={
                    '__datetime__': True,
                    'iso8601': '19700101T000000'
                }
            )
        )
        self.mox.VerifyAll()

    def test_it_fails_when_two_concurrent_puts_happen(self):
        fs = self.mox.CreateMock(DataStoreFS)
        ds = DataStore(fs)

        fp = os.path.join('my-id', 'item-32.json')
        fs.open_new_file(fp).AndReturn(closing(BytesIO()))
        fs.open_new_file(fp).AndRaise(
            FileAlreadyExistsException('file already exists'))

        self.mox.ReplayAll()
        ds.put('item', 'my-id', dict(name='hello', version=31))
        self.assertRaises(
            ConcurrentModificationException,
            ds.put, 'item', 'my-id', dict(name='hello', version=31)
        )
        self.mox.VerifyAll()

    def test_it_can_list_all_ids(self):
        fs = self.mox.CreateMock(DataStoreFS)
        ds = DataStore(fs)

        items = [
            os.path.join('item1', 'item-1.json'),
            os.path.join('item2', 'item-5.json')
        ]
        fs.listdirs().AndReturn(['__temp', 'item1', 'item2'])
        fs.listfiles('item1').AndReturn([items[0]])
        fs.open_for_reading(items[0]).AndReturn(
            closing(BytesIO('{"name":"a"}'))
        )
        fs.listfiles('item2').AndReturn([items[1]])
        fs.open_for_reading(items[1]).AndReturn(
            closing(BytesIO('{"name":"b"}'))
        )

        self.mox.ReplayAll()
        self.assertEquals(set(['item1', 'item2']), set(ds.all_ids()))
        self.mox.VerifyAll()
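The tests above pin down the versioning scheme: each item lives in its own directory as item-<version>.json, the highest version number wins on read, and a put writes version+1, relying on the create-if-absent semantics of the filesystem layer to detect concurrent writers. A minimal sketch of the version-selection logic being tested, with a helper name that is illustrative rather than the library's:

import re

def latest_version_path(paths):
    # Pick the item-<N>.json file with the largest N, ignoring other files.
    best = None
    for p in paths:
        m = re.search(r'item-(\d+)\.json$', p)
        if m and (best is None or int(m.group(1)) > best[0]):
            best = (int(m.group(1)), p)
    return best  # (version, path) or None

print(latest_version_path(['my-id/item-0.json',
                           'my-id/item-15.json',
                           'my-id/readme.txt']))
# -> (15, 'my-id/item-15.json')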
{ "content_hash": "2dacbcd2ba94c8b85c745fb63ce3cf2d", "timestamp": "", "source": "github", "line_count": 257, "max_line_length": 88, "avg_line_length": 29.72762645914397, "alnum_prop": 0.5760471204188482, "repo_name": "uucidl/pyjsonlog", "id": "c9dddb38f73e3425c426adc7282fcb3df234aa38", "size": "7640", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_datastore.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13138" } ], "symlink_target": "" }
"""Python 'zlib_codec' Codec - zlib compression encoding.

This codec de/encodes from bytes to bytes.

Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !

### Codec APIs

def zlib_encode(input, errors='strict'):
    assert errors == 'strict'
    return (zlib.compress(input), len(input))

def zlib_decode(input, errors='strict'):
    assert errors == 'strict'
    return (zlib.decompress(input), len(input))

class Codec(codecs.Codec):
    def encode(self, input, errors='strict'):
        return zlib_encode(input, errors)
    def decode(self, input, errors='strict'):
        return zlib_decode(input, errors)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = zlib.compressobj()

    def encode(self, input, final=False):
        if final:
            c = self.compressobj.compress(input)
            return c + self.compressobj.flush()
        else:
            return self.compressobj.compress(input)

    def reset(self):
        self.compressobj = zlib.compressobj()

class IncrementalDecoder(codecs.IncrementalDecoder):
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = zlib.decompressobj()

    def decode(self, input, final=False):
        if final:
            c = self.decompressobj.decompress(input)
            return c + self.decompressobj.flush()
        else:
            return self.decompressobj.decompress(input)

    def reset(self):
        self.decompressobj = zlib.decompressobj()

class StreamWriter(Codec, codecs.StreamWriter):
    charbuffertype = bytes

class StreamReader(Codec, codecs.StreamReader):
    charbuffertype = bytes

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='zlib',
        encode=zlib_encode,
        decode=zlib_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
        _is_text_encoding=False,
    )
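
# ---------------------------------------------------------------------------
# Quick self-check of the codec (illustrative; runs only when the module is
# executed directly, so normal codec-registry use is unaffected).
if __name__ == '__main__':
    data = b'hello hello hello'
    packed, consumed = zlib_encode(data)
    assert consumed == len(data)
    assert zlib_decode(packed) == (data, len(packed))
    # The incremental classes handle chunked streams; pass final=True on the
    # last chunk so the compressor's buffered output gets flushed.
    enc = IncrementalEncoder()
    stream = enc.encode(b'hello ') + enc.encode(b'world', final=True)
    assert zlib.decompress(stream) == b'hello world'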
{ "content_hash": "8151edc2852f4132022d741931190ecc", "timestamp": "", "source": "github", "line_count": 237, "max_line_length": 57, "avg_line_length": 28.48945147679325, "alnum_prop": 0.6590639810426541, "repo_name": "ArcherSys/ArcherSys", "id": "63429284fbf794d0a1638467a6eb8e4cda971725", "size": "6752", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Lib/encodings/zlib_codec.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
from functools import update_wrapper, partial from django import forms from django.conf import settings from django.forms.formsets import all_valid from django.forms.models import (modelform_factory, modelformset_factory, inlineformset_factory, BaseInlineFormSet) from django.contrib.contenttypes.models import ContentType from django.contrib.admin import widgets, helpers from django.contrib.admin.util import unquote, flatten_fieldsets, get_deleted_objects, model_format_dict from django.contrib.admin.templatetags.admin_static import static from django.contrib import messages from django.views.decorators.csrf import csrf_protect from django.core.exceptions import PermissionDenied, ValidationError from django.core.paginator import Paginator from django.core.urlresolvers import reverse from django.db import models, transaction, router from django.db.models.related import RelatedObject from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist from django.db.models.sql.constants import LOOKUP_SEP, QUERY_TERMS from django.http import Http404, HttpResponse, HttpResponseRedirect from django.shortcuts import get_object_or_404 from django.template.response import SimpleTemplateResponse, TemplateResponse from django.utils.decorators import method_decorator from django.utils.datastructures import SortedDict from django.utils.html import escape, escapejs from django.utils.safestring import mark_safe from django.utils.text import capfirst, get_text_list from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils.encoding import force_unicode HORIZONTAL, VERTICAL = 1, 2 # returns the <ul> class for a given radio_admin field get_ul_class = lambda x: 'radiolist%s' % ((x == HORIZONTAL) and ' inline' or '') class IncorrectLookupParameters(Exception): pass # Defaults for formfield_overrides. ModelAdmin subclasses can change this # by adding to ModelAdmin.formfield_overrides. FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { 'form_class': forms.SplitDateTimeField, 'widget': widgets.AdminSplitDateTime }, models.DateField: {'widget': widgets.AdminDateWidget}, models.TimeField: {'widget': widgets.AdminTimeWidget}, models.TextField: {'widget': widgets.AdminTextareaWidget}, models.URLField: {'widget': widgets.AdminURLFieldWidget}, models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget}, models.CharField: {'widget': widgets.AdminTextInputWidget}, models.ImageField: {'widget': widgets.AdminFileWidget}, models.FileField: {'widget': widgets.AdminFileWidget}, } csrf_protect_m = method_decorator(csrf_protect) class BaseModelAdmin(object): """Functionality common to both ModelAdmin and InlineAdmin.""" __metaclass__ = forms.MediaDefiningClass raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None def __init__(self): overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy() overrides.update(self.formfield_overrides) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. 
""" request = kwargs.pop("request", None) # If the field specifies choices, we don't need to look for special # admin widgets - we just need to use a select widget of some kind. if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): # Combine the field kwargs with any options for formfield_overrides. # Make sure the passed in **kwargs override anything in # formfield_overrides because **kwargs is more specific, and should # always win. if db_field.__class__ in self.formfield_overrides: kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs) # Get the correct formfield. if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) # For non-raw_id fields, wrap the widget with a wrapper that adds # extra HTML -- the "add other" interface -- to the end of the # rendered output. formfield can be None if it came from a # OneToOneField with parent_link=True or a M2M intermediary. if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get( db_field.rel.to) can_add_related = bool(related_modeladmin and related_modeladmin.has_add_permission(request)) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.rel, self.admin_site, can_add_related=can_add_related) return formfield # If we've got overrides for the formfield defined, use 'em. **kwargs # passed to formfield_for_dbfield override the defaults. for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = dict(self.formfield_overrides[klass], **kwargs) return db_field.formfield(**kwargs) # For any other type of field, just call its formfield() method. return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request=None, **kwargs): """ Get a form Field for a database Field that has declared choices. """ # If the field is named as a radio_field, use a RadioSelect if db_field.name in self.radio_fields: # Avoid stomping on custom widget/choices arguments. if 'widget' not in kwargs: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) if 'choices' not in kwargs: kwargs['choices'] = db_field.get_choices( include_blank = db_field.blank, blank_choice=[('', _('None'))] ) return db_field.formfield(**kwargs) def formfield_for_foreignkey(self, db_field, request=None, **kwargs): """ Get a form Field for a ForeignKey. """ db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel, self.admin_site, using=db) elif db_field.name in self.radio_fields: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) kwargs['empty_label'] = db_field.blank and _('None') or None return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request=None, **kwargs): """ Get a form Field for a ManyToManyField. """ # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.rel.through._meta.auto_created: return None db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, self.admin_site, using=db) kwargs['help_text'] = '' elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)): kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical)) return db_field.formfield(**kwargs) def _declared_fieldsets(self): if self.fieldsets: return self.fieldsets elif self.fields: return [(None, {'fields': self.fields})] return None declared_fieldsets = property(_declared_fieldsets) def get_ordering(self, request): """ Hook for specifying field ordering. """ return self.ordering or () # otherwise we might try to *None, which is bad ;) def get_readonly_fields(self, request, obj=None): """ Hook for specifying custom readonly fields. """ return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): """ Hook for specifying custom prepopulated fields. """ return self.prepopulated_fields def queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_query_set() # TODO: this should be handled by some parameter to the ChangeList. ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def lookup_allowed(self, lookup, value): model = self.model # Check FKey lookups that are allowed, so that popups produced by # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to, # are allowed to work. for l in model._meta.related_fkey_lookups: for k, v in widgets.url_params_from_lookup_dict(l).items(): if k == lookup and v == value: return True parts = lookup.split(LOOKUP_SEP) # Last term in lookup is a query term (__exact, __startswith etc) # This term can be ignored. if len(parts) > 1 and parts[-1] in QUERY_TERMS: parts.pop() # Special case -- foo__id__exact and foo__id queries are implied # if foo has been specificially included in the lookup list; so # drop __id if it is the last part. However, first we need to find # the pk attribute name. rel_name = None for part in parts[:-1]: try: field, _, _, _ = model._meta.get_field_by_name(part) except FieldDoesNotExist: # Lookups on non-existants fields are ok, since they're ignored # later. return True if hasattr(field, 'rel'): model = field.rel.to rel_name = field.rel.get_related_field().name elif isinstance(field, RelatedObject): model = field.model rel_name = model._meta.pk.name else: rel_name = None if rel_name and len(parts) > 1 and parts[-1] == rel_name: parts.pop() if len(parts) == 1: return True clean_lookup = LOOKUP_SEP.join(parts) return clean_lookup in self.list_filter or clean_lookup == self.date_hierarchy def has_add_permission(self, request): """ Returns True if the given request has permission to add an object. Can be overriden by the user in subclasses. """ opts = self.opts return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) def has_change_permission(self, request, obj=None): """ Returns True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overriden by the user in subclasses. In such case it should return True if the given request has permission to change the `obj` model instance. 
If `obj` is None, this should return True if the given request has permission to change *any* object of the given type. """ opts = self.opts return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission()) def has_delete_permission(self, request, obj=None): """ Returns True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overriden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. """ opts = self.opts return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission()) class ModelAdmin(BaseModelAdmin): "Encapsulates all admin options and functionality for a given model." list_display = ('__str__',) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () date_hierarchy = None save_as = False save_on_top = False paginator = Paginator inlines = [] # Custom templates (designed to be over-ridden in subclasses) add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None # Actions actions = [] action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super(ModelAdmin, self).__init__() def get_inline_instances(self, request): inline_instances = [] for inline_class in self.inlines: inline = inline_class(self.model, self.admin_site) if request: if not (inline.has_add_permission(request) or inline.has_change_permission(request) or inline.has_delete_permission(request)): continue if not inline.has_add_permission(request): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.conf.urls import patterns, url def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) return update_wrapper(wrapper, view) info = self.model._meta.app_label, self.model._meta.module_name urlpatterns = patterns('', url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info), url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info), url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info), url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info), url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info), ) return urlpatterns def urls(self): return self.get_urls() urls = property(urls) @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'core.js', 'admin/RelatedObjectLookups.js', 'jquery%s.js' % extra, 'jquery.init.js' ] if self.actions is not None: js.append('actions%s.js' % extra) if self.prepopulated_fields: js.extend(['urlify.js', 'prepopulate%s.js' % extra]) if self.opts.get_ordered_objects(): js.extend(['getElementsBySelector.js', 'dom-drag.js' , 'admin/ordering.js']) return forms.Media(js=[static('admin/js/%s' % url) for url in js]) def get_model_perms(self, request): """ Returns a dict of all perms for this model. This dict has the keys ``add``, ``change``, and ``delete`` mapping to the True/False for each of those actions. 
""" return { 'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), } def get_fieldsets(self, request, obj=None): "Hook for specifying fieldsets for the add form." if self.declared_fieldsets: return self.declared_fieldsets form = self.get_form(request, obj) fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj)) return [(None, {'fields': fields})] def get_form(self, request, obj=None, **kwargs): """ Returns a Form class for use in the admin add view. This is used by add_view and change_view. """ if self.declared_fieldsets: fields = flatten_fieldsets(self.declared_fieldsets) else: fields = None if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # ModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # if exclude is an empty list we pass None to be consistant with the # default on modelform_factory exclude = exclude or None defaults = { "form": self.form, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) return modelform_factory(self.model, **defaults) def get_changelist(self, request, **kwargs): """ Returns the ChangeList class for use on the changelist page. """ from django.contrib.admin.views.main import ChangeList return ChangeList def get_object(self, request, object_id): """ Returns an instance matching the primary key provided. ``None`` is returned if no match is found (or the object_id failed validation against the primary key field). """ queryset = self.queryset(request) model = queryset.model try: object_id = model._meta.pk.to_python(object_id) return queryset.get(pk=object_id) except (model.DoesNotExist, ValidationError): return None def get_changelist_form(self, request, **kwargs): """ Returns a Form class for use in the Formset on the changelist page. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) return modelform_factory(self.model, **defaults) def get_changelist_formset(self, request, **kwargs): """ Returns a FormSet class for use on the changelist page if list_editable is used. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) return modelformset_factory(self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults) def get_formsets(self, request, obj=None): for inline in self.get_inline_instances(request): yield inline.get_formset(request, obj) def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True): return self.paginator(queryset, per_page, orphans, allow_empty_first_page) def log_addition(self, request, object): """ Log that an object has been successfully added. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, ADDITION LogEntry.objects.log_action( user_id = request.user.pk, content_type_id = ContentType.objects.get_for_model(object).pk, object_id = object.pk, object_repr = force_unicode(object), action_flag = ADDITION ) def log_change(self, request, object, message): """ Log that an object has been successfully changed. 
The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, CHANGE LogEntry.objects.log_action( user_id = request.user.pk, content_type_id = ContentType.objects.get_for_model(object).pk, object_id = object.pk, object_repr = force_unicode(object), action_flag = CHANGE, change_message = message ) def log_deletion(self, request, object, object_repr): """ Log that an object will be deleted. Note that this method is called before the deletion. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, DELETION LogEntry.objects.log_action( user_id = request.user.id, content_type_id = ContentType.objects.get_for_model(self.model).pk, object_id = object.pk, object_repr = object_repr, action_flag = DELETION ) def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_unicode(obj.pk)) action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />') action_checkbox.allow_tags = True def get_actions(self, request): """ Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action. """ # If self.actions is explicitally set to None that means that we don't # want *any* actions enabled on this page. from django.contrib.admin.views.main import IS_POPUP_VAR if self.actions is None or IS_POPUP_VAR in request.GET: return SortedDict() actions = [] # Gather actions from the admin site first for (name, func) in self.admin_site.actions: description = getattr(func, 'short_description', name.replace('_', ' ')) actions.append((func, name, description)) # Then gather them from the model admin and all parent classes, # starting with self and working back up. for klass in self.__class__.mro()[::-1]: class_actions = getattr(klass, 'actions', []) # Avoid trying to iterate over None if not class_actions: continue actions.extend([self.get_action(action) for action in class_actions]) # get_action might have returned None, so filter any of those out. actions = filter(None, actions) # Convert the actions into a SortedDict keyed by name. actions = SortedDict([ (name, (func, name, desc)) for func, name, desc in actions ]) return actions def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH): """ Return a list of choices for use in a form object. Each choice is a tuple (name, description). """ choices = [] + default_choices for func, name, description in self.get_actions(request).itervalues(): choice = (name, description % model_format_dict(self.opts)) choices.append(choice) return choices def get_action(self, action): """ Return a given action from a parameter, which can either be a callable, or the name of a method on the ModelAdmin. Return is a tuple of (callable, name, description). """ # If the action is a callable, just use it. if callable(action): func = action action = action.__name__ # Next, look for a method. Grab it off self.__class__ to get an unbound # method instead of a bound one; this ensures that the calling # conventions are the same for functions and methods. 
elif hasattr(self.__class__, action): func = getattr(self.__class__, action) # Finally, look for a named method on the admin site else: try: func = self.admin_site.get_action(action) except KeyError: return None if hasattr(func, 'short_description'): description = func.short_description else: description = capfirst(action.replace('_', ' ')) return func, action, description def get_list_display(self, request): """ Return a sequence containing the fields to be displayed on the changelist. """ return self.list_display def get_list_display_links(self, request, list_display): """ Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display(). """ if self.list_display_links or not list_display: return self.list_display_links else: # Use only the first item in list_display as link return list(list_display)[:1] def construct_change_message(self, request, form, formsets): """ Construct a change message from a changed object. """ change_message = [] if form.changed_data: change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and'))) if formsets: for formset in formsets: for added_object in formset.new_objects: change_message.append(_('Added %(name)s "%(object)s".') % {'name': force_unicode(added_object._meta.verbose_name), 'object': force_unicode(added_object)}) for changed_object, changed_fields in formset.changed_objects: change_message.append(_('Changed %(list)s for %(name)s "%(object)s".') % {'list': get_text_list(changed_fields, _('and')), 'name': force_unicode(changed_object._meta.verbose_name), 'object': force_unicode(changed_object)}) for deleted_object in formset.deleted_objects: change_message.append(_('Deleted %(name)s "%(object)s".') % {'name': force_unicode(deleted_object._meta.verbose_name), 'object': force_unicode(deleted_object)}) change_message = ' '.join(change_message) return change_message or _('No fields changed.') def message_user(self, request, message): """ Send a message to the user. The default implementation posts a message using the django.contrib.messages backend. """ messages.info(request, message) def save_form(self, request, form, change): """ Given a ModelForm return an unsaved instance. ``change`` is True if the object is being changed, and False if it's being added. """ return form.save(commit=False) def save_model(self, request, obj, form, change): """ Given a model instance save it to the database. """ obj.save() def delete_model(self, request, obj): """ Given a model instance delete it from the database. """ obj.delete() def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() def save_related(self, request, form, formsets, change): """ Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called. 
""" form.save_m2m() for formset in formsets: self.save_formset(request, form, formset, change=change) def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None): opts = self.model._meta app_label = opts.app_label ordered_objects = opts.get_ordered_objects() context.update({ 'add': add, 'change': change, 'has_add_permission': self.has_add_permission(request), 'has_change_permission': self.has_change_permission(request, obj), 'has_delete_permission': self.has_delete_permission(request, obj), 'has_file_field': True, # FIXME - this should check if form or formsets have a FileField, 'has_absolute_url': hasattr(self.model, 'get_absolute_url'), 'ordered_objects': ordered_objects, 'form_url': form_url, 'opts': opts, 'content_type_id': ContentType.objects.get_for_model(self.model).id, 'save_as': self.save_as, 'save_on_top': self.save_on_top, }) if add and self.add_form_template is not None: form_template = self.add_form_template else: form_template = self.change_form_template return TemplateResponse(request, form_template or [ "admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()), "admin/%s/change_form.html" % app_label, "admin/change_form.html" ], context, current_app=self.admin_site.name) def response_add(self, request, obj, post_url_continue='../%s/'): """ Determines the HttpResponse for the add_view stage. """ opts = obj._meta pk_value = obj._get_pk_val() msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)} # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. if "_continue" in request.POST: self.message_user(request, msg + ' ' + _("You may edit it again below.")) if "_popup" in request.POST: post_url_continue += "?_popup=1" return HttpResponseRedirect(post_url_continue % pk_value) if "_popup" in request.POST: return HttpResponse( '<!DOCTYPE html><html><head><title></title></head><body>' '<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script></body></html>' % \ # escape() calls force_unicode. (escape(pk_value), escapejs(obj))) elif "_addanother" in request.POST: self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name))) return HttpResponseRedirect(request.path) else: self.message_user(request, msg) # Figure out where to redirect. If the user has change permission, # redirect to the change-list page for this object. Otherwise, # redirect to the admin index. if self.has_change_permission(request, None): post_url = reverse('admin:%s_%s_changelist' % (opts.app_label, opts.module_name), current_app=self.admin_site.name) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_change(self, request, obj): """ Determines the HttpResponse for the change_view stage. """ opts = obj._meta # Handle proxy models automatically created by .only() or .defer(). 
# Refs #14529 verbose_name = opts.verbose_name module_name = opts.module_name if obj._deferred: opts_ = opts.proxy_for_model._meta verbose_name = opts_.verbose_name module_name = opts_.module_name pk_value = obj._get_pk_val() msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(verbose_name), 'obj': force_unicode(obj)} if "_continue" in request.POST: self.message_user(request, msg + ' ' + _("You may edit it again below.")) if "_popup" in request.REQUEST: return HttpResponseRedirect(request.path + "?_popup=1") else: return HttpResponseRedirect(request.path) elif "_saveasnew" in request.POST: msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(verbose_name), 'obj': obj} self.message_user(request, msg) return HttpResponseRedirect(reverse('admin:%s_%s_change' % (opts.app_label, module_name), args=(pk_value,), current_app=self.admin_site.name)) elif "_addanother" in request.POST: self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(verbose_name))) return HttpResponseRedirect(reverse('admin:%s_%s_add' % (opts.app_label, module_name), current_app=self.admin_site.name)) else: self.message_user(request, msg) # Figure out where to redirect. If the user has change permission, # redirect to the change-list page for this object. Otherwise, # redirect to the admin index. if self.has_change_permission(request, None): post_url = reverse('admin:%s_%s_changelist' % (opts.app_label, module_name), current_app=self.admin_site.name) else: post_url = reverse('admin:index', current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_action(self, request, queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get('index', 0)) except ValueError: action_index = 0 # Construct the action form. data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({'action': data.getlist('action')[action_index]}) except IndexError: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data['action'] select_across = action_form.cleaned_data['select_across'] func, name, description = self.get_actions(request)[action] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing will happen msg = _("Items must be selected in order to perform " "actions on them. 
No items have been changed.") self.message_user(request, msg) return None if not select_across: # Perform the action only on the selected objects queryset = queryset.filter(pk__in=selected) response = func(self, request, queryset) # Actions may return an HttpResponse, which will be used as the # response from the POST. If not, we'll be a good little HTTP # citizen and redirect back to the changelist page. if isinstance(response, HttpResponse): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg) return None @csrf_protect_m @transaction.commit_on_success def add_view(self, request, form_url='', extra_context=None): "The 'add' admin view for this model." model = self.model opts = model._meta if not self.has_add_permission(request): raise PermissionDenied ModelForm = self.get_form(request) formsets = [] inline_instances = self.get_inline_instances(request) if request.method == 'POST': form = ModelForm(request.POST, request.FILES) if form.is_valid(): new_object = self.save_form(request, form, change=False) form_validated = True else: form_validated = False new_object = self.model() prefixes = {} for FormSet, inline in zip(self.get_formsets(request), inline_instances): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset = FormSet(data=request.POST, files=request.FILES, instance=new_object, save_as_new="_saveasnew" in request.POST, prefix=prefix, queryset=inline.queryset(request)) formsets.append(formset) if all_valid(formsets) and form_validated: self.save_model(request, new_object, form, False) self.save_related(request, form, formsets, False) self.log_addition(request, new_object) return self.response_add(request, new_object) else: # Prepare the dict of initial data from the request. # We have to special-case M2Ms as a list of comma-separated PKs. 
initial = dict(request.GET.items()) for k in initial: try: f = opts.get_field(k) except models.FieldDoesNotExist: continue if isinstance(f, models.ManyToManyField): initial[k] = initial[k].split(",") form = ModelForm(initial=initial) prefixes = {} for FormSet, inline in zip(self.get_formsets(request), inline_instances): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset = FormSet(instance=self.model(), prefix=prefix, queryset=inline.queryset(request)) formsets.append(formset) adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)), self.get_prepopulated_fields(request), self.get_readonly_fields(request), model_admin=self) media = self.media + adminForm.media inline_admin_formsets = [] for inline, formset in zip(inline_instances, formsets): fieldsets = list(inline.get_fieldsets(request)) readonly = list(inline.get_readonly_fields(request)) prepopulated = dict(inline.get_prepopulated_fields(request)) inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets, prepopulated, readonly, model_admin=self) inline_admin_formsets.append(inline_admin_formset) media = media + inline_admin_formset.media context = { 'title': _('Add %s') % force_unicode(opts.verbose_name), 'adminform': adminForm, 'is_popup': "_popup" in request.REQUEST, 'show_delete': False, 'media': media, 'inline_admin_formsets': inline_admin_formsets, 'errors': helpers.AdminErrorList(form, formsets), 'app_label': opts.app_label, } context.update(extra_context or {}) return self.render_change_form(request, context, form_url=form_url, add=True) @csrf_protect_m @transaction.commit_on_success def change_view(self, request, object_id, form_url='', extra_context=None): "The 'change' admin view for this model." 
model = self.model opts = model._meta obj = self.get_object(request, unquote(object_id)) if not self.has_change_permission(request, obj): raise PermissionDenied if obj is None: raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)}) if request.method == 'POST' and "_saveasnew" in request.POST: return self.add_view(request, form_url=reverse('admin:%s_%s_add' % (opts.app_label, opts.module_name), current_app=self.admin_site.name)) ModelForm = self.get_form(request, obj) formsets = [] inline_instances = self.get_inline_instances(request) if request.method == 'POST': form = ModelForm(request.POST, request.FILES, instance=obj) if form.is_valid(): form_validated = True new_object = self.save_form(request, form, change=True) else: form_validated = False new_object = obj prefixes = {} for FormSet, inline in zip(self.get_formsets(request, new_object), inline_instances): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset = FormSet(request.POST, request.FILES, instance=new_object, prefix=prefix, queryset=inline.queryset(request)) formsets.append(formset) if all_valid(formsets) and form_validated: self.save_model(request, new_object, form, True) self.save_related(request, form, formsets, True) change_message = self.construct_change_message(request, form, formsets) self.log_change(request, new_object, change_message) return self.response_change(request, new_object) else: form = ModelForm(instance=obj) prefixes = {} for FormSet, inline in zip(self.get_formsets(request, obj), inline_instances): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset = FormSet(instance=obj, prefix=prefix, queryset=inline.queryset(request)) formsets.append(formset) adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj), self.get_prepopulated_fields(request, obj), self.get_readonly_fields(request, obj), model_admin=self) media = self.media + adminForm.media inline_admin_formsets = [] for inline, formset in zip(inline_instances, formsets): fieldsets = list(inline.get_fieldsets(request, obj)) readonly = list(inline.get_readonly_fields(request, obj)) prepopulated = dict(inline.get_prepopulated_fields(request, obj)) inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets, prepopulated, readonly, model_admin=self) inline_admin_formsets.append(inline_admin_formset) media = media + inline_admin_formset.media context = { 'title': _('Change %s') % force_unicode(opts.verbose_name), 'adminform': adminForm, 'object_id': object_id, 'original': obj, 'is_popup': "_popup" in request.REQUEST, 'media': media, 'inline_admin_formsets': inline_admin_formsets, 'errors': helpers.AdminErrorList(form, formsets), 'app_label': opts.app_label, } context.update(extra_context or {}) return self.render_change_form(request, context, change=True, obj=obj, form_url=form_url) @csrf_protect_m def changelist_view(self, request, extra_context=None): """ The 'change list' admin view for this model. 
""" from django.contrib.admin.views.main import ERROR_FLAG opts = self.model._meta app_label = opts.app_label if not self.has_change_permission(request, None): raise PermissionDenied list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Check actions to see if any are available on this changelist actions = self.get_actions(request) if actions: # Add the action checkboxes if there are any actions available. list_display = ['action_checkbox'] + list(list_display) ChangeList = self.get_changelist(request) try: cl = ChangeList(request, self.model, list_display, list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self) except IncorrectLookupParameters: # Wacky lookup parameters were given, so redirect to the main # changelist page, without parameters, and pass an 'invalid=1' # parameter via the query string. If wacky parameters were given # and the 'invalid=1' parameter was already in the query string, # something is screwed up with the database, so display an error # page. if ERROR_FLAG in request.GET.keys(): return SimpleTemplateResponse('admin/invalid_setup.html', { 'title': _('Database error'), }) return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1') # If the request was POSTed, this might be a bulk action or a bulk # edit. Try to look up an action or confirmation first, but if this # isn't an action the POST will fall through to the bulk edit check, # below. action_failed = False selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) # Actions with no confirmation if (actions and request.method == 'POST' and 'index' in request.POST and '_save' not in request.POST): if selected: response = self.response_action(request, queryset=cl.get_query_set(request)) if response: return response else: action_failed = True else: msg = _("Items must be selected in order to perform " "actions on them. No items have been changed.") self.message_user(request, msg) action_failed = True # Actions with confirmation if (actions and request.method == 'POST' and helpers.ACTION_CHECKBOX_NAME in request.POST and 'index' not in request.POST and '_save' not in request.POST): if selected: response = self.response_action(request, queryset=cl.get_query_set(request)) if response: return response else: action_failed = True # If we're allowing changelist editing, we need to construct a formset # for the changelist given all the fields to be edited. Then we'll # use the formset to validate/process POSTed data. formset = cl.formset = None # Handle POSTed bulk-edit data. 
if (request.method == "POST" and cl.list_editable and '_save' in request.POST and not action_failed): FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list) if formset.is_valid(): changecount = 0 for form in formset.forms: if form.has_changed(): obj = self.save_form(request, form, change=True) self.save_model(request, obj, form, change=True) self.save_related(request, form, formsets=[], change=True) change_msg = self.construct_change_message(request, form, None) self.log_change(request, obj, change_msg) changecount += 1 if changecount: if changecount == 1: name = force_unicode(opts.verbose_name) else: name = force_unicode(opts.verbose_name_plural) msg = ungettext("%(count)s %(name)s was changed successfully.", "%(count)s %(name)s were changed successfully.", changecount) % {'count': changecount, 'name': name, 'obj': force_unicode(obj)} self.message_user(request, msg) return HttpResponseRedirect(request.get_full_path()) # Handle GET -- construct a formset for display. elif cl.list_editable: FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(queryset=cl.result_list) # Build the list of media to be used by the formset. if formset: media = self.media + formset.media else: media = self.media # Build the action form and populate it with available actions. if actions: action_form = self.action_form(auto_id=None) action_form.fields['action'].choices = self.get_action_choices(request) else: action_form = None selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count) context = { 'module_name': force_unicode(opts.verbose_name_plural), 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)}, 'selection_note_all': selection_note_all % {'total_count': cl.result_count}, 'title': cl.title, 'is_popup': cl.is_popup, 'cl': cl, 'media': media, 'has_add_permission': self.has_add_permission(request), 'app_label': app_label, 'action_form': action_form, 'actions_on_top': self.actions_on_top, 'actions_on_bottom': self.actions_on_bottom, 'actions_selection_counter': self.actions_selection_counter, } context.update(extra_context or {}) return TemplateResponse(request, self.change_list_template or [ 'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()), 'admin/%s/change_list.html' % app_label, 'admin/change_list.html' ], context, current_app=self.admin_site.name) @csrf_protect_m @transaction.commit_on_success def delete_view(self, request, object_id, extra_context=None): "The 'delete' admin view for this model." opts = self.model._meta app_label = opts.app_label obj = self.get_object(request, unquote(object_id)) if not self.has_delete_permission(request, obj): raise PermissionDenied if obj is None: raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)}) using = router.db_for_write(self.model) # Populate deleted_objects, a data structure of all related objects that # will also be deleted. (deleted_objects, perms_needed, protected) = get_deleted_objects( [obj], opts, request.user, self.admin_site, using) if request.POST: # The user has already confirmed the deletion. 
if perms_needed: raise PermissionDenied obj_display = force_unicode(obj) self.log_deletion(request, obj, obj_display) self.delete_model(request, obj) self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)}) if not self.has_change_permission(request, None): return HttpResponseRedirect(reverse('admin:index', current_app=self.admin_site.name)) return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.module_name), current_app=self.admin_site.name)) object_name = force_unicode(opts.verbose_name) if perms_needed or protected: title = _("Cannot delete %(name)s") % {"name": object_name} else: title = _("Are you sure?") context = { "title": title, "object_name": object_name, "object": obj, "deleted_objects": deleted_objects, "perms_lacking": perms_needed, "protected": protected, "opts": opts, "app_label": app_label, } context.update(extra_context or {}) return TemplateResponse(request, self.delete_confirmation_template or [ "admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()), "admin/%s/delete_confirmation.html" % app_label, "admin/delete_confirmation.html" ], context, current_app=self.admin_site.name) def history_view(self, request, object_id, extra_context=None): "The 'history' admin view for this model." from django.contrib.admin.models import LogEntry model = self.model opts = model._meta app_label = opts.app_label action_list = LogEntry.objects.filter( object_id = unquote(object_id), content_type__id__exact = ContentType.objects.get_for_model(model).id ).select_related().order_by('action_time') # If no history was found, see whether this object even exists. obj = get_object_or_404(model, pk=unquote(object_id)) context = { 'title': _('Change history: %s') % force_unicode(obj), 'action_list': action_list, 'module_name': capfirst(force_unicode(opts.verbose_name_plural)), 'object': obj, 'app_label': app_label, 'opts': opts, } context.update(extra_context or {}) return TemplateResponse(request, self.object_history_template or [ "admin/%s/%s/object_history.html" % (app_label, opts.object_name.lower()), "admin/%s/object_history.html" % app_label, "admin/object_history.html" ], context, current_app=self.admin_site.name) class InlineModelAdmin(BaseModelAdmin): """ Options for inline editing of ``model`` instances. Provide ``name`` to specify the attribute name of the ``ForeignKey`` from ``model`` to its parent. This is required if ``model`` has more than one ``ForeignKey`` to its parent. 
""" model = None fk_name = None formset = BaseInlineFormSet extra = 3 max_num = None template = None verbose_name = None verbose_name_plural = None can_delete = True def __init__(self, parent_model, admin_site): self.admin_site = admin_site self.parent_model = parent_model self.opts = self.model._meta super(InlineModelAdmin, self).__init__() if self.verbose_name is None: self.verbose_name = self.model._meta.verbose_name if self.verbose_name_plural is None: self.verbose_name_plural = self.model._meta.verbose_name_plural @property def media(self): extra = '' if settings.DEBUG else '.min' js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra] if self.prepopulated_fields: js.extend(['urlify.js', 'prepopulate%s.js' % extra]) if self.filter_vertical or self.filter_horizontal: js.extend(['SelectBox.js', 'SelectFilter2.js']) return forms.Media(js=[static('admin/js/%s' % url) for url in js]) def get_formset(self, request, obj=None, **kwargs): """Returns a BaseInlineFormSet class for use in admin add/change views.""" if self.declared_fieldsets: fields = flatten_fieldsets(self.declared_fieldsets) else: fields = None if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # InlineModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # if exclude is an empty list we use None, since that's the actual # default exclude = exclude or None can_delete = self.can_delete and self.has_delete_permission(request, obj) defaults = { "form": self.form, "formset": self.formset, "fk_name": self.fk_name, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), "extra": self.extra, "max_num": self.max_num, "can_delete": can_delete, } defaults.update(kwargs) return inlineformset_factory(self.parent_model, self.model, **defaults) def get_fieldsets(self, request, obj=None): if self.declared_fieldsets: return self.declared_fieldsets form = self.get_formset(request, obj).form fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj)) return [(None, {'fields': fields})] def queryset(self, request): queryset = super(InlineModelAdmin, self).queryset(request) if not self.has_change_permission(request): queryset = queryset.none() return queryset def has_add_permission(self, request): if self.opts.auto_created: # We're checking the rights to an auto-created intermediate model, # which doesn't have its own individual permissions. The user needs # to have the change permission for the related model in order to # be able to do anything with the intermediate model. return self.has_change_permission(request) return request.user.has_perm( self.opts.app_label + '.' + self.opts.get_add_permission()) def has_change_permission(self, request, obj=None): opts = self.opts if opts.auto_created: # The model was auto-created as intermediary for a # ManyToMany-relationship, find the target model for field in opts.fields: if field.rel and field.rel.to != self.parent_model: opts = field.rel.to._meta break return request.user.has_perm( opts.app_label + '.' + opts.get_change_permission()) def has_delete_permission(self, request, obj=None): if self.opts.auto_created: # We're checking the rights to an auto-created intermediate model, # which doesn't have its own individual permissions. 
The user needs # to have the change permission for the related model in order to # be able to do anything with the intermediate model. return self.has_change_permission(request, obj) return request.user.has_perm( self.opts.app_label + '.' + self.opts.get_delete_permission()) class StackedInline(InlineModelAdmin): template = 'admin/edit_inline/stacked.html' class TabularInline(InlineModelAdmin): template = 'admin/edit_inline/tabular.html'
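
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only -- ``Author``, ``Book`` and the ``status``
# field are hypothetical models, not part of this module): a typical
# registration combining the options defined above.
#
#     from django.contrib import admin
#     from myapp.models import Author, Book
#
#     class BookInline(admin.TabularInline):      # one table row per Book
#         model = Book
#         extra = 1
#
#     class AuthorAdmin(admin.ModelAdmin):
#         list_display = ('name', 'birth_date')   # changelist columns
#         search_fields = ('name',)
#         radio_fields = {'status': admin.HORIZONTAL}  # needs a choices field
#         inlines = [BookInline]
#
#     admin.site.register(Author, AuthorAdmin)
# ---------------------------------------------------------------------------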
{ "content_hash": "e40c8e084b77188fab37b152654f558a", "timestamp": "", "source": "github", "line_count": 1464, "max_line_length": 173, "avg_line_length": 43.372950819672134, "alnum_prop": 0.5864279189895745, "repo_name": "aleida/django", "id": "4d23f8f3842854c3b49684e9c81948e42282d318", "size": "63498", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "django/contrib/admin/options.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "50207" }, { "name": "JavaScript", "bytes": "89078" }, { "name": "Python", "bytes": "8135526" }, { "name": "Shell", "bytes": "11901" } ], "symlink_target": "" }
import logging
import re

from sqlalchemy.exc import IntegrityError

# 'dicom' refers to the pydicom library
import dicom
from dicom.errors import InvalidDicomError

from . import utils


########################################################################################################################
# GLOBAL VARIABLES
########################################################################################################################

conn = None


########################################################################################################################
# PUBLIC FUNCTIONS
########################################################################################################################

def dicom2db(file_path, file_type, is_copy, step_id, db_conn, sid_by_patient=False, pid_in_vid=False,
             visit_in_path=False, rep_in_path=False):
    """Extract some meta-data from a DICOM file and store it in a DB.

    Arguments:
    :param file_path: File path.
    :param file_type: File type (should be 'DICOM').
    :param is_copy: Indicates whether this file is a copy.
    :param step_id: Step ID.
    :param db_conn: Database connection.
    :param sid_by_patient: Rarely, a data set might use study IDs which are unique by patient (not for the whole
        study). E.g. LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID
        as a session ID.
    :param pid_in_vid: Rarely, a data set might mix patient IDs and visit IDs. E.g. LREN data. In such a case, you
        have to enable this flag. This will try to split PatientID into VisitID and PatientID.
    :param visit_in_path: Enable this flag to get the visit ID from the folder hierarchy instead of DICOM meta-data
        (e.g. can be useful for PPMI).
    :param rep_in_path: Enable this flag to get the repetition ID from the folder hierarchy instead of DICOM
        meta-data (e.g. can be useful for PPMI).
    :return: A dictionary containing the following IDs: participant_id, visit_id, session_id, sequence_type_id,
        sequence_id, repetition_id, file_id.
    """
    global conn
    conn = db_conn
    tags = dict()
    logging.info("Extracting DICOM headers from '%s'" % file_path)
    try:
        dcm = dicom.read_file(file_path)
        dataset = db_conn.get_dataset(step_id)
        tags['participant_id'] = _extract_participant(dcm, dataset, pid_in_vid)
        if visit_in_path:
            tags['visit_id'] = _extract_visit_from_path(
                dcm, file_path, pid_in_vid, sid_by_patient, dataset, tags['participant_id'])
        else:
            tags['visit_id'] = _extract_visit(dcm, dataset, tags['participant_id'], sid_by_patient, pid_in_vid)
        tags['session_id'] = _extract_session(dcm, tags['visit_id'])
        tags['sequence_type_id'] = _extract_sequence_type(dcm)
        tags['sequence_id'] = _extract_sequence(tags['session_id'], tags['sequence_type_id'])
        if rep_in_path:
            tags['repetition_id'] = _extract_repetition_from_path(dcm, file_path, tags['sequence_id'])
        else:
            tags['repetition_id'] = _extract_repetition(dcm, tags['sequence_id'])
        tags['file_id'] = extract_dicom(file_path, file_type, is_copy, tags['repetition_id'], step_id)
    except InvalidDicomError:
        logging.warning("%s is not a DICOM file !" % file_path)
    except IntegrityError:
        # TODO: properly deal with concurrency problems
        logging.warning("A problem occurred with the DB ! A rollback will be performed...")
        conn.db_session.rollback()
    return tags
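# Example call (a minimal sketch, not part of the original module): `db_conn` is assumed to be an
# already-initialised connection object exposing get_dataset, db_session, the mapped classes
# (Participant, Visit, ...) and the get_*_id helpers used below; the path and step ID are hypothetical.
#
#     from data_tracking import dicom_import
#
#     tags = dicom_import.dicom2db('/data/study/patient001/visit1/t1/rep1/img.dcm', 'DICOM', False,
#                                  step_id=1, db_conn=db_conn, visit_in_path=True, rep_in_path=True)
#     print(tags['participant_id'], tags['visit_id'], tags['file_id'])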
def extract_dicom(path, file_type, is_copy, repetition_id, processing_step_id):
    df = conn.db_session.query(conn.DataFile).filter_by(path=path).one_or_none()
    if not df:
        df = conn.DataFile(
            path=path,
            type=file_type,
            repetition_id=repetition_id,
            processing_step_id=processing_step_id,
            is_copy=is_copy
        )
        conn.db_session.merge(df)
    else:
        df.type = file_type
        df.repetition_id = repetition_id
        df.processing_step_id = processing_step_id
        df.is_copy = is_copy
    conn.db_session.commit()
    return conn.db_session.query(conn.DataFile).filter_by(path=path).one_or_none().id


########################################################################################################################
# PRIVATE FUNCTIONS
########################################################################################################################

def _extract_participant(dcm, dataset, pid_in_vid=False):
    try:
        participant_name = dcm.PatientID
        if pid_in_vid:
            try:
                participant_name = utils.split_patient_id(participant_name)[1]
            except TypeError:
                pass
    except AttributeError:
        logging.warning("Patient ID was not found !")
        participant_name = None
    try:
        participant_birth_date = utils.format_date(dcm.PatientBirthDate)
    except AttributeError:
        logging.debug("Field PatientBirthDate was not found")
        participant_birth_date = None
    try:
        participant_gender = utils.format_gender(dcm.PatientSex)
    except AttributeError:
        logging.debug("Field PatientSex was not found")
        participant_gender = None
    participant_id = conn.get_participant_id(participant_name, dataset)
    participant = conn.db_session.query(
        conn.Participant).filter_by(id=participant_id).one_or_none()
    if not participant:
        participant = conn.Participant(
            id=participant_id,
            gender=participant_gender,
            birth_date=participant_birth_date,
        )
        conn.db_session.merge(participant)
    else:
        participant.gender = participant_gender
        participant.birth_date = participant_birth_date
    conn.db_session.commit()
    return conn.db_session.query(
        conn.Participant).filter_by(id=participant_id).one_or_none().id
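# Every extractor below repeats the same upsert shape as _extract_participant: query by key, create
# and merge if missing, update otherwise, commit, then re-query to return the row ID. A generic
# helper along these lines (a sketch, not part of the original module) could factor it out:
#
#     def _upsert_and_get_id(model, keys, values):
#         row = conn.db_session.query(model).filter_by(**keys).one_or_none()
#         if not row:
#             conn.db_session.merge(model(**dict(keys, **values)))
#         else:
#             for attr, value in values.items():
#                 setattr(row, attr, value)
#         conn.db_session.commit()
#         return conn.db_session.query(model).filter_by(**keys).one_or_none().id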
def _extract_visit(dcm, dataset, participant_id, by_patient=False, pid_in_vid=False):
    visit_name = None
    if pid_in_vid:  # If the patient ID and the visit ID are mixed into the PatientID field (e.g. LREN data)
        try:
            patient_id = dcm.PatientID
            visit_name = utils.split_patient_id(patient_id)[0]
        except (AttributeError, TypeError):
            visit_name = None
    if not pid_in_vid or not visit_name:  # Otherwise, we use the StudyID (also used as a session ID) (e.g. PPMI data)
        try:
            visit_name = str(dcm.StudyID)
            if by_patient:  # If the Study ID is given at the patient level (e.g. LREN data), here is a little trick
                visit_name = str(dcm.PatientID) + "_" + visit_name
        except AttributeError:
            visit_name = None
    try:
        scan_date = utils.format_date(dcm.AcquisitionDate)
        if not scan_date:
            raise AttributeError
    except AttributeError:
        try:
            scan_date = utils.format_date(dcm.SeriesDate)  # If the acquisition date is missing, use the series date
        except AttributeError:
            scan_date = None
    try:
        participant_age = utils.format_age(dcm.PatientAge)
    except AttributeError:
        logging.debug("Field PatientAge was not found")
        participant_age = None
    visit_id = conn.get_visit_id(visit_name, dataset)
    visit = conn.db_session.query(conn.Visit).filter_by(id=visit_id).one_or_none()
    if not visit:
        visit = conn.Visit(
            id=visit_id,
            date=scan_date,
            participant_id=participant_id,
            patient_age=participant_age
        )
        conn.db_session.merge(visit)
    else:
        visit.date = scan_date
        visit.participant_id = participant_id
        visit.patient_age = participant_age
    conn.db_session.commit()
    return conn.db_session.query(conn.Visit).filter_by(id=visit_id).one_or_none().id


def _extract_session(dcm, visit_id):
    try:
        session_value = str(dcm.StudyID)
    except AttributeError:
        logging.debug("Field StudyID was not found")
        session_value = None
    session = conn.db_session.query(conn.Session).filter_by(
        visit_id=visit_id, name=session_value).first()
    if not session:
        session = conn.Session(
            visit_id=visit_id,
            name=session_value,
        )
        conn.db_session.merge(session)
    conn.db_session.commit()
    return conn.db_session.query(conn.Session).filter_by(
        visit_id=visit_id, name=session_value).first().id


def _extract_sequence_type(dcm):
    fields = _extract_sequence_type_fields(dcm)
    # The same column/value mapping is used to look up an existing row, to create a missing one, and
    # to re-query the stored row for its ID.
    params = {
        'name': fields['sequence_name'],
        'manufacturer': fields['manufacturer'],
        'manufacturer_model_name': fields['manufacturer_model_name'],
        'institution_name': fields['institution_name'],
        'slice_thickness': fields['slice_thickness'],
        'repetition_time': fields['repetition_time'],
        'echo_time': fields['echo_time'],
        'echo_number': fields['echo_number'],
        'number_of_phase_encoding_steps': fields['number_of_phase_encoding_steps'],
        'percent_phase_field_of_view': fields['percent_phase_field_of_view'],
        'pixel_bandwidth': fields['pixel_bandwidth'],
        'flip_angle': fields['flip_angle'],
        'rows': fields['rows'],
        'columns': fields['columns'],
        'magnetic_field_strength': fields['magnetic_field_strength'],
        'space_between_slices': fields['space_between_slices'],
        'echo_train_length': fields['echo_train_length'],
        'percent_sampling': fields['percent_sampling'],
        'pixel_spacing_0': fields['pixel_spacing_0'],
        'pixel_spacing_1': fields['pixel_spacing_1'],
    }
    sequence_type = conn.db_session.query(conn.SequenceType).filter_by(**params).one_or_none()
    if not sequence_type:
        sequence_type = conn.SequenceType(**params)
        conn.db_session.merge(sequence_type)
    conn.db_session.commit()
    return conn.db_session.query(conn.SequenceType).filter_by(**params).one_or_none().id
def _extract_sequence_type_fields(dcm):
    fields = dict()
    try:
        fields['sequence_name'] = dcm.SeriesDescription  # It seems better to use this instead of ProtocolName
    except AttributeError:
        logging.debug("Field SeriesDescription was not found")
        try:
            fields['sequence_name'] = dcm.ProtocolName  # If SeriesDescription is missing, we use ProtocolName
        except AttributeError:
            fields['sequence_name'] = None
    try:
        fields['manufacturer'] = dcm.Manufacturer
    except AttributeError:
        logging.debug("Field Manufacturer was not found")
        fields['manufacturer'] = None
    try:
        fields['manufacturer_model_name'] = dcm.ManufacturerModelName
    except AttributeError:
        logging.debug("Field ManufacturerModelName was not found")
        fields['manufacturer_model_name'] = None
    try:
        fields['institution_name'] = dcm.InstitutionName
    except AttributeError:
        logging.debug("Field InstitutionName was not found")
        fields['institution_name'] = None
    try:
        fields['slice_thickness'] = float(dcm.SliceThickness)
    except (AttributeError, ValueError):
        logging.debug("Field SliceThickness was not found")
        fields['slice_thickness'] = None
    try:
        fields['repetition_time'] = float(dcm.RepetitionTime)
    except (AttributeError, ValueError):
        logging.debug("Field RepetitionTime was not found")
        fields['repetition_time'] = None
    try:
        fields['echo_time'] = float(dcm.EchoTime)
    except (AttributeError, ValueError):
        logging.debug("Field EchoTime was not found")
        fields['echo_time'] = None
    try:
        fields['number_of_phase_encoding_steps'] = int(dcm.NumberOfPhaseEncodingSteps)
    except (AttributeError, ValueError):
        logging.debug("Field NumberOfPhaseEncodingSteps was not found")
        fields['number_of_phase_encoding_steps'] = None
    try:
        fields['percent_phase_field_of_view'] = float(dcm.PercentPhaseFieldOfView)
    except (AttributeError, ValueError):
        logging.debug("Field PercentPhaseFieldOfView was not found")
        fields['percent_phase_field_of_view'] = None
    try:
        fields['pixel_bandwidth'] = int(dcm.PixelBandwidth)
    except (AttributeError, ValueError):
        logging.debug("Field PixelBandwidth was not found")
        fields['pixel_bandwidth'] = None
    try:
        fields['flip_angle'] = float(dcm.FlipAngle)
    except (AttributeError, ValueError):
        logging.debug("Field FlipAngle was not found")
        fields['flip_angle'] = None
    try:
        fields['rows'] = int(dcm.Rows)
    except (AttributeError, ValueError):
        logging.debug("Field Rows was not found")
        fields['rows'] = None
    try:
        fields['columns'] = int(dcm.Columns)
    except (AttributeError, ValueError):
        logging.debug("Field Columns was not found")
        fields['columns'] = None
    try:
        fields['magnetic_field_strength'] = float(dcm.MagneticFieldStrength)
    except (AttributeError, ValueError):
        logging.debug("Field MagneticFieldStrength was not found")
        fields['magnetic_field_strength'] = None
    try:
        fields['echo_train_length'] = int(dcm.EchoTrainLength)
    except (AttributeError, ValueError):
        logging.debug("Field EchoTrainLength was not found")
        fields['echo_train_length'] = None
    try:
        fields['percent_sampling'] = float(dcm.PercentSampling)
    except (AttributeError, ValueError):
        logging.debug("Field PercentSampling was not found")
        fields['percent_sampling'] = None
    try:
        pixel_spacing = dcm.PixelSpacing
    except AttributeError:
        logging.debug("Field PixelSpacing was not found")
        pixel_spacing = None
    try:
        fields['pixel_spacing_0'] = float(pixel_spacing[0])
    except (AttributeError, ValueError, TypeError):
        logging.debug("Field pixel_spacing0 was not found")
        fields['pixel_spacing_0'] = None
    try:
        fields['pixel_spacing_1'] = float(pixel_spacing[1])
    except (AttributeError, ValueError, TypeError):
        logging.debug("Field pixel_spacing1 was not found")
        fields['pixel_spacing_1'] = None
    try:
        fields['echo_number'] = int(dcm.EchoNumber)
    except (AttributeError, ValueError):
        logging.debug("Field echo_number was not found")
        fields['echo_number'] = None
    try:
        fields['space_between_slices'] = float(dcm[0x0018, 0x0088].value)
    except (AttributeError, ValueError, KeyError):
        logging.debug("Field space_between_slices was not found")
        fields['space_between_slices'] = None
    return fields
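# Every header above is read with the same try/except shape. Assuming, as the code above does, that
# pydicom raises AttributeError for a missing tag and the cast raises ValueError or TypeError for an
# unusable value, a small helper (a sketch, not part of the original module) could shrink this function:
#
#     def _read_field(dcm, tag_name, cast):
#         try:
#             return cast(getattr(dcm, tag_name))
#         except (AttributeError, ValueError, TypeError):
#             logging.debug("Field %s was not found" % tag_name)
#             return None
#
#     # e.g. fields['flip_angle'] = _read_field(dcm, 'FlipAngle', float)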
def _extract_sequence(session_id, sequence_type_id):
    name = conn.db_session.query(conn.SequenceType).filter_by(id=sequence_type_id).one_or_none().name
    sequence = conn.db_session.query(conn.Sequence).filter_by(session_id=session_id, name=name).one_or_none()
    if not sequence:
        sequence = conn.Sequence(
            name=name,
            session_id=session_id,
            sequence_type_id=sequence_type_id,
        )
        conn.db_session.merge(sequence)
    else:
        sequence.sequence_type_id = sequence_type_id
    conn.db_session.commit()
    return conn.db_session.query(conn.Sequence).filter_by(session_id=session_id, name=name).one_or_none().id


def _extract_repetition(dcm, sequence_id):
    try:
        repetition_name = str(dcm.SeriesNumber)
    except AttributeError:
        logging.warning("Field SeriesNumber was not found")
        repetition_name = None
    try:
        series_date = utils.format_date(dcm.SeriesDate)
        if not series_date:
            raise AttributeError
    except AttributeError:
        series_date = None
    repetition = conn.db_session.query(conn.Repetition).filter_by(
        sequence_id=sequence_id, name=repetition_name).one_or_none()
    if not repetition:
        repetition = conn.Repetition(
            sequence_id=sequence_id,
            name=repetition_name,
            date=series_date
        )
        conn.db_session.merge(repetition)
    else:
        repetition.date = series_date
    conn.db_session.commit()
    return conn.db_session.query(conn.Repetition).filter_by(
        sequence_id=sequence_id, name=repetition_name).one_or_none().id
def _extract_visit_from_path(dcm, file_path, pid_in_vid, by_patient, dataset, participant_id):
    visit_name = None
    if pid_in_vid:  # If the patient ID and the visit ID are mixed into the PatientID field (e.g. LREN data)
        try:
            patient_name = dcm.PatientID
            try:
                visit_name = utils.split_patient_id(patient_name)[0]
            except TypeError:
                pass
        except AttributeError:
            logging.warning("Patient ID was not found !")
            visit_name = None
    if not pid_in_vid or not visit_name:  # Otherwise, we get the visit ID from the folder hierarchy (e.g. PPMI data)
        try:
            visit_name = str(re.findall(r'/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.dcm', file_path)[0])
            if by_patient:  # If the Study ID is given at the patient level (e.g. LREN data), here is a little trick
                visit_name = dcm.PatientID + "_" + visit_name
        except AttributeError:
            logging.debug("Field StudyID or PatientID was not found")
            visit_name = None
    try:
        scan_date = utils.format_date(dcm.AcquisitionDate)
        if not scan_date:
            raise AttributeError
    except AttributeError:
        scan_date = utils.format_date(dcm.SeriesDate)  # If the acquisition date is missing, use the series date
    visit_id = conn.get_visit_id(visit_name, dataset)
    visit = conn.db_session.query(conn.Visit).filter_by(id=visit_id).one_or_none()
    if not visit:
        visit = conn.Visit(
            id=visit_id,
            date=scan_date,
            participant_id=participant_id,
        )
        conn.db_session.merge(visit)
    else:
        visit.date = scan_date
        visit.participant_id = participant_id
    conn.db_session.commit()
    return conn.db_session.query(conn.Visit).filter_by(id=visit_id).one_or_none().id


def _extract_repetition_from_path(dcm, file_path, sequence_id):
    repetition_name = str(re.findall(r'/([^/]+?)/[^/]+?\.dcm', file_path)[0])
    try:
        series_date = utils.format_date(dcm.SeriesDate)
        if not series_date:
            raise AttributeError
    except AttributeError:
        series_date = None
    repetition = conn.db_session.query(conn.Repetition).filter_by(
        sequence_id=sequence_id, name=repetition_name).one_or_none()
    if not repetition:
        repetition = conn.Repetition(
            sequence_id=sequence_id,
            name=repetition_name,
            date=series_date
        )
        conn.db_session.merge(repetition)
    else:
        repetition.date = series_date
    conn.db_session.commit()
    return conn.db_session.query(conn.Repetition).filter_by(
        sequence_id=sequence_id, name=repetition_name).one_or_none().id
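# The two *_from_path extractors above rely on a fixed folder hierarchy below the data root, e.g. a
# hypothetical PPMI-style layout (the concrete layout depends on the data set):
#
#     .../<visit>/<session>/<sequence>/<repetition>/<image>.dcm
#
# so that r'/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.dcm' captures <visit> and r'/([^/]+?)/[^/]+?\.dcm'
# captures <repetition>. A quick check of that assumption:
#
#     path = '/data/3102/visit_04/MPRAGE/S1234/img.dcm'  # made-up example path
#     re.findall(r'/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.dcm', path)  # -> ['visit_04']
#     re.findall(r'/([^/]+?)/[^/]+?\.dcm', path)                # -> ['S1234']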
{ "content_hash": "e6a3ee75cb52412f41d9f6398d92a769", "timestamp": "", "source": "github", "line_count": 533, "max_line_length": 119, "avg_line_length": 40.0281425891182, "alnum_prop": 0.6176236231544411, "repo_name": "LREN-CHUV/mri-meta-extract", "id": "e4b1109b10e35542728949eb78579aa0c28d2c9f", "size": "21335", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "data_tracking/dicom_import.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "46549" }, { "name": "Shell", "bytes": "3030" } ], "symlink_target": "" }