text stringlengths 4 1.02M | meta dict |
|---|---|
"""Tests for gem5Run object"""
import hashlib
from pathlib import Path
import os
import unittest
from uuid import uuid4
from gem5art.artifact import artifact
from gem5art.run import gem5Run
class TestSERun(unittest.TestCase):
    """Unit tests for :meth:`gem5art.run.gem5Run.createSERun`."""

    def setUp(self):
        """Create the three artifacts an SE run depends on, then the run."""
        # Artifact describing the gem5 binary itself.
        self.gem5art = artifact.Artifact(
            {
                "_id": uuid4(),
                "name": "test-gem5",
                "type": "test-binary",
                "documentation": "This is a description of gem5 artifact",
                "command": "scons build/X86/gem5.opt",
                "path": "gem5/build/X86/gem5.opt",
                "hash": hashlib.md5().hexdigest(),
                "git": artifact.getGit(Path(".")),
                "cwd": "/",
                "inputs": [],
            }
        )
        # Artifact describing the gem5 git repository.
        self.gem5gitart = artifact.Artifact(
            {
                "_id": uuid4(),
                "name": "test-gem5-git",
                "type": "test-git",
                "documentation": "This is a description of gem5 git artifact",
                "command": "git clone something",
                "path": "/",
                "hash": hashlib.md5().hexdigest(),
                "git": artifact.getGit(Path(".")),
                "cwd": "/",
                "inputs": [],
            }
        )
        # Artifact describing the run-script repository.
        self.runscptart = artifact.Artifact(
            {
                "_id": uuid4(),
                "name": "test-runscript",
                "type": "test-git",
                # Fixed typo: "aritfact" -> "artifact".
                "documentation": "This is a description of runscript artifact",
                "command": "git clone something",
                "path": "/",
                "hash": hashlib.md5().hexdigest(),
                "git": artifact.getGit(Path(".")),
                "cwd": "/",
                "inputs": [],
            }
        )
        self.run = gem5Run.createSERun(
            "test SE run",
            "configs-tests/run_test.py",
            "results/run_test/out",
            self.gem5art,
            self.gem5gitart,
            self.runscptart,
            "extra",
            "params",
        )

    def test_out_dir(self):
        """outdir should be the absolute form of the relative path given."""
        relative_outdir = "results/run_test/out"
        self.assertEqual(
            self.run.outdir.relative_to(Path(".").resolve()),
            Path(relative_outdir),
        )
        self.assertTrue(
            self.run.outdir.is_absolute(),
            "outdir should be absolute directory",
        )

    def test_command(self):
        """The assembled gem5 command line should match exactly."""
        self.assertEqual(
            self.run.command,
            [
                "gem5/build/X86/gem5.opt",
                "-re",
                "--outdir={}".format(os.path.abspath("results/run_test/out")),
                "configs-tests/run_test.py",
                "extra",
                "params",
            ],
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "88030ad9424da964f8396483e7d9763e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 28.989795918367346,
"alnum_prop": 0.44456177402323127,
"repo_name": "gem5/gem5",
"id": "1710dbc7069fbd39c7ac2e6e156d128b94bec6e6",
"size": "4397",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "util/gem5art/run/tests/test_run.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
} |
"""
A Docker Hypervisor which allows running Linux Containers instead of VMs.
"""
import os
import random
import socket
import time
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log
from nova.openstack.common import units
from nova import utils
import novadocker.virt.docker.client as docker_client
from novadocker.virt.docker import hostinfo
from novadocker.virt.docker import network
from novadocker.virt import driver
# Configuration options exposed under the [docker] group.
docker_opts = [
    cfg.IntOpt('registry_default_port',
               default=5042,
               help=_('Default TCP port to find the '
                      'docker-registry container'),
               deprecated_group='DEFAULT',
               deprecated_name='docker_registry_default_port'),
]

CONF = cfg.CONF
CONF.register_opts(docker_opts, 'docker')
# my_ip (this host's address) is declared in nova.netconf.
CONF.import_opt('my_ip', 'nova.netconf')

LOG = log.getLogger(__name__)
class DockerDriver(driver.ComputeDriver):
    """Docker hypervisor driver.

    Maps Nova compute operations onto Docker containers: each Nova
    instance is backed by one container whose Hostname is the instance
    name, and images come from a local docker-registry container.
    """

    def __init__(self, virtapi):
        super(DockerDriver, self).__init__(virtapi)
        # Docker client is created lazily on first use (see the `docker`
        # property) so construction never touches the daemon.
        self._docker = None

    @property
    def docker(self):
        """Lazily-constructed HTTP client for the local Docker daemon."""
        if self._docker is None:
            self._docker = docker_client.DockerHTTPClient()
        return self._docker

    def init_host(self, host):
        """Warn about deprecation and verify the Docker daemon is reachable."""
        LOG.warning(_('The docker driver does not meet the Nova project\'s '
                      'requirements for quality verification and is planned '
                      'for removal. This may change, but users should plan '
                      'accordingly. Additional details here: '
                      'https://wiki.openstack.org/wiki/HypervisorSupportMatrix'
                      '/DeprecationPlan'))
        if self._is_daemon_running() is False:
            raise exception.NovaException(_('Docker daemon is not running or '
                'is not reachable (check the rights on /var/run/docker.sock)'))

    def _is_daemon_running(self):
        """Return True iff the Docker daemon answers a trivial API call."""
        try:
            self.docker.list_containers()
            return True
        except socket.error:
            # NOTE(samalba): If the daemon is not running, we'll get a socket
            # error. The list_containers call is safe to call often, there
            # is an internal hard limit in docker if the amount of containers
            # is huge.
            return False

    def list_instances(self, inspect=False):
        """List containers.

        :param inspect: when True return the full inspect dicts, otherwise
            just the container hostnames (which double as instance names).
        """
        res = []
        for container in self.docker.list_containers():
            info = self.docker.inspect_container(container['id'])
            if inspect:
                res.append(info)
            else:
                res.append(info['Config'].get('Hostname'))
        return res

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        msg = _("VIF plugging is not supported by the Docker driver.")
        raise NotImplementedError(msg)

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        msg = _("VIF unplugging is not supported by the Docker driver.")
        raise NotImplementedError(msg)

    def _find_container_by_name(self, name):
        """Return the inspect dict of the container whose Hostname is `name`,
        or {} when no such container exists."""
        for info in self.list_instances(inspect=True):
            if info['Config'].get('Hostname') == name:
                return info
        return {}

    def get_info(self, instance):
        """Return a power/resource info dict for `instance`.

        :raises: InstanceNotFound when no matching container exists.
        """
        container = self._find_container_by_name(instance['name'])
        if not container:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        running = container['State'].get('Running')
        # Memory/CPU accounting is not implemented; report placeholders.
        info = {
            'max_mem': 0,
            'mem': 0,
            'num_cpu': 1,
            'cpu_time': 0
        }
        info['state'] = power_state.RUNNING if running \
            else power_state.SHUTDOWN
        return info

    def get_host_stats(self, refresh=False):
        """Return host statistics (thin wrapper over get_available_resource)."""
        hostname = socket.gethostname()
        # NOTE(review): `memory` and `disk` are unused here -- the values
        # actually reported come from get_available_resource(); these two
        # calls look like leftovers.
        memory = hostinfo.get_memory_usage()
        disk = hostinfo.get_disk_usage()
        stats = self.get_available_resource(hostname)
        # NOTE(review): the first assignment below is a no-op (self-assign).
        stats['hypervisor_hostname'] = stats['hypervisor_hostname']
        stats['host_hostname'] = stats['hypervisor_hostname']
        stats['host_name_label'] = stats['hypervisor_hostname']
        return stats

    def get_available_resource(self, nodename):
        """Report this node's resources to the scheduler.

        The first nodename seen is remembered; a change afterwards is only
        logged because it requires a service restart to take effect.
        """
        if not hasattr(self, '_nodename'):
            self._nodename = nodename
        if nodename != self._nodename:
            LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
                        'A restart is required to take effect.'
                        ) % {'old': self._nodename,
                             'new': nodename})
        memory = hostinfo.get_memory_usage()
        disk = hostinfo.get_disk_usage()
        stats = {
            'vcpus': 1,
            'vcpus_used': 0,
            'memory_mb': memory['total'] / units.Mi,
            'memory_mb_used': memory['used'] / units.Mi,
            'local_gb': disk['total'] / units.Gi,
            'local_gb_used': disk['used'] / units.Gi,
            'disk_available_least': disk['available'] / units.Gi,
            'hypervisor_type': 'docker',
            'hypervisor_version': utils.convert_version_to_int('1.0'),
            'hypervisor_hostname': self._nodename,
            'cpu_info': '?',
            'supported_instances': jsonutils.dumps([
                ('i686', 'docker', 'lxc'),
                ('x86_64', 'docker', 'lxc')
            ])
        }
        return stats

    def _find_container_pid(self, container_id):
        """Poll the container's cgroup tasks file for its first PID.

        :return: the PID as an int, or None (implicit) after ~10s of polling
            without finding one.
        """
        cgroup_path = hostinfo.get_cgroup_devices_path()
        lxc_path = os.path.join(cgroup_path, 'lxc')
        tasks_path = os.path.join(lxc_path, container_id, 'tasks')
        n = 0
        while True:
            # NOTE(samalba): We wait for the process to be spawned inside the
            # container in order to get the the "container pid". This is
            # usually really fast. To avoid race conditions on a slow
            # machine, we allow 10 seconds as a hard limit.
            if n > 20:
                return
            try:
                with open(tasks_path) as f:
                    pids = f.readlines()
                    if pids:
                        return int(pids[0].strip())
            except IOError:
                # Tasks file may not exist yet; keep polling.
                pass
            time.sleep(0.5)
            n += 1

    def _setup_network(self, instance, network_info):
        """Wire the container into the Nova network.

        Creates a veth pair, attaches the local end to the bridge, moves the
        remote end into the container's network namespace, then configures
        its IP address and default route. Only the first network in
        `network_info` is used.
        """
        if not network_info:
            return
        container_id = self._find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        network_info = network_info[0]['network']
        netns_path = '/var/run/netns'
        if not os.path.exists(netns_path):
            utils.execute(
                'mkdir', '-p', netns_path, run_as_root=True)
        nspid = self._find_container_pid(container_id)
        if not nspid:
            msg = _('Cannot find any PID under container "{0}"')
            raise RuntimeError(msg.format(container_id))
        # Expose the container's netns to `ip netns` via a symlink.
        netns_path = os.path.join(netns_path, container_id)
        utils.execute(
            'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),
            '/var/run/netns/{0}'.format(container_id),
            run_as_root=True)
        # Random suffix to avoid interface-name collisions between instances.
        rand = random.randint(0, 100000)
        if_local_name = 'pvnetl{0}'.format(rand)
        if_remote_name = 'pvnetr{0}'.format(rand)
        bridge = network_info['bridge']
        gateway = network.find_gateway(instance, network_info)
        ip = network.find_fixed_ip(instance, network_info)
        # The undo manager rolls back the veth creation if a later step fails.
        undo_mgr = utils.UndoManager()
        try:
            utils.execute(
                'ip', 'link', 'add', 'name', if_local_name, 'type',
                'veth', 'peer', 'name', if_remote_name,
                run_as_root=True)
            undo_mgr.undo_with(lambda: utils.execute(
                'ip', 'link', 'delete', if_local_name, run_as_root=True))
            # NOTE(samalba): Deleting the interface will delete all associated
            # resources (remove from the bridge, its pair, etc...)
            utils.execute(
                'brctl', 'addif', bridge, if_local_name,
                run_as_root=True)
            utils.execute(
                'ip', 'link', 'set', if_local_name, 'up',
                run_as_root=True)
            utils.execute(
                'ip', 'link', 'set', if_remote_name, 'netns', nspid,
                run_as_root=True)
            utils.execute(
                'ip', 'netns', 'exec', container_id, 'ifconfig',
                if_remote_name, ip,
                run_as_root=True)
            utils.execute(
                'ip', 'netns', 'exec', container_id,
                'ip', 'route', 'replace', 'default', 'via', gateway, 'dev',
                if_remote_name, run_as_root=True)
        except Exception:
            msg = _('Failed to setup the network, rolling back')
            undo_mgr.rollback_and_reraise(msg=msg, instance=instance)

    def _get_memory_limit_bytes(self, instance):
        """Return the container memory limit in bytes, from the flavor."""
        system_meta = utils.instance_sys_meta(instance)
        return int(system_meta.get('instance_type_memory_mb', 0)) * units.Mi

    def _get_image_name(self, context, instance, image):
        """Map a Glance image to its name in the local docker registry.

        :raises: InstanceDeployFailure for non-docker container formats.
        """
        fmt = image['container_format']
        if fmt != 'docker':
            msg = _('Image container format not supported ({0})')
            raise exception.InstanceDeployFailure(msg.format(fmt),
                                                  instance_id=instance['name'])
        registry_port = self._get_registry_port()
        return '{0}:{1}/{2}'.format(CONF.my_ip,
                                    registry_port,
                                    image['name'])

    def _get_default_cmd(self, image_name):
        """Return ['sh'] when the image defines no command of its own.

        Implicitly returns None when the image already has a Cmd, so that
        spawn() leaves the image's own command in place.
        """
        default_cmd = ['sh']
        info = self.docker.inspect_image(image_name)
        if not info:
            return default_cmd
        if not info['container_config']['Cmd']:
            return default_cmd

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create and start a container for `instance`.

        Pulls the image from the registry when it is missing locally, then
        starts the container and wires up its network; on network failure
        the container is killed and destroyed before re-raising.
        """
        image_name = self._get_image_name(context, instance, image_meta)
        args = {
            'Hostname': instance['name'],
            'Image': image_name,
            'Memory': self._get_memory_limit_bytes(instance),
            'CpuShares': self._get_cpu_shares(instance)
        }
        default_cmd = self._get_default_cmd(image_name)
        if default_cmd:
            args['Cmd'] = default_cmd
        container_id = self._create_container(instance, args)
        if not container_id:
            # Creation fails when the image is absent locally: pull and retry.
            msg = _('Image name "{0}" does not exist, fetching it...')
            LOG.info(msg.format(image_name))
            res = self.docker.pull_repository(image_name)
            if res is False:
                raise exception.InstanceDeployFailure(
                    _('Cannot pull missing image'),
                    instance_id=instance['name'])
            container_id = self._create_container(instance, args)
            if not container_id:
                raise exception.InstanceDeployFailure(
                    _('Cannot create container'),
                    instance_id=instance['name'])
        self.docker.start_container(container_id)
        try:
            self._setup_network(instance, network_info)
        except Exception as e:
            msg = _('Cannot setup network: {0}')
            # Tear the container down before reporting the failure.
            self.docker.kill_container(container_id)
            self.docker.destroy_container(container_id)
            raise exception.InstanceDeployFailure(msg.format(e),
                                                  instance_id=instance['name'])

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Stop and delete the instance's container and network plumbing."""
        container_id = self._find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        self.docker.stop_container(container_id)
        self.docker.destroy_container(container_id)
        network.teardown_network(container_id)

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        pass

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot by stopping then starting the container.

        `reboot_type` (soft/hard) is ignored; both paths do stop+start.
        """
        container_id = self._find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        if not self.docker.stop_container(container_id):
            LOG.warning(_('Cannot stop the container, '
                          'please check docker logs'))
        if not self.docker.start_container(container_id):
            LOG.warning(_('Cannot restart the container, '
                          'please check docker logs'))

    def power_on(self, context, instance, network_info, block_device_info):
        """Start the instance's container (no-op when it does not exist)."""
        container_id = self._find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        self.docker.start_container(container_id)

    def power_off(self, instance):
        """Stop the instance's container (no-op when it does not exist)."""
        container_id = self._find_container_by_name(instance['name']).get('id')
        if not container_id:
            return
        self.docker.stop_container(container_id)

    def get_console_output(self, context, instance):
        """Return the container's log output, or None when it is missing.

        NOTE(review): uses attribute access (instance.name) where the rest
        of this driver uses item access (instance['name']) -- confirm the
        caller passes an object supporting attribute access.
        """
        container_id = self._find_container_by_name(instance.name).get('id')
        if not container_id:
            return
        return self.docker.get_container_logs(container_id)

    def _get_registry_port(self):
        """Find the host TCP port of the docker-registry container.

        Falls back to the configured default port when no registry container
        is found or its port mapping cannot be read.
        """
        default_port = CONF.docker.registry_default_port
        registry = None
        for container in self.docker.list_containers(_all=False):
            container = self.docker.inspect_container(container['id'])
            if 'docker-registry' in container['Path']:
                registry = container
                break
        if not registry:
            return default_port
        # NOTE(samalba): The registry service always binds on port 5000 in the
        # container
        try:
            # `container` is the registry here (loop broke on the match).
            return container['NetworkSettings']['PortMapping']['Tcp']['5000']
        except (KeyError, TypeError):
            # NOTE(samalba): Falling back to a default port allows more
            # flexibility (run docker-registry outside a container)
            return default_port

    def snapshot(self, context, instance, image_href, update_task_state):
        """Commit the container as an image and push it to the registry."""
        container_id = self._find_container_by_name(instance['name']).get('id')
        if not container_id:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        (image_service, image_id) = glance.get_remote_image_service(
            context, image_href)
        image = image_service.show(context, image_id)
        registry_port = self._get_registry_port()
        name = image['name']
        # NOTE(review): default_tag is computed on the bare image name, but
        # the registry-qualified name built below always contains ':' --
        # confirm the intended tagging behaviour.
        default_tag = (':' not in name)
        name = '{0}:{1}/{2}'.format(CONF.my_ip,
                                    registry_port,
                                    name)
        commit_name = name if not default_tag else name + ':latest'
        self.docker.commit_container(container_id, commit_name)
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        # Header ties the pushed repository back to the Glance image.
        headers = {'X-Meta-Glance-Image-Id': image_href}
        self.docker.push_repository(name, headers=headers)

    def _get_cpu_shares(self, instance):
        """Get allocated CPUs from configured flavor.

        Docker/lxc supports relative CPU allocation.
        cgroups specifies following:
        /sys/fs/cgroup/lxc/cpu.shares = 1024
        /sys/fs/cgroup/cpu.shares = 1024
        For that reason we use 1024 as multiplier.
        This multiplier allows to divide the CPU
        resources fair with containers started by
        the user (e.g. docker registry) which has
        the default CpuShares value of zero.
        """
        flavor = flavors.extract_flavor(instance)
        return int(flavor['vcpus']) * 1024

    def _create_container(self, instance, args):
        """Create a container named after the instance UUID.

        :return: the new container id, or a falsy value on failure.
        """
        name = "nova-" + instance['uuid']
        return self.docker.create_container(args, name)
| {
"content_hash": "4a3fb0c38bd942bda97b5c9bc26ca8cf",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 79,
"avg_line_length": 40.83582089552239,
"alnum_prop": 0.5745004873294347,
"repo_name": "russellb/nova-docker",
"id": "63d98123e5c0bd68913dcd797c0ce4fe0a427bff",
"size": "17050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "novadocker/virt/docker/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77084"
}
],
"symlink_target": ""
} |
import re
from bs4 import BeautifulSoup
from error import BookingServiceError
from model.booking import Booking
from model.event import Event
from service import raven_service
# Entry point of the college meal-booking web application.
BOOKING_SERVICE_URL = 'https://www.mealbookings.cai.cam.ac.uk/index.php'

# Module-level caches avoiding repeated page fetches within one process.
# Keys for the per-event caches are (event.code, date) tuples.
_AVAILABLE_EVENTS_CACHE = None
_ATTENDEE_NAME_CACHE = {}
_MENU_TEXT_CACHE = {}
_EVENT_OCCURRING_CACHE = {}
def get_available_events():
    """ Find all available events.
    :return: list of Event instances, representing events that exist in the
    booking system
    """
    global _AVAILABLE_EVENTS_CACHE
    if _AVAILABLE_EVENTS_CACHE is not None:
        return _AVAILABLE_EVENTS_CACHE[:]
    _AVAILABLE_EVENTS_CACHE = []
    browser = raven_service.get_default_authenticated_browser()
    index_html = browser.open(BOOKING_SERVICE_URL).read()
    index_soup = BeautifulSoup(index_html)
    # The second 'list' table on the index page holds the event rows.
    events_table = index_soup.find_all('table', {"class": "list"})[1]
    for cell in events_table.find_all('td'):
        links = cell.find_all('a')
        if not links:
            continue
        # The event code is the first run of digits in the link target.
        code = int(re.search(r'\d+', links[0].get('href')).group(0))
        _AVAILABLE_EVENTS_CACHE.append(Event(code=code, name=cell.get_text()))
    return _AVAILABLE_EVENTS_CACHE[:]
def get_attendee_names(event, date):
    """ Find the names of people attending `event` on `date`.
    :param event: Event instance specifying event to check
    :param date: datetime.date instance specifying the date to check
    :return: list of Strings (names of attendees)
    :raises: BookingServiceError if `event` isn't occurring on `date`
    """
    cache_key = (event.code, date)
    if cache_key not in _ATTENDEE_NAME_CACHE:
        if not is_event_occurring(event, date):
            error_string = '%s not occurring on %s' % (str(event), str(date))
            raise BookingServiceError(error_string)
        browser = raven_service.get_default_authenticated_browser()
        page_html = browser.open(
            event.url_for_date(date, BOOKING_SERVICE_URL)).read()
        page_soup = BeautifulSoup(page_html)
        # The first 'list' table on the event page is the attendance table.
        attendance_table = page_soup.find_all('table', {'class': 'list'})[0]
        cell_texts = [cell.get_text()
                      for cell in attendance_table.find_all('td')]
        # Keep non-empty cells that do not start with '(' -- those hold
        # actual attendee names.
        _ATTENDEE_NAME_CACHE[cache_key] = \
            [text for text in cell_texts if len(text) > 0 and text[0] != '(']
    return _ATTENDEE_NAME_CACHE[cache_key][:]
def get_menu_text(event, date):
    """ Get the text for the menu for `event` on `date`.
    :param event: Event instance indicating the event to check the menu for
    :param date: datetime.date instance indicate which day to check
    :return: String containing the menu (newlines separating items), or None if
    no menu is found
    :raises: BookingServiceError is `event` isn't occurring on `date`
    """
    cache_key = (event.code, date)
    if cache_key not in _MENU_TEXT_CACHE:
        if not is_event_occurring(event, date):
            error_string = '%s not occurring on %s' % (str(event), str(date))
            raise BookingServiceError(error_string)
        browser = raven_service.get_default_authenticated_browser()
        event_url = event.url_for_date(date, BOOKING_SERVICE_URL)
        event_html = browser.open(event_url).read()
        event_soup = BeautifulSoup(event_html)
        menu_divs = event_soup.find_all('div', {'class': 'menu'})
        if len(menu_divs) == 0:
            # Fix: cache the negative result too. Previously this returned
            # early without caching, so every later call re-fetched the page.
            _MENU_TEXT_CACHE[cache_key] = None
            return None
        menu_text = menu_divs[0].get_text().replace('\r', '\n')
        # Menu text sometimes contains large spans of ' ', so remove them. Also
        # remove stray spaces at the start/end of a line.
        # NOTE(review): r' +' strips *every* run of spaces, including single
        # spaces between words -- possibly meant r'  +' (two or more); verify
        # against live pages before changing.
        menu_text = re.sub(r' +', '', menu_text)
        menu_text = re.sub(r' ?\n ?', r'\n', menu_text)
        menu_text = re.sub(r'(^\n)|(\n$)', r'', menu_text)
        _MENU_TEXT_CACHE[cache_key] = menu_text
    return _MENU_TEXT_CACHE[cache_key]
def is_event_occurring(event, date):
    """ Ensure that `event` is occurring on `date`.
    :param event: Event instance representing the event to check
    :param date: datetime.date for the date to check
    :return: bool, True if `event` is occurring on `date`
    """
    key = (event.code, date)
    if key not in _EVENT_OCCURRING_CACHE:
        browser = raven_service.get_default_authenticated_browser()
        page_url = event.url_for_date(date, BOOKING_SERVICE_URL)
        page_html = browser.open(page_url).read()
        # The booking page states "not running on" when the event is absent.
        _EVENT_OCCURRING_CACHE[key] = 'not running on' not in page_html
    return _EVENT_OCCURRING_CACHE[key]
def create_booking(event, user, date):
    """ Attempt to book `user` into `event` on `date`.
    :param event: Event instance for event to book in to
    :param user: User instance for person to book in
    :param date: datetime.date instance for date to book
    :return: Booking instance representing the successful booking
    :raises: BookingServiceError if the booking could not be made
    """
    if not is_event_occurring(event, date):
        error_string = '%s not occurring on %s' % (str(event), str(date))
        raise BookingServiceError(error_string)
    # Authenticate as the user being booked, not the default service account.
    crsid, password = user.crsid, user.password
    browser = raven_service.get_authenticated_browser(crsid, password)
    event_url = event.url_for_date(date, BOOKING_SERVICE_URL)
    event_html = browser.open(event_url).read()
    # The requirements field only appears on the page once a booking exists,
    # so its absence means the user is not yet booked in.
    if 'Other dietary or non-dietary requirements' not in event_html:
        # Not currently booked in, so make booking.
        # Two consecutive form submissions (presumably book, then confirm);
        # each page exposes the relevant form first (nr=0).
        browser.select_form(nr=0)
        browser.submit()
        browser.select_form(nr=0)
        browser.submit()
    return Booking(event, user, date)
def get_booking(event, user, date):
    """ Determine if `user` is booked into `event` on `date`.
    :param event: Event instance for event to check
    :param user: User instance for user to check
    :param date: datetime.date instance for date to check
    :return: Booking instance for booking if `user` is booked in to `event`,
    or None if not
    :raises: BookingServiceError if `event` doesn't take place on `date`.
    """
    if not is_event_occurring(event, date):
        raise BookingServiceError(
            '%s not occurring on %s' % (str(event), str(date)))
    # Check with the user's own credentials.
    browser = raven_service.get_authenticated_browser(
        user.crsid, user.password)
    page_html = browser.open(
        event.url_for_date(date, BOOKING_SERVICE_URL)).read()
    # The requirements field only appears on the page once a booking exists.
    if 'Other dietary or non-dietary requirements' in page_html:
        return Booking(event, user, date)
    return None
"content_hash": "6fae1cca48d9082a4ad752345b6ed2c8",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 42.67664670658683,
"alnum_prop": 0.6531499929844254,
"repo_name": "JackMorris/CaiusHallHelper",
"id": "2a0802ed51de0a32cc073cce0c1f526c0202f29c",
"size": "7127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service/booking_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24986"
}
],
"symlink_target": ""
} |
from typing import Dict, List, no_type_check
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        # Children default to None (leaf node); value defaults to 0.
        self.val, self.left, self.right = val, left, right
class Solution:
    """LeetCode 508: most frequent subtree sum."""

    def findFrequentTreeSum(self, root: TreeNode) -> List[int]:
        """Return every subtree sum that occurs with maximal frequency."""
        freq = {}
        self.findFrequentTreeSumHelper(root, freq)
        best_count = -1
        modes = []
        for total, count in freq.items():
            if count > best_count:
                best_count, modes = count, [total]
            elif count == best_count:
                modes.append(total)
        return modes

    def findFrequentTreeSumHelper(self, root: TreeNode, freq: Dict[int, int]) -> int:
        """Post-order walk: tally each subtree sum in `freq`, return this one."""
        if root is None:
            return 0
        # Subtree sum = left subtree + right subtree + this node's value.
        subtotal = (self.findFrequentTreeSumHelper(root.left, freq)
                    + self.findFrequentTreeSumHelper(root.right, freq)
                    + root.val)
        freq[subtotal] = freq.get(subtotal, 0) + 1
        return subtotal
| {
"content_hash": "b34dfe2a31901940e95594f3c4af738b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 27.55,
"alnum_prop": 0.5626134301270418,
"repo_name": "vermouth1992/Leetcode",
"id": "68104ed16a16e6d7a435bbff438d534eae5cd8a7",
"size": "2046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/508.most-frequent-subtree-sum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "369467"
},
{
"name": "Java",
"bytes": "14251"
},
{
"name": "Python",
"bytes": "232732"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
import scipy.io.wavfile as sciwave
from scipy.fftpack import fft
import math
import numpy as np
import RecPing
import time
import pylab
# Direction-of-arrival (DOA) estimation script: records a ping on an N-mic
# linear array, then localises the source with the Capon and MUSIC
# beamformers and plots both spectra.

# ========= (1) RECORDING ==========
filename = time.strftime("%d%b%Y_%H%M%S.wav")  # file name = timestamp
RecPing.recPing(filename)  # record the ping

# ========= (2) RECEIVED SIGNAL =========
Fs, a = sciwave.read(filename)
a = np.array(a)
a = a / (2.0**15 - 1.0)  # normalise the samples (16-bit values)

####### Tunable parameters ###
M = 1;  # Number of sources
N = 2;  # Number of microphones (must have M <= N - 1)
dist = .10;  # Distance between adjacent microphones (in m)
c = 346.287;  # Speed of sound in air (in m/s)
#c = 1500;  # Speed of sound in water
f = 5000;  # Signal frequency (in Hz)
lfft = 1024 * 1;  # Number of data points for FFT in a snapshot
K = math.floor(np.size(a[:,0]) / lfft);  # Number of frequency snapshots (multiple of lfft)
L = int(K * lfft);  # Number of data snapshots recorded by receiver
y = a[0:L,:];  # Truncated signal (whole number of snapshots)
T = 1 / Fs;

# Microphone positions: uniform linear array along x (elevation would require
# a rectangular array with at least 3 hydrophones).
rx=np.linspace(-(N-1)/2.0,(N-1)/2.0,N).reshape(N,1);
ry=np.zeros((N,1),float);
rz=np.zeros((N,1),float);
r = np.concatenate((rx,ry,rz),axis=1);  # Assume uniform linear array

## Complex form of the received signal
df = float(Fs)/float(lfft)/1.0;  # frequency grid size
F = np.arange(0,Fs/1,df);
X=np.zeros((int(lfft),int(N),int(K)),dtype='complex');
# FFT of each snapshot, per microphone channel.
for ih in range (0,int(N)):
    for iv in range (0,int(K)):
        pos=iv*lfft;
        tmp=y[pos:pos+lfft,ih];
        X[:,ih,iv]=fft(tmp);
# Find the closest frequency from the discrete frequency vector resulted from FFT
values=abs(F-f);
mf=np.amin(values);
mi=np.argmin(values)
f0=F[mi];
# Narrowband snapshot matrix at the bin closest to the signal frequency.
x0=np.zeros((int(N), int(K)),dtype='complex');
for ih in range (0,int(N)):
    for iv in range (0,int(K)):
        x0[ih,iv]=X[mi,ih,iv];  # complex signal
# Sample covariance matrix
Rxx = np.dot(x0,x0.conj().T)/L;
# Search directions
AzSearch = np.linspace(0,180,181);  # Azimuth values to search
ElSearch = np.zeros(AzSearch.shape);  # Simple 1D example
# Corresponding points on array manifold to search
kSearch = math.pi*np.array([np.cos(np.radians(AzSearch))*np.cos(np.radians(ElSearch)), np.sin(np.radians(AzSearch))*np.cos(np.radians(ElSearch)), np.sin(np.radians(ElSearch))]);
ASearch = np.exp(-1j*np.dot(r,kSearch));
#################################################################################################################
## Capon
# Capon spectrum
Zc=[]
for i in range (0,np.size(AzSearch)):
    Z=(ASearch[:,i].conj().T).dot(np.linalg.inv(Rxx)).dot(ASearch[:,i]);
    Zc.append(abs(1/Z));
Zc = 10*np.log10(Zc/max(Zc))  # normalised spectrum in dB
##################################################################################################################
## MUSIC
# Eigendecompose
D,E = np.linalg.eig(Rxx);
lambd = np.sort(D);  # Vector of sorted eigenvalues
idx = np.argsort(D);  # Index of sorted eigenvalues
E = E[:,idx];  # Sort eigenvalues accordingly
En = E[:,0:np.size(idx)-M];  # Noise eigenvectors (ASSUMPTION: M IS KNOWN)
# MUSIC spectrum
Zm=[]
for i in range (0,np.size(AzSearch)):
    Z = (ASearch[:, i].conj().T).dot(En).dot(En.conj().T).dot(ASearch[:, i]);
    Zm.append(abs(1/Z));
Zm = 10*np.log10(Zm/max(Zm))  # normalised spectrum in dB
# Angle calculation
Zcmin=np.amin(Zc);
Zcmax=np.amax(Zc);
Zmi=np.argmax(Zm);
# Capon amplitude check: a nearly flat Capon spectrum (< 1 dB swing) is
# treated as a false detection
if (abs(Zcmax-Zcmin)<1):
    angle=-90
    print("Source not detected")
# Otherwise the MUSIC peak position gives the detected source bearing
else:
    angle = AzSearch[Zmi];
    print("Angle :", angle )
# Plot spectrum
pylab.figure()
pylab.plot(AzSearch,Zc)
pylab.plot(AzSearch,Zm)
pylab.show()
| {
"content_hash": "c28ba344391e39abc3f1816d96899741",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 177,
"avg_line_length": 31.404761904761905,
"alnum_prop": 0.5986858731362144,
"repo_name": "ENSTA-Bretagne-Guerledan-BoiteNoire/ROS_BUBBLE_Project",
"id": "c1b9a821975ed71bc4ed373decd0b48dae469a87",
"size": "3957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bubble_audio/src/AlgoMusicAuto.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1179"
},
{
"name": "C++",
"bytes": "16502"
},
{
"name": "CMake",
"bytes": "41567"
},
{
"name": "Python",
"bytes": "146267"
}
],
"symlink_target": ""
} |
"""
======================================
The :mod:`mpi_array.comms_test` Module
======================================
Module defining :mod:`mpi_array.comms` unit-tests.
Execute as::
python -m mpi_array.comms_test
or::
mpirun -n 4 python -m mpi_array.comms_test
Classes
=======
.. autosummary::
:toctree: generated/
:template: autosummary/inherits_TestCase_class.rst
LocaleCommsTest - Tests for :obj:`mpi_array.comms.LocaleComms`.
CartLocaleCommsTest - Tests for :obj:`mpi_array.comms.CartLocaleComms`.
CreateDistributionTest - Tests for :func:`mpi_array.comms.create_distribution`.
"""
from __future__ import absolute_import
import mpi4py.MPI as _mpi
import numpy as _np # noqa: E402,F401
from .license import license as _license, copyright as _copyright, version as _version
from . import unittest as _unittest
from . import logging as _logging # noqa: E402,F401
from .comms import CartLocaleComms, LocaleComms
from .comms import create_single_locale_distribution, create_locale_comms, create_distribution
from .comms import check_distrib_type, DT_BLOCK, DT_SLAB, DT_CLONED, DT_SINGLE_LOCALE
from .comms import check_locale_type, LT_NODE, LT_PROCESS
from .utils import get_shared_mem_usage_percent_string
from .distribution import SingleLocaleDistribution as _SingleLocaleDistribution
# Package metadata, pulled from the project's central license module.
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class LocaleCommsTest(_unittest.TestCase):
    """
    Tests for :obj:`mpi_array.comms.LocaleComms`.
    """

    def test_get_shared_mem_usage_percent_string(self):
        """
        Coverage for :func:`mpi_array.comms.get_shared_mem_usage_percent_string`.
        """
        # A bogus shm path should yield the "unknown" fallback, not raise.
        p = \
            get_shared_mem_usage_percent_string(
                shm_file_name="/probably/does/not_exist/on_file/system"
            )
        self.assertEqual("unknown", p)

    def test_construct(self):
        """
        Test :meth:`mpi_array.comms.LocaleComms.__init__`
        """
        i = LocaleComms(peer_comm=_mpi.COMM_WORLD)
        self.assertTrue(i.intra_locale_comm is not None)
        self.assertTrue(i.intra_locale_comm.size >= 1)
        self.assertTrue(i.peer_comm is not None)
        self.assertTrue(i.peer_comm.size >= 1)
        # Every peer rank should belong to exactly one locale.
        self.assertEqual(i.num_locales, len(i.peer_ranks_per_locale))
        self.assertEqual(
            i.peer_comm.size,
            _np.sum(len(i.peer_ranks_per_locale[r]) for r in range(i.num_locales))
        )
        # Flattened, the per-locale peer ranks are exactly 0..size-1.
        prpl_ranks = \
            sorted(sum((tuple(i.peer_ranks_per_locale[r]) for r in range(i.num_locales)), ()))
        self.assertSequenceEqual(
            list(range(0, i.peer_comm.size)),
            prpl_ranks
        )
        # Default construction should provide the same invariants.
        i = LocaleComms()
        self.assertTrue(i.intra_locale_comm is not None)
        self.assertTrue(i.intra_locale_comm.size >= 1)
        self.assertTrue(i.peer_comm is not None)
        self.assertTrue(i.peer_comm.size >= 1)
        # inter_locale_comm is assignable, including COMM_NULL and None.
        i.inter_locale_comm = _mpi.COMM_NULL
        self.assertEqual(_mpi.COMM_NULL, i.inter_locale_comm)
        i.inter_locale_comm = None
        self.assertEqual(None, i.inter_locale_comm)

    def test_construct_invalid_comms(self):
        """
        Test :meth:`mpi_array.comms.LocaleComms.__init__`
        """
        # An inter-locale comm larger than the peer comm must be rejected.
        if _mpi.COMM_WORLD.size != _mpi.COMM_SELF.size:
            self.assertRaises(
                ValueError,
                LocaleComms,
                _mpi.COMM_SELF,  # peer
                _mpi.COMM_SELF,  # intra
                _mpi.COMM_WORLD  # inter
            )
        # A multi-rank intra comm cannot double as the inter comm.
        lc = LocaleComms()
        if lc.intra_locale_comm.size > 1:
            self.assertRaises(
                ValueError,
                LocaleComms,
                lc.peer_comm,  # peer
                lc.intra_locale_comm,  # intra
                lc.peer_comm  # inter
            )

    def test_construct_no_shared(self):
        """COMM_SELF as intra comm gives one single-process locale per rank."""
        lc = LocaleComms(intra_locale_comm=_mpi.COMM_SELF)
        self.assertEqual(_mpi.IDENT, _mpi.Comm.Compare(_mpi.COMM_WORLD, lc.peer_comm))
        self.assertEqual(1, lc.intra_locale_comm.size)
        self.assertNotEqual(_mpi.COMM_WORLD, _mpi.COMM_NULL)
class CartLocaleCommsTest(_unittest.TestCase):
    """
    :obj:`unittest.TestCase` for :obj:`mpi_array.comms.CartLocaleComms`.
    """

    def test_construct_invalid_dims(self):
        """Inconsistent dims/ndims combinations must raise ValueError."""
        cart_comms = None
        invalid_kwargs = (
            dict(),
            dict(ndims=None, dims=None),
            dict(dims=tuple(), ndims=1),
            dict(dims=tuple([0, 2]), ndims=1),
            dict(dims=tuple([1, 2]), ndims=3),
        )
        for kwargs in invalid_kwargs:
            with self.assertRaises(ValueError):
                cart_comms = CartLocaleComms(**kwargs)
        # No assignment may have succeeded.
        self.assertEqual(None, cart_comms)

    def test_construct_invalid_cart_comm(self):
        """A pre-built cartesian comm spanning all ranks is rejected when size > 1."""
        cart_comm = _mpi.COMM_WORLD.Create_cart(dims=(_mpi.COMM_WORLD.size,))
        if _mpi.COMM_WORLD.size > 1:
            with self.assertRaises(ValueError):
                CartLocaleComms(
                    ndims=1,
                    peer_comm=_mpi.COMM_WORLD,
                    cart_comm=cart_comm
                )

    def test_construct_shared(self):
        """Dimensionality follows ndims or len(dims); peer comm is COMM_WORLD."""
        cases = (
            (dict(ndims=1), 1),
            (dict(ndims=4), 4),
            (dict(dims=(0,)), 1),
            (dict(dims=(0, 0)), 2),
            (dict(dims=(0, 0, 0)), 3),
        )
        for kwargs, expected_ndim in cases:
            comms = CartLocaleComms(**kwargs)
            self.assertEqual(
                _mpi.IDENT, _mpi.Comm.Compare(_mpi.COMM_WORLD, comms.peer_comm)
            )
            self.assertEqual(expected_ndim, comms.ndim)

    def test_construct_no_shared(self):
        """A COMM_SELF intra-locale comm yields single-process locales."""
        comms = CartLocaleComms(ndims=1, intra_locale_comm=_mpi.COMM_SELF)
        self.assertEqual(
            _mpi.IDENT, _mpi.Comm.Compare(_mpi.COMM_WORLD, comms.peer_comm)
        )
        self.assertEqual(1, comms.intra_locale_comm.size)
        self.assertNotEqual(_mpi.COMM_WORLD, _mpi.COMM_NULL)

    def test_alloc_locale_buffer(self):
        """alloc_locale_buffer honours shape/dtype with and without shared memory."""
        for comms in (
            CartLocaleComms(ndims=1),
            CartLocaleComms(ndims=1, intra_locale_comm=_mpi.COMM_SELF),
        ):
            window_buff = comms.alloc_locale_buffer(shape=(100,), dtype="uint16")
            expected_dtype = _np.dtype("uint16")
            self.assertEqual(expected_dtype, window_buff.dtype)
            self.assertEqual(expected_dtype.itemsize, window_buff.itemsize)
            self.assertEqual(
                100 * window_buff.dtype.itemsize, len(window_buff.buffer)
            )
class CreateDistributionTest(_unittest.TestCase):
    """
    Tests for :func:`mpi_array.comms.create_distribution`.
    """

    def test_check_distrib_type(self):
        """Valid distribution types pass; an unknown string raises ValueError."""
        self.assertEqual(None, check_distrib_type(DT_SLAB))
        self.assertEqual(None, check_distrib_type(DT_BLOCK))
        self.assertEqual(None, check_distrib_type(DT_CLONED))
        self.assertEqual(None, check_distrib_type(DT_SINGLE_LOCALE))
        self.assertRaises(ValueError, check_distrib_type, "not_a_valid_distrib_type")

    def test_check_locale_type(self):
        """Valid locale types pass; an unknown string raises ValueError."""
        self.assertEqual(None, check_locale_type(LT_PROCESS))
        self.assertEqual(None, check_locale_type(LT_NODE))
        self.assertRaises(ValueError, check_locale_type, "not_a_valid_locale_type")

    def test_create_locale_comms_invalid_args(self):
        """
        Test that :func:`mpi_array.comms.create_locale_comms` raises exception
        for invalid arguments.
        """
        # An intra-locale comm as large as the peer comm is inconsistent with
        # LT_PROCESS locales when there is more than one rank.
        if _mpi.COMM_WORLD.size > 1:
            self.assertRaises(
                ValueError,
                create_locale_comms,
                locale_type=LT_PROCESS,
                peer_comm=_mpi.COMM_WORLD,
                intra_locale_comm=_mpi.COMM_WORLD
            )

    def check_is_single_locale_distribution(self, distrib):
        """
        Asserts for checking that the :samp:`{distrib}` :obj:`Distribution`
        is single-locale: the single locale extent covers the whole
        globale extent.
        """
        self.assertTrue(isinstance(distrib, _SingleLocaleDistribution))
        gshape = tuple(distrib.globale_extent.shape_n)
        # Generalised: derive the zero origin from the globale shape instead
        # of hard-coding a 4-dimensional tuple, so this helper works for
        # distributions of any rank (all current callers use 4-d shapes).
        origin = (0,) * len(gshape)
        self.assertSequenceEqual(
            gshape,
            tuple(distrib.locale_extents[0].shape)
        )
        self.assertSequenceEqual(
            origin,
            tuple(distrib.locale_extents[0].start_n)
        )
        self.assertSequenceEqual(
            gshape,
            tuple(distrib.locale_extents[0].stop_n)
        )
        self.assertSequenceEqual(
            origin,
            tuple(distrib.globale_extent.start_n)
        )
        self.assertSequenceEqual(
            gshape,
            tuple(distrib.globale_extent.stop_n)
        )

    def test_create_single_locale_distribution(self):
        """
        Tests for :func:`mpi_array.comms.create_single_locale_distribution`.
        """
        candd = \
            create_single_locale_distribution(
                shape=(20, 31, 17, 4),
                locale_type=LT_PROCESS,
                peer_comm=_mpi.COMM_WORLD
            )
        distrib = candd.distribution
        self.check_is_single_locale_distribution(distrib)

    def test_create_distribution_slab(self):
        """
        Tests for :func:`mpi_array.comms.create_distribution`.
        """
        candd = \
            create_distribution(
                shape=(20, 31, 17, 4),
                locale_type=LT_PROCESS,
                distrib_type=DT_SLAB,
                peer_comm=_mpi.COMM_WORLD
            )
        distrib = candd.distribution
        # A slab distribution splits only axis 0; the trailing axes of every
        # locale extent must match the globale shape.
        self.assertSequenceEqual(
            (20, 31, 17, 4)[1:],
            tuple(distrib.locale_extents[0].shape)[1:]
        )
        self.assertSequenceEqual(
            (0, 0, 0, 0)[1:],
            tuple(distrib.locale_extents[0].start_n)[1:]
        )
        self.assertSequenceEqual(
            (20, 31, 17, 4)[1:],
            tuple(distrib.locale_extents[0].stop_n)[1:]
        )
        self.assertEqual(candd.locale_comms.num_locales, distrib.num_locales)
        if distrib.num_locales > 1:
            for i in range(1, distrib.num_locales):
                self.assertSequenceEqual(
                    (20, 31, 17, 4)[1:],
                    tuple(distrib.locale_extents[i].shape)[1:]
                )
                self.assertSequenceEqual(
                    (0, 0, 0, 0)[1:],
                    tuple(distrib.locale_extents[i].start_n)[1:]
                )
                self.assertSequenceEqual(
                    (20, 31, 17, 4)[1:],
                    tuple(distrib.locale_extents[i].stop_n)[1:]
                )
        self.assertSequenceEqual(
            (20, 31, 17, 4),
            tuple(distrib.globale_extent.shape)
        )
        self.assertSequenceEqual(
            (0, 0, 0, 0),
            tuple(distrib.globale_extent.start_n)
        )
        self.assertSequenceEqual(
            (20, 31, 17, 4),
            tuple(distrib.globale_extent.stop_n)
        )

    def test_create_distribution_single_locale(self):
        """
        Tests for :func:`mpi_array.comms.create_distribution`.
        """
        candd = \
            create_distribution(
                shape=(20, 31, 17, 4),
                locale_type=LT_PROCESS,
                distrib_type=DT_SINGLE_LOCALE,
                peer_comm=_mpi.COMM_WORLD
            )
        distrib = candd.distribution
        self.assertEqual(candd.locale_comms.num_locales, distrib.num_locales)
        self.check_is_single_locale_distribution(distrib)
# NOTE(review): _unittest here is mpi_array's unittest wrapper; main(__name__)
# presumably only runs the tests when this module is executed as a script --
# confirm against mpi_array.unittest.
_unittest.main(__name__)
# Export all public (non-underscore) names.
__all__ = [s for s in dir() if not s.startswith('_')]
| {
"content_hash": "eb1275510ef5be2b240c63e1979a3c98",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 94,
"avg_line_length": 35.19252873563219,
"alnum_prop": 0.5861843716828611,
"repo_name": "mpi-array/mpi_array",
"id": "33d1b0a9fbfaada441e551108387fd89ede1acc9",
"size": "12247",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpi_array/comms_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "636404"
},
{
"name": "Shell",
"bytes": "3090"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from .client import Client
__title__ = 'Steamfront'
__author__ = 'Callum Bartlett'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Callum Bartlett'
__version__ = '0.1.0'
if __name__ == '__main__':
    # Command-line usage: look up a Steam app by name and print a summary.
    from sys import argv

    if len(argv) < 2:
        print('Please give the name of a game you want to find the information of.')
        # Fix: without a game name there is nothing to look up.  Previously
        # the script fell through and queried the API with an empty name.
        raise SystemExit(1)

    gameName = ' '.join(argv[1:])
    c = Client()
    g = c.getApp(name=gameName)
    i = '{0.name} :: {0.appid} :: {0.type}'
    print(i.format(g))
| {
"content_hash": "70bad38bd738e18866f8a85275b39def",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 26.157894736842106,
"alnum_prop": 0.6257545271629779,
"repo_name": "4Kaylum/Steamfront",
"id": "19bab0767e7100d461bd4a1a0a0fd80e2a3ee724",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steamfront/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17235"
}
],
"symlink_target": ""
} |
"""Added Section Layout Settings and Site Settings
Revision ID: 698cc06661d6
Revises: 958a9358a256
Create Date: 2016-02-29 00:03:36.777728
"""
# revision identifiers, used by Alembic.
revision = '698cc06661d6'
down_revision = '958a9358a256'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: create the ``sites_settings`` and
    ``pages_sections_layout_settings`` key/value tables."""
    ### commands auto generated by Alembic - please adjust! ###
    # Per-site settings keyed by (site_id, setting).
    op.create_table('sites_settings',
    sa.Column('site_id', sa.Integer(), nullable=False),
    sa.Column('setting', sa.Text(length=40), nullable=False),
    sa.Column('value', sa.Text(length=200), nullable=False),
    sa.ForeignKeyConstraint(['site_id'], ['sites.id'], ),
    sa.PrimaryKeyConstraint('site_id', 'setting')
    )
    op.create_index(op.f('ix_sites_settings_site_id'), 'sites_settings', ['site_id'], unique=False)
    # Per-page-section layout settings; value may instead reference an image.
    op.create_table('pages_sections_layout_settings',
    sa.Column('page_section_id', sa.Integer(), nullable=False),
    sa.Column('setting', sa.Text(length=40), nullable=False),
    sa.Column('value', sa.Text(length=200), nullable=True),
    sa.Column('image_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
    sa.ForeignKeyConstraint(['page_section_id'], ['pages_sections.id'], ),
    sa.PrimaryKeyConstraint('page_section_id', 'setting')
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse this revision: drop both tables (index first)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('pages_sections_layout_settings')
    op.drop_index(op.f('ix_sites_settings_site_id'), table_name='sites_settings')
    op.drop_table('sites_settings')
    ### end Alembic commands ###
| {
"content_hash": "0ab2616ad800131430d542ab91068154",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 99,
"avg_line_length": 36.02173913043478,
"alnum_prop": 0.6813518406759204,
"repo_name": "matslindh/kimochi",
"id": "b51138cb7bec817443c6ea15d03e8d5310a92f9d",
"size": "1657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/698cc06661d6_added_section_layout_settings_and_site_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7808"
},
{
"name": "JavaScript",
"bytes": "63429"
},
{
"name": "Mako",
"bytes": "50854"
},
{
"name": "Python",
"bytes": "67666"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import server
class servers(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-system - based on the path /system/aaa/server-groups/server-group/servers. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Enclosing container the list of servers
    """

    # NOTE: pyangbind-generated code -- regenerate from the openconfig-system
    # YANG model instead of hand-editing this class.
    __slots__ = ("_path_helper", "_extmethods", "__server")
    _yang_name = "servers"
    _pybind_generated_by = "container"

    # Build the default (empty) "server" YANG list, keyed by "address".
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__server = YANGDynClass(
            base=YANGListType(
                "address",
                server.server,
                yang_name="server",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="address",
                extensions=None,
            ),
            is_container="list",
            yang_name="server",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="list",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # Copy-construction: a single positional argument must expose the same
        # pyangbind elements; only changed values are copied across.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    # Location of this container within the instantiated YANG data tree.
    def _path(self):
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["system", "aaa", "server-groups", "server-group", "servers"]

    def _get_server(self):
        """
        Getter method for server, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server (list)
        YANG Description: List of AAA servers
        """
        return self.__server

    def _set_server(self, v, load=False):
        """
        Setter method for server, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_server is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_server() directly.
        YANG Description: List of AAA servers
        """
        # Coerce through the union type helper when present.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "address",
                    server.server,
                    yang_name="server",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="address",
                    extensions=None,
                ),
                is_container="list",
                yang_name="server",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """server must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("address",server.server, yang_name="server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address', extensions=None), is_container='list', yang_name="server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='list', is_config=True)""",
                }
            )
        self.__server = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_server(self):
        # Restore the default (empty) "server" list.
        self.__server = YANGDynClass(
            base=YANGListType(
                "address",
                server.server,
                yang_name="server",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="address",
                extensions=None,
            ),
            is_container="list",
            yang_name="server",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="list",
            is_config=True,
        )

    # Expose "server" as a property (generated modules use __builtin__.property
    # because the name `property` may be shadowed).
    server = __builtin__.property(_get_server, _set_server)

    _pyangbind_elements = OrderedDict([("server", server)])
| {
"content_hash": "c3493eec9cde1cb5a0b23bb936541222",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 514,
"avg_line_length": 36.33513513513513,
"alnum_prop": 0.5584647426361202,
"repo_name": "napalm-automation/napalm-yang",
"id": "821d7b90bfe60239e280846acbbfdc869441a430",
"size": "6746",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/system/aaa/server_groups/server_group/servers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import warnings
from selene.core.entity import Browser
class SharedBrowser(Browser):
    """Deprecated alias of :class:`Browser`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Fix: warn when the deprecated class is actually used, not once at
        # module import time (the previous class-body warn fired for every
        # importer of this module, even those never touching SharedBrowser).
        # stacklevel=2 points the warning at the caller's code.
        warnings.warn(
            'SharedBrowser is deprecated, use Browser instead',
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| {
"content_hash": "5ca3512f12037cd8c62b1725e18ad13a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.75,
"repo_name": "yashaka/selene",
"id": "352bb714f3003babfbfc0b4b96cbf55a7ab00b8e",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selene/support/shared/browser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4424"
},
{
"name": "JavaScript",
"bytes": "4519"
},
{
"name": "Python",
"bytes": "371020"
},
{
"name": "Shell",
"bytes": "677"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the ``packages_downloaddelta`` table for the new
        ``DownloadDelta`` model (per-file, per-date download count delta)."""
        # Adding model 'DownloadDelta'
        db.create_table('packages_downloaddelta', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('file', self.gf('django.db.models.fields.related.ForeignKey')(related_name='download_deltas', to=orm['packages.ReleaseFile'])),
            ('date', self.gf('django.db.models.fields.DateField')(default=datetime.date.today)),
            ('delta', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal('packages', ['DownloadDelta'])
    def backwards(self, orm):
        """Reverse the migration by dropping ``packages_downloaddelta``."""
        # Deleting model 'DownloadDelta'
        db.delete_table('packages_downloaddelta')
models = {
'packages.changelog': {
'Meta': {'object_name': 'ChangeLog'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Package']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Release']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
'packages.downloaddelta': {
'Meta': {'object_name': 'DownloadDelta'},
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'delta': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'download_deltas'", 'to': "orm['packages.ReleaseFile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'packages.package': {
'Meta': {'object_name': 'Package'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'downloads_synced_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'}),
'normalized_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'})
},
'packages.packageuri': {
'Meta': {'unique_together': "(['package', 'uri'],)", 'object_name': 'PackageURI'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_links'", 'to': "orm['packages.Package']"}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '400'})
},
'packages.readthedocspackageslug': {
'Meta': {'object_name': 'ReadTheDocsPackageSlug'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'readthedocs_slug'", 'unique': 'True', 'to': "orm['packages.Package']"}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'})
},
'packages.release': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Release'},
'author': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'author_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classifiers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'releases'", 'blank': 'True', 'to': "orm['packages.TroveClassifier']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'download_uri': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'license': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': "orm['packages.Package']"}),
'platform': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'raw_data': ('crate.fields.json.JSONField', [], {'null': 'True', 'blank': 'True'}),
'requires_python': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'show_install_command': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'packages.releasefile': {
'Meta': {'unique_together': "(('release', 'type', 'python_version', 'filename'),)", 'object_name': 'ReleaseFile'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'digest': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'downloads': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'python_version': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['packages.Release']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'packages.releaseobsolete': {
'Meta': {'object_name': 'ReleaseObsolete'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'obsoletes'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'packages.releaseprovide': {
'Meta': {'object_name': 'ReleaseProvide'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provides'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'packages.releaserequire': {
'Meta': {'object_name': 'ReleaseRequire'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requires'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
'packages.releaseuri': {
'Meta': {'object_name': 'ReleaseURI'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'uris'", 'to': "orm['packages.Release']"}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '500'})
},
'packages.troveclassifier': {
'Meta': {'object_name': 'TroveClassifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trove': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '350'})
}
}
complete_apps = ['packages'] | {
"content_hash": "81083873f997a2ddbee201d4aae8a9ab",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 197,
"avg_line_length": 74.2127659574468,
"alnum_prop": 0.5584862385321101,
"repo_name": "crate-archive/crate-site",
"id": "4a15bb7615383bf6e46e98c0c459ddbf56dbf551",
"size": "10488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crateweb/apps/packages/migrations/0022_auto__add_downloaddelta.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "8467"
},
{
"name": "Python",
"bytes": "633678"
}
],
"symlink_target": ""
} |
import sys
from cx_Freeze import setup, Executable
import os

# Point cx_Freeze at the Tcl/Tk runtime of this (hard-coded) Python install.
os.environ['TCL_LIBRARY'] = r"D:\Program Files\Python35\tcl\tcl8.6"
os.environ['TK_LIBRARY'] = r"D:\Program Files\Python35\tcl\tk8.6"

# Options for the build_exe step: bundle tkinter plus the Tcl/Tk DLLs.
build_exe_options = {
    "includes": ["tkinter"],
    "include_files": [
        r"D:\Program Files\Python35\DLLs\tcl86t.dll",
        r"D:\Program Files\Python35\DLLs\tk86t.dll",
    ],
}

# On Windows, Win32GUI suppresses the console window for GUI apps.
gui_base = "Win32GUI" if sys.platform == "win32" else None

setup(
    name="Categories Backup Tool",
    version="1.0",
    description="Tool used to backup or export Steam Categories",
    options={"build_exe": build_exe_options},
    executables=[
        Executable("GUI.py", targetName="CategoriesBackupTool.exe", base=gui_base)
    ],
)
| {
"content_hash": "ac8a67add3799e840f76cb3af91e69e9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 92,
"avg_line_length": 30.416666666666668,
"alnum_prop": 0.6712328767123288,
"repo_name": "DanielZa2/SteamCategoriesBackup",
"id": "b56bd4c796307dc24309e00c2e5021d425c31c25",
"size": "829",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Release/Tools/cx_Freeze/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22647"
}
],
"symlink_target": ""
} |
# Report duplicate parameters in {{Infobox language}} / {{Infobox language
# family}} transclusions on the English Wikipedia.
# NOTE(review): Python 2 script (print statements, `unicode`); it performs
# live wiki I/O via pywikibot.
import mwparserfromhell
import pywikibot
# Target the English Wikipedia.
pywikibot.config.family = "wikipedia"
pywikibot.config.mylang = "en"
site = pywikibot.Site()
site.login()
# The two infobox templates whose transclusions are scanned.
templates = [
    pywikibot.Page(site, "Template:Infobox language"),
    pywikibot.Page(site, "Template:Infobox language family")
]
# Collect the lower-cased titles of the templates plus all their redirects so
# transclusions via a redirect name are also recognised.
titles = list()
for template in templates:
    titles.append(template.title(withNamespace=False).lower())
    for reference in template.getReferences(withTemplateInclusion=False, redirectsOnly=True):
        titles.append(reference.title(withNamespace=False).lower())
titles = list(set(titles))
# Maps page title -> {template name -> comma-separated duplicate param names}.
report = dict()
for template in templates:
    for page in template.getReferences(onlyTemplateInclusion=True):
        text = page.get()
        code = mwparserfromhell.parse(text)
        # NOTE(review): this loop rebinds `template`, shadowing the outer
        # loop variable -- it works, but is fragile.
        for template in code.ifilter_templates():
            if not template.name.lower().strip() in titles:
                continue
            parameters = [param.name for param in template.params]
            # Record each parameter name that occurs more than once, once.
            duplicates = list()
            for parameter in parameters:
                if (parameters.count(parameter) > 1) and not (parameter in duplicates):
                    duplicates.append(unicode(parameter))
            if duplicates:
                # Ensure the per-page dict exists, then record the duplicates.
                try:
                    report[page.title()]
                except KeyError:
                    report[page.title()] = {}
                finally:
                    report[page.title()][template.name.strip()] = ", ".join(duplicates)
print report # log in output file
print
pywikibot.stopme()
| {
"content_hash": "6addab5ca0c0efb33e5619dd31994afd",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 93,
"avg_line_length": 35.31818181818182,
"alnum_prop": 0.620978120978121,
"repo_name": "HazardSJ/HazardBot",
"id": "efb31d828f95e5f2a89e1d003e6e2ab4b404e888",
"size": "1554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enwiki/infoboxLanguage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87914"
}
],
"symlink_target": ""
} |
from iiits.config import templates | {
"content_hash": "b4ed77409e07f6a6a29f54f4a99d8278",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 34,
"alnum_prop": 0.8823529411764706,
"repo_name": "IIITS/iiits.ac.in",
"id": "c115669d75329ffe73e8751a730d1b08dfb08a28",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iiits/delete.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "570565"
},
{
"name": "HTML",
"bytes": "161340"
},
{
"name": "JavaScript",
"bytes": "1819248"
},
{
"name": "Python",
"bytes": "234433"
}
],
"symlink_target": ""
} |
"""
Tests for the silent audio driver.
"""
from __future__ import division
from builtins import object
from tests import mock
import unittest
from pyglet.media.drivers.silent import (EventBuffer, SilentAudioBuffer, SilentAudioPacket,
SilentAudioPlayerPacketConsumer)
from pyglet.media.events import MediaEvent
from pyglet.media.sources import AudioData, AudioFormat
class SilentAudioPacketTest(unittest.TestCase):
    """Unit tests for time consumption on :class:`SilentAudioPacket`."""

    def _check_consume(self, dt, expected_consumed, expected_timestamp,
                       expected_duration, expect_empty):
        """Consume `dt` seconds from a fresh 1-second packet and verify state."""
        packet = SilentAudioPacket(0., 1.)
        consumed = packet.consume(dt)
        self.assertAlmostEqual(expected_consumed, consumed)
        self.assertAlmostEqual(expected_timestamp, packet.timestamp)
        self.assertAlmostEqual(expected_duration, packet.duration)
        if expect_empty:
            self.assertTrue(packet.is_empty())
        else:
            self.assertFalse(packet.is_empty())

    def test_partial_consume(self):
        # Consuming less than the packet leaves the remainder.
        self._check_consume(.4, .4, .4, .6, False)

    def test_exact_consume(self):
        # Consuming exactly the packet empties it.
        self._check_consume(1., 1., 1., 0., True)

    def test_over_consume(self):
        # Consuming more than available is clamped to the packet duration.
        self._check_consume(2., 1., 1., 0., True)
class SilentAudioBufferTest(unittest.TestCase):
    """Tests for SilentAudioBuffer: a queue of silent packets that tracks
    total remaining duration and the current playback timestamp."""

    def test_add_audio_data(self):
        """Adding data grows the buffer's total duration."""
        buf = SilentAudioBuffer()
        self.assertTrue(buf.is_empty())
        self.assertAlmostEqual(0., buf.duration)
        # AudioData('', 0, timestamp, duration, events) -- positional
        # meaning inferred from the assertions below.
        data1 = AudioData('', 0, 0., 1., [])
        buf.add_audio_data(data1)
        self.assertFalse(buf.is_empty())
        self.assertAlmostEqual(1., buf.duration)
        data2 = AudioData('', 0, 1., 2., [])
        buf.add_audio_data(data2)
        self.assertFalse(buf.is_empty())
        self.assertAlmostEqual(3., buf.duration)

    def test_consume_audio_data(self):
        """Consuming advances the timestamp, including across packet
        boundaries."""
        buf = SilentAudioBuffer()
        buf.add_audio_data(AudioData('', 0, 0., 1., []))
        buf.add_audio_data(AudioData('', 0, 1., 2., []))
        self.assertFalse(buf.is_empty())
        self.assertAlmostEqual(3., buf.duration)
        self.assertAlmostEqual(0., buf.get_current_timestamp())
        buf.consume_audio_data(0.8)
        self.assertFalse(buf.is_empty())
        self.assertAlmostEqual(2.2, buf.duration)
        self.assertAlmostEqual(0.8, buf.get_current_timestamp())
        # This consume crosses from the first packet into the second.
        buf.consume_audio_data(0.8)
        self.assertFalse(buf.is_empty())
        self.assertAlmostEqual(1.4, buf.duration)
        self.assertAlmostEqual(1.6, buf.get_current_timestamp())
        buf.consume_audio_data(1.4)
        self.assertTrue(buf.is_empty())
        self.assertAlmostEqual(0., buf.duration)
        self.assertAlmostEqual(3., buf.get_current_timestamp())

    def test_consume_too_much(self):
        """Over-consuming drains the buffer and clamps the timestamp to the
        end of the buffered data."""
        buf = SilentAudioBuffer()
        buf.add_audio_data(AudioData('', 0, 0., 1., []))
        buf.add_audio_data(AudioData('', 0, 1., 2., []))
        buf.consume_audio_data(4.)
        self.assertTrue(buf.is_empty())
        self.assertAlmostEqual(0., buf.duration)
        self.assertAlmostEqual(3., buf.get_current_timestamp())

    def test_time_to_next_update(self):
        """Time to next update is the remainder of the current packet, or
        None when the buffer is empty."""
        buf = SilentAudioBuffer()
        self.assertIsNone(buf.get_time_to_next_update())
        buf.add_audio_data(AudioData('', 0, 0., 1., []))
        buf.add_audio_data(AudioData('', 0, 1., 2., []))
        self.assertAlmostEqual(1., buf.get_time_to_next_update())
        buf.consume_audio_data(0.5)
        self.assertAlmostEqual(0.5, buf.get_time_to_next_update())
        buf.consume_audio_data(1.0)
        self.assertAlmostEqual(1.5, buf.get_time_to_next_update())
        buf.consume_audio_data(1.5)
        self.assertIsNone(buf.get_time_to_next_update())

    def test_current_timestamp(self):
        """The reported timestamp follows the packets' own timestamps, which
        need not be contiguous (here the first packet starts at t=2)."""
        buf = SilentAudioBuffer()
        self.assertAlmostEqual(0., buf.get_current_timestamp())
        buf.add_audio_data(AudioData('', 0, 2., 1., []))
        buf.add_audio_data(AudioData('', 0, 1., 2., []))
        self.assertAlmostEqual(2., buf.get_current_timestamp())
        buf.consume_audio_data(0.2)
        self.assertAlmostEqual(2.2, buf.get_current_timestamp())
        buf.consume_audio_data(1.)
        self.assertAlmostEqual(1.2, buf.get_current_timestamp())
        buf.consume_audio_data(2.)
        self.assertAlmostEqual(3., buf.get_current_timestamp())
class EventBufferTest(unittest.TestCase):
    """Tests for EventBuffer, which queues MediaEvents carried by AudioData
    and releases each one once playback reaches its timestamp."""

    def test_add_events(self):
        """Events taken from AudioData become visible in timestamp order."""
        buf = EventBuffer()
        self.assertIsNone(buf.get_next_event_timestamp())
        event1 = MediaEvent(.1, 'Event1')
        event2 = MediaEvent(.5, 'Event2')
        data = AudioData('', 0, 0., 1., [event1, event2])
        buf.add_events(data)
        self.assertAlmostEqual(.1, buf.get_next_event_timestamp())

    def test_get_expired_events(self):
        """Each event is returned exactly once, when its time has passed."""
        buf = EventBuffer()
        self.assertIsNone(buf.get_next_event_timestamp())
        event1 = MediaEvent(.1, 'Event1')
        event2 = MediaEvent(.5, 'Event2')
        data = AudioData('', 0, 0., 1., [event1, event2])
        buf.add_events(data)
        expired_events = buf.get_expired_events(0.)
        self.assertListEqual([], expired_events)
        expired_events = buf.get_expired_events(.1)
        self.assertListEqual([event1], expired_events)
        # Asking again at the same time must not return the event twice.
        expired_events = buf.get_expired_events(.1)
        self.assertListEqual([], expired_events)
        expired_events = buf.get_expired_events(.6)
        self.assertListEqual([event2], expired_events)
        expired_events = buf.get_expired_events(.6)
        self.assertListEqual([], expired_events)

    def test_get_multiple_events(self):
        """Events sharing a deadline come back together; event times are
        offset by the timestamp of the AudioData that carried them."""
        buf = EventBuffer()
        self.assertIsNone(buf.get_next_event_timestamp())
        event1 = MediaEvent(.2, 'Event1')
        event2 = MediaEvent(.2, 'Event2')
        data1 = AudioData('', 0, 0., 1., [event1, event2])
        buf.add_events(data1)
        event3 = MediaEvent(.3, 'Event3')
        event4 = MediaEvent(.4, 'Event4')
        # This AudioData starts at t=1.0, so its events expire at 1.3/1.4.
        data2 = AudioData('', 0, 1., 1., [event3, event4])
        buf.add_events(data2)
        expired_events = buf.get_expired_events(0.)
        self.assertListEqual([], expired_events)
        expired_events = buf.get_expired_events(.2)
        self.assertListEqual([event1, event2], expired_events)
        expired_events = buf.get_expired_events(1.6)
        self.assertListEqual([event3, event4], expired_events)

    def test_get_next_event_timestamp(self):
        """The next pending deadline accounts for the AudioData offset."""
        buf = EventBuffer()
        self.assertIsNone(buf.get_next_event_timestamp())
        event1 = MediaEvent(.2, 'Event1')
        event2 = MediaEvent(.2, 'Event2')
        data1 = AudioData('', 0, 0., 1., [event1, event2])
        buf.add_events(data1)
        event3 = MediaEvent(.3, 'Event3')
        event4 = MediaEvent(.4, 'Event4')
        data2 = AudioData('', 0, 1., 1., [event3, event4])
        buf.add_events(data2)
        self.assertAlmostEqual(.2, buf.get_next_event_timestamp())
        buf.get_expired_events(.2)
        self.assertAlmostEqual(1.3, buf.get_next_event_timestamp())
        buf.get_expired_events(1.3)
        self.assertAlmostEqual(1.4, buf.get_next_event_timestamp())
        buf.get_expired_events(1.4)
        self.assertIsNone(buf.get_next_event_timestamp())

    def test_get_time_to_next_event(self):
        """Relative waits are measured from the supplied current time."""
        buf = EventBuffer()
        self.assertIsNone(buf.get_next_event_timestamp())
        event1 = MediaEvent(.2, 'Event1')
        event2 = MediaEvent(.2, 'Event2')
        data1 = AudioData('', 0, 0., 1., [event1, event2])
        buf.add_events(data1)
        event3 = MediaEvent(.3, 'Event3')
        event4 = MediaEvent(.4, 'Event4')
        data2 = AudioData('', 0, 1., 1., [event3, event4])
        buf.add_events(data2)
        self.assertAlmostEqual(.2, buf.get_time_to_next_event(0.))
        self.assertAlmostEqual(.1, buf.get_time_to_next_event(.1))
        buf.get_expired_events(.2)
        self.assertAlmostEqual(1.1, buf.get_time_to_next_event(.2))
        self.assertAlmostEqual(.1, buf.get_time_to_next_event(1.2))
class MockSourceGroup(object):
    """Stand-in for a pyglet source group serving dummy audio data.

    Keeps running totals (seconds_buffered, bytes_buffered) so tests can
    verify how much the player has buffered.
    """
    audio_format = AudioFormat(1, 8, 44100)

    def __init__(self, duration, timestamp=0.):
        self.mock = mock.MagicMock()
        type(self.mock).audio_format = mock.PropertyMock(return_value=self.audio_format)
        self.mock.get_audio_data.side_effect = self._get_audio_data
        self.timestamp = timestamp
        self.duration = duration
        self.seconds_buffered = 0.
        self.bytes_buffered = 0

    def _get_audio_data(self, length):
        """Serve up to *length* bytes of dummy data, or None when drained."""
        seconds = length / self.audio_format.bytes_per_second
        if seconds > self.duration:
            # Clamp to the remaining audio and recompute the byte count.
            seconds = self.duration
            length = int(seconds * self.audio_format.bytes_per_second)
        if length == 0:
            return None
        data = AudioData('a' * length, length, self.timestamp, seconds, ())
        self.timestamp += seconds
        self.duration -= seconds
        self.seconds_buffered += seconds
        self.bytes_buffered += length
        return data
class SilentAudioPlayerPacketConsumerTest(unittest.TestCase):
    """Tests for SilentAudioPlayerPacketConsumer with time.time and the
    driver's MediaThread patched out, so buffering/consumption can be
    driven manually and the clock set explicitly."""

    def setUp(self):
        self.time_patcher = mock.patch('time.time')
        self.thread_patcher = mock.patch('pyglet.media.drivers.silent.MediaThread')
        self.mock_time = self.time_patcher.start()
        self.mock_thread = self.thread_patcher.start()

    def tearDown(self):
        self.time_patcher.stop()
        self.thread_patcher.stop()

    def set_time(self, t):
        # Control the wall clock seen by the player under test.
        self.mock_time.return_value = t

    def test_buffer_data_initial(self):
        """Initial buffering fills roughly .4s of lead without starting
        playback.  (The .4s figure is presumably the consumer's buffering
        target -- confirm against the driver implementation.)"""
        mock_player = mock.MagicMock()
        mock_source_group = MockSourceGroup(1.)
        silent_player = SilentAudioPlayerPacketConsumer(mock_source_group.mock, mock_player)
        self.set_time(1000.)
        silent_player._buffer_data()
        self.assertAlmostEqual(.4, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)

    def test_playing(self):
        """While playing, get_time() follows the mocked wall clock."""
        mock_player = mock.MagicMock()
        mock_source_group = MockSourceGroup(1.)
        silent_player = SilentAudioPlayerPacketConsumer(mock_source_group.mock, mock_player)
        # Buffer initial data
        self.set_time(1000.)
        silent_player._buffer_data()
        self.assertAlmostEqual(.4, mock_source_group.seconds_buffered, delta=.01)
        # Start playing
        silent_player.play()
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)
        # Check timestamp increases even when not consuming new data
        self.set_time(1000.2)
        self.assertAlmostEqual(.2, silent_player.get_time(), delta=.01)
        # Timestamp still correct after consuming data
        silent_player._consume_data()
        self.assertAlmostEqual(.2, silent_player.get_time(), delta=.01)
        # Consuming data means we need to buffer more
        silent_player._buffer_data()
        self.assertAlmostEqual(.6, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(.2, silent_player.get_time(), delta=.01)

    def test_not_started_yet(self):
        """Before play(), the clock is frozen and nothing is consumed or
        buffered beyond the initial fill."""
        mock_player = mock.MagicMock()
        mock_source_group = MockSourceGroup(1.)
        silent_player = SilentAudioPlayerPacketConsumer(mock_source_group.mock, mock_player)
        # Do initial buffering even when not playing yet
        self.set_time(1000.)
        silent_player._buffer_data()
        self.assertAlmostEqual(.4, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)
        # Increase of timestamp does not change anything
        self.set_time(1001.)
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)
        # No data is consumed
        silent_player._consume_data()
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)
        # No new data is buffered
        silent_player._buffer_data()
        self.assertAlmostEqual(.4, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)

    def test_play_and_stop(self):
        """stop() freezes the clock at the stopping moment; buffering may
        continue but consumption halts."""
        mock_player = mock.MagicMock()
        mock_source_group = MockSourceGroup(1.)
        silent_player = SilentAudioPlayerPacketConsumer(mock_source_group.mock, mock_player)
        # Do initial buffering even when not playing yet
        self.set_time(1000.)
        silent_player._buffer_data()
        self.assertAlmostEqual(.4, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(0., silent_player.get_time(), delta=.01)
        # Play a little bit
        silent_player.play()
        self.set_time(1000.2)
        silent_player._consume_data()
        silent_player._buffer_data()
        self.assertAlmostEqual(.6, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(.2, silent_player.get_time(), delta=.01)
        # Now stop, this should consume data upto stopping moment
        self.set_time(1000.4)
        silent_player.stop()
        self.assertAlmostEqual(.4, silent_player.get_time(), delta=.01)
        # Buffering still happens
        silent_player._buffer_data()
        self.assertAlmostEqual(.8, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(.4, silent_player.get_time(), delta=.01)
        # But now playback is really paused
        self.set_time(1001.)
        self.assertAlmostEqual(.4, silent_player.get_time(), delta=.01)
        # And no more buffering and consuming
        silent_player._consume_data()
        silent_player._buffer_data()
        self.assertAlmostEqual(.8, mock_source_group.seconds_buffered, delta=.01)
        self.assertAlmostEqual(.4, silent_player.get_time(), delta=.01)
| {
"content_hash": "615d0ce2559f51517c1e9ad00f836121",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 92,
"avg_line_length": 35.8160621761658,
"alnum_prop": 0.6321880650994575,
"repo_name": "nicememory/pie",
"id": "99262ca0d04309af3be5e51280f5a75f196a2397",
"size": "13825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglet/tests/unit/media/test_silent_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5318"
},
{
"name": "C",
"bytes": "6624"
},
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "9229"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "Makefile",
"bytes": "5773"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9377528"
},
{
"name": "Shell",
"bytes": "664"
},
{
"name": "Vim script",
"bytes": "2952"
}
],
"symlink_target": ""
} |
from world.world import world
| {
"content_hash": "406d325de0c4707ddc8ef09c6d4252e8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.8333333333333334,
"repo_name": "mwilliamson/python-vendorize",
"id": "728f17c8cad7d2ad365f3186e6224b1908fa860c",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/multiple-dependencies-with-rewrite/world/world/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "753"
},
{
"name": "Python",
"bytes": "32031"
}
],
"symlink_target": ""
} |
import json
"""
we make use of a string representation for musical events fed to the RNN.
this file provides encode/decode methods for that representation
"""
# used to separate pieces in the corpus
divtoken = "|"
def encode_rest(duration):
    """Encode a rest of *duration* semiquavers as an RNN token, e.g. "r4"."""
    assert type(duration) is int
    return "r%d" % duration
def encode_note(pitch, duration):
    """Encode a note as an RNN token: pitch 60 for 4 semiquavers -> "n60d4"."""
    assert type(duration) is int
    assert type(pitch) is int
    return "n%dd%d" % (pitch, duration)
"""
takes a sequence of notes in our internal JSON encoding and turns it into a form
consumable by the RNN
returns (events, clock) pair
ts_semis: the number of semiquavers in a bar
since the only possible time signatures for chorales are 3/4 or 4/4,
this uniquely identifies a time signature.
"""
def encode_json_notes(notes, quantization, ts_semis):
    """Convert notes in the internal JSON encoding into RNN event strings.

    Each note is a [pitch, offset, duration] triple (times in semiquavers).
    Returns an (events, clock) pair, where clock[i] gives the 1-based
    quantized position within the bar of events[i].  ts_semis is the number
    of semiquavers per bar, which uniquely identifies the time signature.
    """
    events = [divtoken]
    clock = [0]
    prev_end = 0
    for note_rep in notes:
        pitch, offset, duration = note_rep[0], note_rep[1], note_rep[2]
        gap = offset - prev_end
        if gap > 0:
            # Emit an explicit rest covering the silence between notes.
            events.append(encode_rest(gap))
            clock.append((prev_end % ts_semis) // quantization + 1)
        events.append(encode_note(pitch, duration))
        clock.append((offset % ts_semis) // quantization + 1)
        prev_end = offset + duration
    return events, clock
#: Pitch-class names indexed by MIDI pitch modulo 12.
pitch_table = [
    "C", "C#", "D", "Eb", "E", "F", "F#", "G", "G#", "A", "Bb", "B"
]

#: Human-readable names for note/rest durations, keyed by length in
#: semiquavers.
duration_table = {
    1: "Semiquaver",
    2: "Quaver",
    3: "Dotted quaver",
    4: "Crotchet",
    6: "Dotted crotchet",
    8: "Minim",
    12: "Dotted minim",
    14: "Double dotted minim",
    16: "Semibreve",
    20: "Semibreve+Crotchet",
    24: "Dotted semibreve",
    28: "Double dotted semibreve",  # fixed typo: was "Doulbe"
    32: "Breve",
    56: "Double dotted breve",
    64: "Longa"
}


def readable_pitch(pitch):
    """Return scientific pitch notation for MIDI *pitch* (e.g. 60 -> "C4")."""
    assert type(pitch) is int
    octave = (pitch // 12) - 1
    return pitch_table[pitch % 12] + str(octave)


def readable_duraiton(dur):
    """Return the human-readable name for *dur* semiquavers.

    NOTE: the name is misspelt ("duraiton") but kept because existing
    callers in this module use it; prefer the readable_duration alias.
    """
    return duration_table[dur]


#: Correctly-spelt alias for readable_duraiton (backward compatible).
readable_duration = readable_duraiton
def metadata_for_event(event):
    """Return a (pitch, duration, description) string triple for *event*.

    Handles the divider token, note tokens ("n<pitch>d<dur>") and rest
    tokens ("r<dur>"); any other token yields None implicitly.
    """
    if event == divtoken:
        return "EOF", "0", "EOF"
    kind = event[0]
    if kind == "n":
        _, body = event.split("n")
        pitch, duration = [int(part) for part in body.split("d")]
        pitch_name = readable_pitch(pitch)
        description = "{} {}".format(readable_duraiton(duration), pitch_name)
        return pitch_name, str(duration), description
    if kind == "r":
        duration = int(event.split("r")[1])
        description = "{} rest".format(readable_duraiton(duration))
        return "Rest", str(duration), description
"""
we generate a tsv with human-readable descriptions of each event type.
this is so we can visualise the embedding used in the RNN with TensorBoard
"""
def generate_metadata_tsv(events):
    """Build a TSV with human-readable descriptions of each event type.

    Used to visualise the RNN's embedding in TensorBoard.  Rows are
    collected in a list and joined once instead of quadratic string
    concatenation.
    """
    rows = ["Pitch\tDuration\tDescription"]
    for e in events:
        pitch_s, dur_s, desc_s = metadata_for_event(e)
        rows.append("{}\t{}\t{}".format(pitch_s, dur_s, desc_s))
    # Trailing newline matches the original += "...\n" format.
    return "\n".join(rows) + "\n"
"""
takes a list of events in our RNN string representation format
and decodes them to the standard (pitch, offset, duration) format
we are using throughout the project
"""
def decode_events(events):
    """Decode RNN event tokens back into [pitch, offset, duration] triples.

    Divider tokens are skipped; rests only advance the running offset.
    """
    offset = 0
    decoded = []
    for event in events:
        if event == divtoken:
            continue
        kind = event[0]
        if kind == "n":  # note token: "n<pitch>d<duration>"
            _, body = event.split("n")
            pitch, duration = [int(part) for part in body.split("d")]
            decoded.append([pitch, offset, duration])
            offset += duration
        elif kind == "r":  # rest token: "r<duration>"
            offset += int(event.split("r")[1])
    return decoded
def str_to_duration(e):
    """Extract the duration (in semiquavers) from a note or rest token.

    Returns None for tokens that are neither notes nor rests.
    """
    prefix = e[0]
    if prefix == "n":
        # "n<pitch>d<duration>"
        return int(e.split("n")[1].split("d")[1])
    if prefix == "r":
        # "r<duration>"
        return int(e.split("r")[1])
    return None
| {
"content_hash": "86047af34e82e208d7c9ecb3495f226d",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 80,
"avg_line_length": 26.808823529411764,
"alnum_prop": 0.6346681294569391,
"repo_name": "alexcoplan/p2proj",
"id": "6661a263aac97f059911f9940b6c87be206cf0af",
"size": "3646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rnn/rnn_music_rep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "906643"
},
{
"name": "CSS",
"bytes": "30128"
},
{
"name": "HTML",
"bytes": "5296"
},
{
"name": "JavaScript",
"bytes": "36519"
},
{
"name": "LilyPond",
"bytes": "292"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "PostScript",
"bytes": "6245"
},
{
"name": "Python",
"bytes": "93014"
},
{
"name": "TeX",
"bytes": "494726"
}
],
"symlink_target": ""
} |
# Generated smoke test: explain a DecisionTreeRegressor model trained on
# the RandomReg_10 synthetic regression dataset.
from sklearn_explain.tests.skl_datasets_reg import skl_datasets_test as skltest
skltest.test_reg_dataset_and_model("RandomReg_10" , "DecisionTreeRegressor_0")
| {
"content_hash": "11d612db508709e89bef06623f17785d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 79,
"avg_line_length": 40.25,
"alnum_prop": 0.8012422360248447,
"repo_name": "antoinecarme/sklearn_explain",
"id": "a48e2051941f26e32ad714cbc2a5d7d785eb6c24",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/skl_datasets_reg/RandomReg_10/skl_dataset_RandomReg_10_DecisionTreeRegressor_0_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "110343"
}
],
"symlink_target": ""
} |
import os
import re
import codecs
# NOTE(review): os, re and codecs are imported but not referenced below --
# possibly leftovers from an earlier version-reading helper; confirm before
# removing.
from setuptools import setup, find_packages
package_name = "hera_librarian"
# Exclude test packages from the installed distribution.
packages = find_packages(exclude=["*.tests"])
setup(
    name=package_name,
    # NOTE(review): a static version is declared here, but use_scm_version=True
    # below asks setuptools_scm to derive the version from git metadata --
    # setuptools will not honour both; confirm which one is intended.
    version="1.1.1",
    author="HERA Team",
    author_email="hera@lists.berkeley.edu",
    url="https://github.com/HERA-Team/librarian/",
    license="BSD",
    description="A client for the HERA Librarian data management system",
    long_description="""\
The HERA Librarian is a distributed system for managing HERA collaboration
data products. This package provides client libraries that allow you to
communicate with a Librarian server. It also includes the server code,
although those modules are not installed in a standard ``pip install``.
""",
    install_requires=["astropy >=2.0"],
    tests_require=["pytest", "pytest-datafiles", "pytest-console-scripts"],
    packages=packages,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Topic :: Scientific/Engineering :: Astronomy",
    ],
    # Server-side dependencies are optional: `pip install hera_librarian[server]`.
    extras_require={
        "server": [
            "aipy",
            "alembic",
            "astropy >=2.0",
            "flask",
            "flask-sqlalchemy",
            "hera-librarian",
            "numpy",
            "psycopg2-binary",
            "pytz",
            "pyuvdata",
            "sqlalchemy>=1.4.0",
        ]
    },
    scripts=[
        "scripts/librarian_stream_file_or_directory.sh",
        "scripts/runserver.py",
    ],
    entry_points={"console_scripts": ["librarian=hera_librarian.cli:main"]},
    use_scm_version=True,
    setup_requires=["setuptools_scm", "setuptools_scm_git_archive"],
    include_package_data=True,
    zip_safe=False,
)
| {
"content_hash": "88f995289741dab7e69fff3f59bb2483",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 76,
"avg_line_length": 30.948275862068964,
"alnum_prop": 0.6217270194986072,
"repo_name": "HERA-Team/librarian",
"id": "7e1d3c626d377537041d8ffb6637787af1dea1af",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "753"
},
{
"name": "HTML",
"bytes": "35677"
},
{
"name": "Python",
"bytes": "309249"
},
{
"name": "Shell",
"bytes": "6785"
}
],
"symlink_target": ""
} |
'''error
Definitions for access/service return code errors/exceptions.
'''
import exceptions
SUCCESS = 0
#
# old style, pass through rc values
#
UNKNOWN = 1
DUPLICATE_KEY = 2
EXEC_TRACEBACK = 5
AFFINITY_ERROR = 6
#
# new style exceptions.
#
# Maps error id -> exception class; populated by the loop at the bottom of
# this module.
table = {}
# Look up the exception class for id *i* (falling back to AccessError) and
# instantiate it with the remaining positional arguments.
lookup = lambda i, *a: table.get(i, AccessError)(*a)
ACCESS_ERROR_MASK = 0x400 #starting at 1K to avoid collision with old rc values.
class AccessError(exceptions.Exception):
    """Base class for all access/service return-code errors."""
    id = 0x400 + 0


class DatabaseUnavailable(AccessError):
    """Database was unavailable to service the request."""
    id = 0x400 + 1


class NoServiceHandler(AccessError):
    """The requested service handler does not exist."""
    id = 0x400 + 2


class ServiceTraceback(AccessError):
    """Unknown/unhandled exception occurred while executing the request."""
    id = 0x400 + 3


class LockTimeout(AccessError):
    """Resource lock timed out / heavy lock contention."""
    id = 0x400 + 4


class ParameterError(AccessError):
    """The request had incorrect/inconsistent parameters."""
    id = 0x400 + 5


class NoServiceDefined(AccessError):
    """The request was made with no service defined."""
    id = 0x400 + 6
#
# Build ID/exception table
#
# NOTE(review): iterating locals().values() at module scope while the loop
# variable "v" is itself being written into the module namespace relies on
# Python 2 semantics; under Python 3 this raises "RuntimeError: dictionary
# changed size during iteration".
for v in locals().values():
    try:
        if issubclass(v, AccessError):
            table[v.id] = v
    except TypeError:
        # v was not a class; issubclass() rejects it -- skip.
        pass
# Unknown/unmapped ids fall back to the generic AccessError.
table[None] = AccessError
#
# end..
"content_hash": "dd0a0aa9443addec34d18220fd056c29",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 68,
"avg_line_length": 18.9125,
"alnum_prop": 0.6655651024454726,
"repo_name": "slideinc/notifier",
"id": "83718f5718fe87d00fcea9a9d207cf76c48f81a2",
"size": "3125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifier/error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "150228"
}
],
"symlink_target": ""
} |
import os
import re
from collections import defaultdict
from StringIO import StringIO
from flask import (Flask, jsonify, abort, request, send_file, current_app,
Response, render_template)
from lxml import etree
from wand.image import Image
import search_index
# NOTE: This directory should contain one subdirectory per book, structured as
# follows:
# <name>.hocr
# img/<pagenum>.jpg
# pagenum should be zero-padded to four, i.e. 0001.<extension>
BOOK_PATH = os.path.join(os.path.expanduser('~'), '.hocrviewer')
# TODO: Customize the following variables:
# - Logo
# - Logo Link
# TODO: Generate JavaScript through Jinja-Template, don't inline it in the
# HTML-Template
app = Flask(__name__)
def get_page_fname(bookname, page_idx):
    """Return the path of the image for *page_idx* in *bookname*.

    Page images are named with the zero-padded page number (e.g. 0001.jpg).
    Raises StopIteration if no matching image exists in the book's img/
    directory.
    """
    img_dir = os.path.join(BOOK_PATH, bookname, 'img')
    pattern = r"{0:04}\.(png|jpg|jpeg)".format(page_idx)
    match = next(candidate for candidate in os.listdir(img_dir)
                 if re.match(pattern, candidate.lower()))
    return os.path.join(img_dir, match)
def memoize(func):
    """ Memoization decorator for a function taking a single argument """
    class _Cache(dict):
        # Missing keys trigger a single call to func; the result is stored
        # and returned, so repeated lookups are plain dict hits.
        def __missing__(self, key):
            value = func(key)
            self[key] = value
            return value
    return _Cache().__getitem__
@memoize
def _get_dimensions(bookname):
    """Return [{'width': ..., 'height': ...}, ...], one entry per page image.

    Memoized per book (image files are opened via Wand/ImageMagick only on
    the first call).  The directory listing is sorted so the entries line
    up with page order -- filenames are zero-padded page numbers, whereas a
    plain os.listdir() returns arbitrary order and would scramble the
    per-page dimensions handed to the viewer.
    """
    dimensions = []
    imgpath = os.path.join(BOOK_PATH, bookname, 'img')
    for fname in sorted(os.listdir(imgpath)):
        fname = os.path.join(imgpath, fname)
        with Image(filename=fname) as img:
            dimensions.append({'width': img.width, 'height': img.height})
    return dimensions
@memoize
def _get_metadata(bookname):
    """Collect Dublin-Core metadata for *bookname* from its hOCR file.

    Returns a defaultdict(unicode) keyed by lowercase DC field name plus
    'num_pages'; missing fields default to the empty string and 'title'
    falls back to the book's directory name.  Memoized per book.
    """
    metadict = defaultdict(unicode)
    tree = etree.parse(os.path.join(BOOK_PATH, bookname,
                                    "{0}.hocr".format(bookname)))
    for field in ("Title", "Creator", "Description", "Publisher",
                  "Contributor", "Date", "Language"):
        # hOCR embeds Dublin Core as <meta name="DC.<Field>" content="...">.
        elems = tree.xpath('//meta[@name="DC.{0}"]'.format(field))
        if not elems:
            continue
        metadict[field.lower()] = elems[0].get('content')
    # One image file per page (see BOOK_PATH layout note at the top).
    metadict['num_pages'] = len(os.listdir(os.path.join(BOOK_PATH, bookname,
                                                        'img')))
    if not metadict['title']:
        metadict['title'] = bookname
    return metadict
@app.route('/')
def index():
    """Render the landing page listing every book with its metadata."""
    books = {
        name: _get_metadata(name)
        for name in os.listdir(BOOK_PATH)
        if not name.startswith('.')
    }
    return render_template('index.html', books=books)
@app.route('/<bookname>')
def view(bookname):
    """Render the BookReader viewer page for a single book."""
    context = {'bookname': bookname}
    return render_template('viewer.html', **context)
@app.route('/api/list')
def list():
    """ Return list of all available books. """
    # NOTE: shadows the builtin `list`, but the function name doubles as
    # the Flask endpoint name, so it is kept for URL compatibility.
    books = [name for name in os.listdir(BOOK_PATH)
             if not name.startswith('.')]
    return jsonify({'books': books})
@app.route('/api/reindex', methods=['GET'])
@app.route('/api/<bookname>/reindex', methods=['GET'])
def reindex(bookname=None):
    """ Recreate Whoosh index for all or a single book. """
    if bookname and bookname not in os.listdir(BOOK_PATH):
        abort(404)
    if bookname:
        targets = [bookname]
    else:
        targets = [name for name in os.listdir(BOOK_PATH)
                   if not name.startswith('.')]
    for target in targets:
        search_index.index_book(target)
    return Response(status=200)
@app.route('/api/<bookname>', methods=['GET'])
def get_book(bookname):
    """ Obtain metadata for book. """
    if bookname not in os.listdir(BOOK_PATH):
        abort(404)
    return jsonify(_get_metadata(bookname))
@app.route('/api/<bookname>/toc', methods=['GET'])
def get_book_toc(bookname):
    """ Obtain table of contents for book, parsed from its hOCR file. """
    if not bookname in os.listdir(BOOK_PATH):
        abort(404)
    path = os.path.join(BOOK_PATH, bookname, "{0}.hocr".format(bookname))
    tree = etree.parse(path)
    # Structural hOCR elements that mark TOC-worthy headings.
    struc_elems = tree.xpath('//*[@class="ocr_title" or @class="ocr_chapter"'
                             ' or @class="ocr_section"'
                             ' or @class="ocr_subsection"]')
    # NOTE: BookReader TOC is flat at the moment, so we can get away with this:
    # The page number is sliced from the enclosing ocr_page element's id
    # attribute -- the [5:] implies a fixed 5-character prefix, presumably
    # "page_"; confirm against the hOCR generator.
    output = {'toc': (
        [{'title': "".join(x.itertext()).strip(),
          'pagenum': (x.xpath('ancestor::div[@class="ocr_page"]')[0]
                      .get('id')[5:])}
         for x in struc_elems])
    }
    return jsonify(output)
@app.route('/api/<bookname>/search/', methods=['GET'])
@app.route('/api/<bookname>/search/<search_term>', methods=['GET'])
def search_book(bookname, search_term=None):
    """ Search for a pattern inside a book.

    Responds 404 for an unknown book and 400 when no search term is given.
    The response structure (q/ia/matches with per-match highlight boxes)
    mirrors what the BookReader search frontend consumes.
    """
    if not bookname in os.listdir(BOOK_PATH):
        abort(404)
    if not search_term:
        abort(400)
    # TODO: Verify that the book has indeed been indexed
    results = search_index.search(search_term, bookname=bookname)
    out_dict = {
        'q': search_term,
        'ia': bookname,
        # Each hit contributes its snippet text plus the bounding boxes
        # (l/t/r/b) of the highlighted words on its page.
        'matches': [{'text': hit['snippet'],
                     'par': [{
                         'boxes': [
                             {'l': box[0], 't': box[1], 'r': box[2],
                              'b': box[3], 'page': hit['pagenum']}
                             for box in hit['highlights']],
                         'page': hit['pagenum']}]} for hit in results]
    }
    # JSONP support: when a ?callback= parameter is present, wrap the JSON
    # payload in a JavaScript function call for cross-origin consumers.
    callback = request.args.get('callback', False)
    if callback:
        data = str(jsonify(out_dict).data)
        content = str(callback) + "({0})".format(data)
        mimetype = "application/javascript"
        return current_app.response_class(content, mimetype=mimetype)
    else:
        return jsonify(out_dict)
@app.route('/api/<bookname>/dimensions', methods=['GET'])
def get_dimensions(bookname):
    """ Obtain width and height for all pages from a book. """
    if bookname not in os.listdir(BOOK_PATH):
        abort(404)
    dims = _get_dimensions(bookname)
    return jsonify({'dimensions': dims})
@app.route('/api/<bookname>/img/', methods=['GET'])
@app.route('/api/<bookname>/img/<int:page_idx>', methods=['GET'])
def get_image(bookname, page_idx=1):
    """ Obtain the image of a given page from a book.

    Optional query parameters: ?scale=<float> resizes by that factor and
    ?rotate=<degrees> rotates the image before it is sent.
    """
    if not bookname in os.listdir(BOOK_PATH):
        abort(404)
    fname = get_page_fname(bookname, page_idx)
    if not os.path.exists(fname):
        abort(404)
    scale_factor = request.args.get('scale', type=float)
    rotate = request.args.get('rotate', type=int)
    # Render the (possibly transformed) image into an in-memory buffer so
    # the file on disk is never modified.
    img_io = StringIO()
    with Image(filename=fname) as img:
        if scale_factor:
            img.resize(width=int(scale_factor*img.width),
                       height=int(scale_factor*img.height))
        if rotate:
            img.rotate(rotate)
        img.save(file=img_io)
        # Capture the mimetype while the Wand image is still open.
        mimetype = img.mimetype
    img_io.seek(0)
    return send_file(img_io, mimetype=mimetype)
@app.route('/api/<bookname>/read/<int:page_idx>', methods=['GET'])
def read_page(bookname, page_idx):
    """ Obtain spoken text of given page.  Not implemented yet. """
    if bookname not in os.listdir(BOOK_PATH):
        abort(404)
    # TODO: Return MP3 with voice that reads the page at page_idx
    raise NotImplementedError
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader/debugger and
    # is not suitable for production deployment.
    app.run(debug=True)
| {
"content_hash": "15a5fc3b561274b1dd10d895bc69de58",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 34.49047619047619,
"alnum_prop": 0.5867734364213724,
"repo_name": "jbaiter/hocrviewer",
"id": "5829814546144247872d078c9c28b6286287a84d",
"size": "7243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21397"
},
{
"name": "HTML",
"bytes": "6109"
},
{
"name": "JavaScript",
"bytes": "201361"
},
{
"name": "Python",
"bytes": "10845"
}
],
"symlink_target": ""
} |
"""
The Pygments reStructuredText directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    This fragment is a `Docutils <https://docutils.sf.net/>`_ 0.5 directive
that renders source code (to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation
<http://docutils.sourceforge.net/docs/howto/rst-directives.html>`_
to get all the gory details.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import typing as t
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.lexers.shell import BashSessionLexer
from pygments.lexers.special import TextLexer
if t.TYPE_CHECKING:
from pygments.formatter import Formatter
#: Monkey patch Bash Session lexer to gobble up initial space after prompt
# NOTE(review): this overwrites a private attribute of pygments'
# BashSessionLexer; the regex must be kept in sync with the upstream
# pattern whenever pygments is upgraded.
BashSessionLexer._ps1rgx = re.compile(
    r"^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)"
    r"?|\[\S+[@:][^\n]+\].+))\s*[$#%] )(.*\n?)"
)
# Options
# ~~~~~~~

#: Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False

#: The default formatter
DEFAULT = HtmlFormatter(cssclass="highlight code-block", noclasses=INLINESTYLES)

#: Add name -> formatter pairs for every variant you want to use.
#: Values are formatter *instances* (like DEFAULT, used directly by
#: CodeBlock.run), not formatter classes -- the annotation previously said
#: t.Type["Formatter"], which did not match the documented usage.
VARIANTS: t.Dict[str, "Formatter"] = {
    # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class CodeBlock(Directive):
    """Source code syntax highlighting."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {key: directives.flag for key in VARIANTS}
    has_content = True

    def run(self):
        """Render the directive content as highlighted HTML.

        Falls back to a plain-text lexer when the requested language is
        unknown, so a bad language name never aborts the build.
        """
        self.assert_has_content()
        try:
            lexer = get_lexer_by_name(self.arguments[0])
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer = TextLexer()
        # Take an arbitrary variant formatter if an option was given,
        # otherwise the default (clearer than the old `and ... or` idiom).
        if self.options:
            formatter = VARIANTS[list(self.options)[0]]
        else:
            formatter = DEFAULT
        parsed = highlight("\n".join(self.content), lexer, formatter)
        return [nodes.raw("", parsed, format="html")]
| {
"content_hash": "8ec42dc023cc3f588eeb9aeadbb48979",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 31.172043010752688,
"alnum_prop": 0.6857537081752328,
"repo_name": "tony/django-docutils",
"id": "205738ffcea71aa8a842616531927136939eb4b4",
"size": "2899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/django_docutils/lib/directives/code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "1463"
},
{
"name": "Python",
"bytes": "221630"
}
],
"symlink_target": ""
} |
import json
from oslo_log import log as logging
from hpedockerplugin.backend_orchestrator import Orchestrator
import hpedockerplugin.etcdutil as util
import hpedockerplugin.file_manager as fmgr
LOG = logging.getLogger(__name__)
class FileBackendOrchestrator(Orchestrator):
fp_etcd_client = None
    def __init__(self, host_config, backend_configs, def_backend_name):
        """Initialize the file-backend orchestrator.

        Delegates entirely to the base Orchestrator; presumably the base
        constructor also invokes _initialize_orchestrator below to create
        the class-level file-persona etcd client -- confirm in
        backend_orchestrator.Orchestrator.
        """
        super(FileBackendOrchestrator, self).__init__(
            host_config, backend_configs, def_backend_name)
    @staticmethod
    def _get_fp_etcd_client(host_config):
        """Build an etcd client for file-persona metadata from host config."""
        return util.HpeFilePersonaEtcdClient(
            host_config.host_etcd_ip_address,
            host_config.host_etcd_port_number,
            host_config.host_etcd_client_cert,
            host_config.host_etcd_client_key
        )
    def _initialize_orchestrator(self, host_config):
        """Create the shared (class-level) file-persona etcd client."""
        FileBackendOrchestrator.fp_etcd_client = self._get_fp_etcd_client(
            host_config
        )
    # Implementation of abstract function from base class
    def get_manager(self, host_config, config, etcd_client,
                    node_id, backend_name):
        """Return a FileManager wired to both etcd clients for *backend_name*."""
        LOG.info("Getting file manager...")
        return fmgr.FileManager(host_config, config, etcd_client,
                                FileBackendOrchestrator.fp_etcd_client,
                                node_id, backend_name)
    # Implementation of abstract function from base class
    def _get_etcd_client(self, host_config):
        """Build the share-metadata etcd client from host config."""
        # Reusing volume code for ETCD client
        return util.HpeShareEtcdClient(
            host_config.host_etcd_ip_address,
            host_config.host_etcd_port_number,
            host_config.host_etcd_client_cert,
            host_config.host_etcd_client_key)
def get_meta_data_by_name(self, name):
LOG.info("Fetching share details from ETCD: %s" % name)
share = self._etcd_client.get_share(name)
if share:
LOG.info("Returning share details: %s" % share)
return share
LOG.info("Share details not found in ETCD: %s" % name)
return None
def share_exists(self, name):
try:
self._etcd_client.get_share(name)
except Exception:
return False
else:
return True
def create_share(self, **kwargs):
name = kwargs['name']
# Removing backend from share dictionary
# This needs to be put back when share is
# saved to the ETCD store
backend = kwargs.get('backend')
return self._execute_request_for_backend(
backend, 'create_share', name, **kwargs)
def create_share_help(self, **kwargs):
LOG.info("Working on share help content generation...")
create_help_path = "./config/create_share_help.txt"
create_help_file = open(create_help_path, "r")
create_help_content = create_help_file.read()
create_help_file.close()
LOG.info(create_help_content)
return json.dumps({u"Err": create_help_content})
def get_backends_status(self, **kwargs):
LOG.info("Getting backend status...")
line = "=" * 54
spaces = ' ' * 42
resp = "\n%s\nNAME%sSTATUS\n%s\n" % (line, spaces, line)
printable_len = 45
for k, v in self._manager.items():
backend_state = v['backend_state']
padding = (printable_len - len(k)) * ' '
resp += "%s%s %s\n" % (k, padding, backend_state)
return json.dumps({u'Err': resp})
def remove_object(self, obj):
share_name = obj['name']
return self._execute_request('remove_share', share_name, obj)
def mount_object(self, obj, mount_id):
share_name = obj['name']
return self._execute_request('mount_share', share_name,
obj, mount_id)
def unmount_object(self, obj, mount_id):
share_name = obj['name']
return self._execute_request('unmount_share', share_name,
obj, mount_id)
def get_object_details(self, obj):
share_name = obj['name']
return self._execute_request('get_share_details', share_name, obj)
def list_objects(self):
file_mgr = None
file_mgr_info = self._manager.get('DEFAULT')
if file_mgr_info:
file_mgr = file_mgr_info['mgr']
else:
file_mgr_info = self._manager.get('DEFAULT_FILE')
if file_mgr_info:
file_mgr = file_mgr_info['mgr']
share_list = []
db_shares = self._etcd_client.get_all_shares()
if file_mgr:
for db_share in db_shares:
share_info = file_mgr.get_share_info_for_listing(
db_share['name'],
db_share
)
share_list.append(share_info)
return share_list
def get_path(self, obj):
mount_dir = ''
if 'path_info' in obj:
share_name = obj['name']
mount_dir = self._execute_request('get_mount_dir', share_name)
response = json.dumps({u"Err": '', u"Mountpoint": mount_dir})
return response
| {
"content_hash": "9ca2a67efbf4b7e46324c9e1f682367a",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 74,
"avg_line_length": 36.02797202797203,
"alnum_prop": 0.5805512422360248,
"repo_name": "hpe-storage/python-hpedockerplugin",
"id": "e98e3feaafb0e20b562ee2cd710272bea48feb3e",
"size": "5152",
"binary": false,
"copies": "1",
"ref": "refs/heads/plugin_v2",
"path": "hpedockerplugin/file_backend_orchestrator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3360"
},
{
"name": "Python",
"bytes": "442905"
},
{
"name": "Shell",
"bytes": "3646"
}
],
"symlink_target": ""
} |
from unittest import mock
import ibis
import pydata_google_auth
import pytest
from google.auth import credentials as auth
from google.cloud import bigquery as bq
import ibis_bigquery
# Apply the ``bigquery`` marker to every test in this module.
pytestmark = pytest.mark.bigquery
def test_repeated_project_name(project_id, credentials):
    """Connecting with a dataset_id qualified by the same project must work."""
    dataset = "{}.testing".format(project_id)
    client = ibis_bigquery.connect(
        project_id=project_id,
        dataset_id=dataset,
        credentials=credentials,
    )
    tables = client.list_tables()
    assert "functional_alltypes" in tables
def test_project_id_different_from_default_credentials(monkeypatch):
    """An explicit project_id overrides the credentials' default project."""
    fake_creds = mock.create_autospec(auth.Credentials)

    def fake_default(*args, **kwargs):
        return fake_creds, "default-project-id"

    monkeypatch.setattr(pydata_google_auth, "default", fake_default)
    connection = ibis_bigquery.connect(
        project_id="explicit-project-id",
    )
    assert connection.billing_project == "explicit-project-id"
def test_without_dataset(project_id, credentials):
    """Listing tables with no default dataset raises a clear ValueError."""
    connection = ibis_bigquery.connect(
        project_id=project_id,
        credentials=credentials,
    )
    with pytest.raises(ValueError, match="Unable to determine BigQuery"):
        connection.list_tables()
def test_application_name_sets_user_agent(project_id, credentials, monkeypatch):
    """application_name must appear in the user agent alongside the ibis tag."""
    fake_client_cls = mock.create_autospec(bq.Client)
    monkeypatch.setattr(bq, "Client", fake_client_cls)

    ibis_bigquery.connect(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
        application_name="my-great-app/0.7.0",
        credentials=credentials,
    )

    client_info = fake_client_cls.call_args[1]["client_info"]
    agent = client_info.to_user_agent()
    expected_ibis_tag = " ibis/{}".format(ibis.__version__)
    assert expected_ibis_tag in agent
    assert "my-great-app/0.7.0 " in agent
def test_auth_default(project_id, credentials, monkeypatch):
    """connect() with no auth options uses default scopes, a read/write
    cache, and no local webserver."""
    recorded = []

    def fake_default(*args, **kwargs):
        recorded.append((args, kwargs))
        return credentials, project_id

    monkeypatch.setattr(pydata_google_auth, "default", fake_default)
    ibis_bigquery.connect(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
    )

    assert len(recorded) == 1
    call_args, call_kwargs = recorded[0]
    assert len(call_args) == 1
    assert call_args[0] == ibis_bigquery.SCOPES
    assert not call_kwargs["use_local_webserver"]
    cache = call_kwargs["credentials_cache"]
    assert isinstance(cache, pydata_google_auth.cache.ReadWriteCredentialsCache)
def test_auth_local_webserver(project_id, credentials, monkeypatch):
    """auth_local_webserver=True is forwarded as use_local_webserver."""
    recorded = []

    def fake_default(*args, **kwargs):
        recorded.append((args, kwargs))
        return credentials, project_id

    monkeypatch.setattr(pydata_google_auth, "default", fake_default)
    ibis_bigquery.connect(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
        auth_local_webserver=True,
    )

    assert len(recorded) == 1
    _, call_kwargs = recorded[0]
    assert call_kwargs["use_local_webserver"]
def test_auth_external_data(project_id, credentials, monkeypatch):
    """auth_external_data=True requests the extended OAuth scopes."""
    recorded = []

    def fake_default(*args, **kwargs):
        recorded.append((args, kwargs))
        return credentials, project_id

    monkeypatch.setattr(pydata_google_auth, "default", fake_default)
    ibis_bigquery.connect(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
        auth_external_data=True,
    )

    assert len(recorded) == 1
    call_args, _ = recorded[0]
    assert len(call_args) == 1
    assert call_args[0] == ibis_bigquery.EXTERNAL_DATA_SCOPES
def test_auth_cache_reauth(project_id, credentials, monkeypatch):
    """auth_cache='reauth' selects a write-only credentials cache."""
    recorded = []

    def fake_default(*args, **kwargs):
        recorded.append((args, kwargs))
        return credentials, project_id

    monkeypatch.setattr(pydata_google_auth, "default", fake_default)
    ibis_bigquery.connect(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
        auth_cache="reauth",
    )

    assert len(recorded) == 1
    _, call_kwargs = recorded[0]
    cache = call_kwargs["credentials_cache"]
    assert isinstance(cache, pydata_google_auth.cache.WriteOnlyCredentialsCache)
def test_auth_cache_none(project_id, credentials, monkeypatch):
    """auth_cache='none' disables credential caching entirely."""
    recorded = []

    def fake_default(*args, **kwargs):
        recorded.append((args, kwargs))
        return credentials, project_id

    monkeypatch.setattr(pydata_google_auth, "default", fake_default)
    ibis_bigquery.connect(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
        auth_cache="none",
    )

    assert len(recorded) == 1
    _, call_kwargs = recorded[0]
    assert call_kwargs["credentials_cache"] is pydata_google_auth.cache.NOOP
def test_auth_cache_unknown(project_id):
    """An unrecognized auth_cache value is rejected with ValueError."""
    connect_kwargs = dict(
        project_id=project_id,
        dataset_id="bigquery-public-data.stackoverflow",
        auth_cache="not_a_real_cache",
    )
    with pytest.raises(ValueError, match="unexpected value for auth_cache"):
        ibis_bigquery.connect(**connect_kwargs)
| {
"content_hash": "723186a18572797102f192ab67090709",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 80,
"avg_line_length": 28.956043956043956,
"alnum_prop": 0.6666034155597723,
"repo_name": "ibis-project/ibis-bigquery",
"id": "33e288d77058264aac0d33dce5699ae3a0faa30c",
"size": "5270",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/system/test_connect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "187295"
},
{
"name": "Shell",
"bytes": "1038"
}
],
"symlink_target": ""
} |
"""pwkit.ucd_physics - Physical calculations for (ultra)cool dwarfs.
These functions generally implement various nontrivial physical relations
published in the literature. See docstrings for references.
Functions:
bcj_from_spt
J-band bolometric correction from SpT.
bck_from_spt
K-band bolometric correction from SpT.
load_bcah98_mass_radius
Load Baraffe+ 1998 mass/radius data.
mass_from_j
Mass from absolute J magnitude.
mk_radius_from_mass_bcah98
Radius from mass, using BCAH98 models.
tauc_from_mass
Convective turnover time from mass.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('''bcj_from_spt bck_from_spt load_bcah98_mass_radius mass_from_j
mk_radius_from_mass_bcah98 tauc_from_mass''').split ()
# Implementation note: we use the numutil.broadcastize() decorator to be able
# to handle both scalar and vector arguments semi-transparently. I'd also like
# us to be able to handle Uvals and Lvals, which aren't going to be compatible
# with this approach. The latter will also present challenges for
# bounds-checking of inputs, so I'm going the numpy route for now. Not sure
# what to do about this in general.
import numpy as np
from . import cgs, msmt, numutil
# Bolometric luminosity estimation.
@numutil.broadcastize (1)
def bcj_from_spt (spt):
    """Calculate a bolometric correction constant for a J band magnitude based on
    a spectral type, using the fit of Wilking+ (1999AJ....117..469W).

    spt - Numerical spectral type. M0=0, M9=9, L0=10, ...

    Returns: the correction `bcj` such that `m_bol = j_abs + bcj`, or NaN if
    `spt` is out of range.

    Valid values of `spt` are between 0 and 10.

    """
    in_range = (spt >= 0) & (spt <= 10)
    bcj = 1.53 + 0.148 * spt - 0.0105 * spt**2
    return np.where (in_range, bcj, np.nan)
@numutil.broadcastize (1)
def bck_from_spt (spt):
    """Calculate a bolometric correction constant for a K band magnitude based on
    a spectral type, using the fits of Wilking+ (1999AJ....117..469W), Dahn+
    (2002AJ....124.1170D), and Nakajima+ (2004ApJ...607..499N).

    spt - Numerical spectral type. M0=0, M9=9, L0=10, ...

    Returns: the correction `bck` such that `m_bol = k_abs + bck`, or NaN if
    `spt` is out of range.

    Valid values of `spt` are between 2 and 30.

    """
    # (Docstring fix: this is the K-band correction; the old text said
    # "J band", a copy/paste slip from bcj_from_spt.)
    #
    # NOTE: the way np.piecewise() is implemented, the last 'true' value in
    # the condition list is the one that takes precedence. This motivates the
    # construction of our condition list.
    #
    # XXX: I've restructured the implementation; this needs testing!

    # `np.asfarray` was deprecated and removed in NumPy 2.0; an explicit
    # float conversion is the documented replacement. (The conversion is
    # needed because we crash with integer inputs for some reason.)
    spt = np.asarray (spt, dtype=float)
    return np.piecewise (spt,
                         [spt < 30,
                          spt < 19,
                          spt <= 14,
                          spt < 10,
                          (spt < 2) | (spt >= 30)],
                         [lambda s: 3.41 - 0.21 * (s - 20), # Nakajima
                          lambda s: 3.42 - 0.075 * (s - 14), # Dahn, Nakajima
                          lambda s: 3.42 + 0.075 * (s - 14), # Dahn, Nakajima
                          lambda s: 2.43 + 0.0895 * s, # Wilking; only ok for spt >= M2!
                          np.nan])
Mbol_sun = 4.7554
"""Absolute bolometric luminosity of the Sun. Copied from Eric Mamajek's star
notes:
https://sites.google.com/site/mamajeksstarnotes/basic-astronomical-data-for-the-sun
Quoted uncertainty is 0.0004.
Note that this bit isn't UCD-specific and could/should go elsewhere, say,
astutil.
NOTE! I haven't verified if this value is consistent with the one implicitly
adopted by the various relations that I use above! This could result in errors
of up to ~0.1 mag. Cf. Torres, 2010AJ....140.1158T.
"""
def lbol_from_mbol (mbol, format='cgs'):
    """Convert an absolute bolometric magnitude into a bolometric luminosity.

    mbol   - absolute bolometric magnitude
    format - 'cgs' (erg/s), 'logsun' (log10 of L/Lsun), or 'logcgs'
             (log10 of erg/s)

    Raises ValueError for an unrecognized `format`.

    """
    from .cgs import lsun
    log_lsun_ratio = 0.4 * (Mbol_sun - mbol)

    if format == 'logsun':
        return log_lsun_ratio
    if format == 'logcgs':
        return np.log10 (lsun) + log_lsun_ratio
    if format == 'cgs':
        return lsun * 10**log_lsun_ratio

    raise ValueError ('unrecognized output format %r' % format)
@numutil.broadcastize (4)
def lbol_from_spt_dist_mag (sptnum, dist_pc, jmag, kmag, format='cgs'):
    """Estimate a UCD's bolometric luminosity given some basic parameters.

    sptnum: the spectral type as a number; 8 -> M8; 10 -> L0 ; 20 -> T0
      Valid values range between 0 and 30, ie M0 to Y0.
    dist_pc: distance to the object in parsecs
    jmag: object's J-band magnitude or NaN (*not* None) if unavailable
    kmag: same with K-band magnitude
    format: either 'cgs', 'logcgs', or 'logsun', defining the form of the
      outputs. Logarithmic quantities are base 10.

    This routine can be used with vectors of measurements. The result will be
    NaN if a value cannot be computed. This routine implements the method
    documented in the Appendix of Williams et al., 2014ApJ...785....9W
    (doi:10.1088/0004-637X/785/1/9).

    """
    bcj = bcj_from_spt (sptnum)
    bck = bck_from_spt (sptnum)

    # `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # documented replacement and yields the same platform default dtype.
    n = np.zeros (sptnum.shape, dtype=int)
    app_mbol = np.zeros (sptnum.shape)

    # Average the apparent bolometric magnitudes from whichever of the J
    # and K bands are usable (0, 1, or 2 bands per element).
    w = np.isfinite (bcj) & np.isfinite (jmag)
    app_mbol[w] += jmag[w] + bcj[w]
    n[w] += 1

    w = np.isfinite (bck) & np.isfinite (kmag)
    app_mbol[w] += kmag[w] + bck[w]
    n[w] += 1

    # Distance modulus converts apparent to absolute magnitude.
    w = (n != 0)
    abs_mbol = (app_mbol[w] / n[w]) - 5 * (np.log10 (dist_pc[w]) - 1)
    # note: abs_mbol is filtered by `w`

    lbol = np.empty (sptnum.shape)
    lbol.fill (np.nan)
    lbol[w] = lbol_from_mbol (abs_mbol, format=format)
    return lbol
# Mass estimation.
def _delfosse_mass_from_j_helper (jmag):
    """Evaluate the Delfosse+ (2000) polynomial giving log10(mass/Msun)
    from absolute J magnitude, and scale the result to grams."""
    log_mass_msun = 1e-3 * (1.6 + 6.01 * jmag + 14.888 * jmag**2 +
                            -5.3557 * jmag**3 + 0.28518 * jmag**4)
    return 10**log_mass_msun * cgs.msun
@numutil.broadcastize (1)
def mass_from_j (j_abs):
    """Estimate mass in cgs from absolute J magnitude, using the relationship of
    Delfosse+ (2000A&A...364..217D).

    j_abs - The absolute J magnitude.

    Returns: the estimated mass in grams.

    If j_abs > 11, a fixed result of 0.1 Msun is returned. Values of j_abs <
    5.5 are illegal and get NaN. There is a discontinuity in the relation at
    j_abs = 11, which yields 0.0824 Msun.

    """
    # `np.asfarray` was deprecated and removed in NumPy 2.0; converting with
    # an explicit float dtype is the documented equivalent.
    j_abs = np.asarray (j_abs, dtype=float)
    # np.piecewise applies the function of the *last* matching condition, so
    # the narrow NaN case (j_abs < 5.5) must come after the broad <= 11 case.
    return np.piecewise (j_abs,
                         [j_abs > 11,
                          j_abs <= 11,
                          j_abs < 5.5],
                         [0.1 * cgs.msun,
                          _delfosse_mass_from_j_helper,
                          np.nan])
# Radius estimation.
def load_bcah98_mass_radius (tablelines, metallicity=0, heliumfrac=0.275,
                             age_gyr=5., age_tol=0.05):
    """Load mass and radius from the main data table for the famous models of
    Baraffe+ (1998A&A...337..403B).

    tablelines
      An iterable yielding lines from the table data file.
      I've named the file '1998A&A...337..403B_tbl1-3.dat'
      in some repositories (it's about 150K, not too bad).
    metallicity
      The metallicity of the model to select.
    heliumfrac
      The helium fraction of the model to select.
    age_gyr
      The age of the model to select, in Gyr.
    age_tol
      The tolerance on the matched age, in Gyr.

    Returns: (mass, radius), where both are Numpy arrays.

    The ages in the data table vary slightly at fixed metallicity and helium
    fraction. Therefore, there needs to be a tolerance parameter for matching
    the age.

    """
    masses = []
    radii = []

    for line in tablelines:
        fields = line.strip ().split ()

        # Skip rows that don't match the requested model parameters.
        if float (fields[0]) != metallicity:
            continue
        if float (fields[1]) != heliumfrac:
            continue
        if abs (float (fields[4]) - age_gyr) > age_tol:
            continue

        mass = float (fields[3]) * cgs.msun
        teff = float (fields[5])
        mbol = float (fields[7])

        # Radius follows from L = 4 pi R^2 sigma T^4.
        # XXX to check: do they specify m_bol_sun = 4.64? IIRC, yes.
        lbol = 10**(0.4 * (4.64 - mbol)) * cgs.lsun
        area = lbol / (cgs.sigma * teff**4)
        radius = np.sqrt (area / (4 * np.pi))

        masses.append (mass)
        radii.append (radius)

    return np.asarray (masses), np.asarray (radii)
def mk_radius_from_mass_bcah98 (*args, **kwargs):
    """Create a function that maps (sub)stellar mass to radius, based on the
    famous models of Baraffe+ (1998A&A...337..403B).

    tablelines
      An iterable yielding lines from the table data file.
      I've named the file '1998A&A...337..403B_tbl1-3.dat'
      in some repositories (it's about 150K, not too bad).
    metallicity
      The metallicity of the model to select.
    heliumfrac
      The helium fraction of the model to select.
    age_gyr
      The age of the model to select, in Gyr.
    age_tol
      The tolerance on the matched age, in Gyr.

    Returns: a function mtor(mass_g), return a radius in cm as a function of a
    mass in grams. The mass must be between 0.05 and 0.7 Msun.

    The ages in the data table vary slightly at fixed metallicity and helium
    fraction. Therefore, there needs to be a tolerance parameter for matching
    the age.

    This function requires Scipy.

    """
    from scipy.interpolate import UnivariateSpline

    masses, radii = load_bcah98_mass_radius (*args, **kwargs)
    spline = UnivariateSpline (masses, radii, s=1)

    # The broadcastize decorator lets the range check work identically for
    # scalar and vector inputs.
    @numutil.broadcastize (1)
    def interp (mass_g):
        out_of_range = (np.any (mass_g < 0.05 * cgs.msun)
                        or np.any (mass_g > 0.7 * cgs.msun))
        if out_of_range:
            raise ValueError ('mass_g must must be between 0.05 and 0.7 Msun')
        return spline (mass_g)

    return interp
# Estimation of the convective turnover time.
@numutil.broadcastize (1)
def tauc_from_mass (mass_g):
    """Estimate the convective turnover time from mass, using the method described
    in Cook+ (2014ApJ...785...10C).

    mass_g - UCD mass in grams.

    Returns: the convective turnover timescale in seconds.

    Masses larger than 1.3 Msun are out of range and yield NaN. If the mass is
    <0.1 Msun, the turnover time is fixed at 70 days.

    The Cook method was inspired by the description in McLean+
    (2012ApJ...746...23M). It is a hybrid of the method described in Reiners &
    Basri (2010ApJ...710..924R) and the data shown in Kiraga & Stepien
    (2007AcA....57..149K). However, this version imposes the 70-day cutoff in
    terms of mass, not spectral type, so that it is entirely defined in terms
    of a single quantity.

    There are discontinuities between the different break points! Any future
    use should tweak the coefficients to make everything smooth.

    """
    m = mass_g / cgs.msun
    # np.piecewise applies the function of the *last* true condition, so the
    # conditions run from broadest (m < 1.3) to narrowest (m < 0.1). The
    # fifth entry (NaN) is the default applied where no condition holds,
    # i.e. m >= 1.3. Piecewise values are in days; * 86400 yields seconds.
    return np.piecewise (m,
                         [m < 1.3,
                          m < 0.82,
                          m < 0.65,
                          m < 0.1],
                         [lambda x: 61.7 - 44.7 * x,
                          25.,
                          lambda x: 86.9 - 94.3 * x,
                          70.,
                          np.nan]) * 86400.
| {
"content_hash": "d0d3d412b7f3d4114e7ac12ec8061ac9",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 88,
"avg_line_length": 33.56287425149701,
"alnum_prop": 0.6125780553077609,
"repo_name": "pkgw/pwkit",
"id": "6e80a115faa29dd91759aa02d26ce85b02818abd",
"size": "11357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pwkit/ucd_physics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "198303"
},
{
"name": "IDL",
"bytes": "142529"
},
{
"name": "Python",
"bytes": "1498923"
}
],
"symlink_target": ""
} |
# TencentCloud Redis API error-code constants. Each comment gives the
# English meaning of the code (translated from the original Chinese).

# Authentication failure.
AUTHFAILURE = 'AuthFailure'

# Invalid authorization.
AUTHFAILURE_INVALIDAUTHORIZATION = 'AuthFailure.InvalidAuthorization'

# Failed to add instance security group information.
FAILEDOPERATION_ADDINSTANCEINFOFAILED = 'FailedOperation.AddInstanceInfoFailed'

# Failed to associate security groups.
FAILEDOPERATION_ASSOCIATESECURITYGROUPSFAILED = 'FailedOperation.AssociateSecurityGroupsFailed'

# Failed to clear instance security group information.
FAILEDOPERATION_CLEARINSTANCEINFOFAILED = 'FailedOperation.ClearInstanceInfoFailed'

# Failed to commit the workflow.
FAILEDOPERATION_COMMITFLOWERROR = 'FailedOperation.CommitFlowError'

# Failed to disassociate security groups.
FAILEDOPERATION_DISASSOCIATESECURITYGROUPSFAILED = 'FailedOperation.DisassociateSecurityGroupsFailed'

# The operation is not allowed in the current DTS state.
FAILEDOPERATION_DTSSTATUSABNORMAL = 'FailedOperation.DtsStatusAbnormal'

# The flow (task) record does not exist.
FAILEDOPERATION_FLOWNOTEXISTS = 'FailedOperation.FlowNotExists'

# Failed to get security group details.
FAILEDOPERATION_GETSECURITYGROUPDETAILFAILED = 'FailedOperation.GetSecurityGroupDetailFailed'

# Payment failed.
FAILEDOPERATION_PAYFAILED = 'FailedOperation.PayFailed'

# Loosely-defined catch-all error code for now.
FAILEDOPERATION_REDOFLOWFAILED = 'FailedOperation.RedoFlowFailed'

# Failed to set the rule location.
FAILEDOPERATION_SETRULELOCATIONFAILED = 'FailedOperation.SetRuleLocationFailed'

# Internal system error, unrelated to business logic.
FAILEDOPERATION_SYSTEMERROR = 'FailedOperation.SystemError'

# The instance does not support this API.
FAILEDOPERATION_UNSUPPORTERROR = 'FailedOperation.UnSupportError'

# Invalid weekday input data.
FAILEDOPERATION_UNKNOWN = 'FailedOperation.Unknown'

# Failed to update instance security group information.
FAILEDOPERATION_UPDATEINSTANCEINFOFAILED = 'FailedOperation.UpdateInstanceInfoFailed'

# Failed to update security groups.
FAILEDOPERATION_UPDATESECURITYGROUPSFAILED = 'FailedOperation.UpdateSecurityGroupsFailed'

# Internal error.
INTERNALERROR = 'InternalError'

# CAM authentication error.
INTERNALERROR_CAMAUTHOSSRESPONSERETURNCODEERROR = 'InternalError.CamAuthOssResponseReturnCodeError'

# Generic DB operation error; may be an update, insert, select, etc.
INTERNALERROR_DBOPERATIONFAILED = 'InternalError.DbOperationFailed'

# Failed to execute the HTTP request.
INTERNALERROR_EXECHTTPREQUESTERROR = 'InternalError.ExecHttpRequestError'

# No permission to operate.
INTERNALERROR_INSTANCEOPERATEPERMISSIONERROR = 'InternalError.InstanceOperatePermissionError'

# Internal error.
INTERNALERROR_INTERNALERROR = 'InternalError.InternalError'

# Error getting the instance list.
INTERNALERROR_LISTINSTANCESERROR = 'InternalError.ListInstancesError'

# Network error.
INTERNALERROR_NETWORKERR = 'InternalError.NetWorkErr'

# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'

# The corresponding API was not found.
INVALIDPARAMETER_ACTIONNOTFOUND = 'InvalidParameter.ActionNotFound'

# Empty parameter.
INVALIDPARAMETER_EMPTYPARAM = 'InvalidParameter.EmptyParam'

# Illegal parameter error.
INVALIDPARAMETER_ILLEGALPARAMETERERROR = 'InvalidParameter.IllegalParameterError'

# The instance's security groups exceed the limit.
INVALIDPARAMETER_INSTANCESGOVERLIMITERROR = 'InvalidParameter.InstanceSGOverLimitError'

# Business parameter error.
INVALIDPARAMETER_INVALIDPARAMETER = 'InvalidParameter.InvalidParameter'

# Parameter error; operation not supported.
INVALIDPARAMETER_NOTSUPPORTED = 'InvalidParameter.NotSupported'

# The Shanghai Finance region only provides VPC networks.
INVALIDPARAMETER_ONLYVPCONSPECZONEID = 'InvalidParameter.OnlyVPCOnSpecZoneId'

# The time format or range does not meet requirements.
INVALIDPARAMETER_PERIOD = 'InvalidParameter.Period'

# The API has no CAM permission.
INVALIDPARAMETER_PERMISSIONDENIED = 'InvalidParameter.PermissionDenied'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# The backup does not exist.
INVALIDPARAMETERVALUE_BACKUPNOTEXISTS = 'InvalidParameterValue.BackupNotExists'

# The instance is not in a VPC network.
INVALIDPARAMETERVALUE_BASENETWORKACCESSDENY = 'InvalidParameterValue.BaseNetWorkAccessDeny'

# Business validation failed.
INVALIDPARAMETERVALUE_CHECKNOTPASS = 'InvalidParameterValue.CheckNotPass'

# Rename failed: the name violates the naming rules.
INVALIDPARAMETERVALUE_INSTANCENAMERULEERROR = 'InvalidParameterValue.InstanceNameRuleError'

# Wrong instance type requested (TypeId 1: cluster edition; 2: master-replica
# edition, i.e. the original master-replica edition).
INVALIDPARAMETERVALUE_INVALIDINSTANCETYPEID = 'InvalidParameterValue.InvalidInstanceTypeId'

# Invalid VpcId or SubnetId for a VPC network.
INVALIDPARAMETERVALUE_INVALIDSUBNETID = 'InvalidParameterValue.InvalidSubnetId'

# The requested capacity is outside the purchasable range.
INVALIDPARAMETERVALUE_MEMSIZENOTINRANGE = 'InvalidParameterValue.MemSizeNotInRange'

# The instance cannot be bound repeatedly.
INVALIDPARAMETERVALUE_NOTREPEATBIND = 'InvalidParameterValue.NotRepeatBind'

# The password is empty.
INVALIDPARAMETERVALUE_PASSWORDEMPTY = 'InvalidParameterValue.PasswordEmpty'

# Password verification failed: wrong password.
INVALIDPARAMETERVALUE_PASSWORDERROR = 'InvalidParameterValue.PasswordError'

# Tencent Group internal accounts are forbidden to use password-free instances.
INVALIDPARAMETERVALUE_PASSWORDFREEDENIED = 'InvalidParameterValue.PasswordFreeDenied'

# When setting a password, the old password passed in by MC differs from the
# previously configured password.
INVALIDPARAMETERVALUE_PASSWORDRULEERROR = 'InvalidParameterValue.PasswordRuleError'

# The requested capacity is too small; shrinking is not supported.
INVALIDPARAMETERVALUE_REDUCECAPACITYNOTALLOWED = 'InvalidParameterValue.ReduceCapacityNotAllowed'

# The replication group does not exist.
INVALIDPARAMETERVALUE_REPLICATIONGROUPNOTEXISTS = 'InvalidParameterValue.ReplicationGroupNotExists'

# Request parameter error: invalid security group id.
INVALIDPARAMETERVALUE_SECURITYGROUPIDSNOTEXISTS = 'InvalidParameterValue.SecurityGroupIdsNotExists'

# The instance spec does not exist.
INVALIDPARAMETERVALUE_SPECNOTEXIST = 'InvalidParameterValue.SpecNotExist'

# The instance type is not supported.
INVALIDPARAMETERVALUE_UNSUPPORTEDTYPE = 'InvalidParameterValue.UnSupportedType'

# Invalid UniqVpcId or SubnetId for a VPC network.
INVALIDPARAMETERVALUE_UNVPCIDNOTEXISTS = 'InvalidParameterValue.UnVpcIdNotExists'

# Invalid weekday input data.
INVALIDPARAMETERVALUE_WEEKDAYSISINVALID = 'InvalidParameterValue.WeekDaysIsInvalid'

# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'

# Binding exceeds the upper limit.
LIMITEXCEEDED_EXCEEDUPPERLIMIT = 'LimitExceeded.ExceedUpperLimit'

# The instance to bind must be empty.
LIMITEXCEEDED_INSTANCENOTEMPTY = 'LimitExceeded.InstanceNotEmpty'

# The requested capacity is not a purchasable spec (memSize must be a
# multiple of 1024, in MB).
LIMITEXCEEDED_INVALIDMEMSIZE = 'LimitExceeded.InvalidMemSize'

# The number of instances requested in one purchase is outside the allowed
# sales-quantity range.
LIMITEXCEEDED_INVALIDPARAMETERGOODSNUMNOTINRANGE = 'LimitExceeded.InvalidParameterGoodsNumNotInRange'

# The requested capacity is outside the purchasable range.
LIMITEXCEEDED_MEMSIZENOTINRANGE = 'LimitExceeded.MemSizeNotInRange'

# Purchase period exceeds the maximum of 3 years.
LIMITEXCEEDED_PERIODEXCEEDMAXLIMIT = 'LimitExceeded.PeriodExceedMaxLimit'

# Illegal purchase period; the minimum is 1 month.
LIMITEXCEEDED_PERIODLESSTHANMINLIMIT = 'LimitExceeded.PeriodLessThanMinLimit'

# The replication group is locked.
LIMITEXCEEDED_REPLICATIONGROUPLOCKED = 'LimitExceeded.ReplicationGroupLocked'

# The instance is locked by another process.
RESOURCEINUSE_INSTANCEBEENLOCKED = 'ResourceInUse.InstanceBeenLocked'

# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'

# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'

# The uin value is empty.
RESOURCENOTFOUND_ACCOUNTDOESNOTEXISTS = 'ResourceNotFound.AccountDoesNotExists'

# No Redis instance found for the given serialId.
RESOURCENOTFOUND_INSTANCENOTEXISTS = 'ResourceNotFound.InstanceNotExists'

# The instance cannot be found.
RESOURCENOTFOUND_INSTANCENOTFOUND = 'ResourceNotFound.InstanceNotFound'

# The requested order number does not exist.
RESOURCEUNAVAILABLE_ACCOUNTBALANCENOTENOUGH = 'ResourceUnavailable.AccountBalanceNotEnough'

# The backup is locked by another task; the operation cannot be performed
# for now.
RESOURCEUNAVAILABLE_BACKUPLOCKEDERROR = 'ResourceUnavailable.BackupLockedError'

# Abnormal backup status; the operation cannot be performed for now. The
# backup may have expired or been deleted.
RESOURCEUNAVAILABLE_BACKUPSTATUSABNORMAL = 'ResourceUnavailable.BackupStatusAbnormal'

# Invalid backup status; it may be in another workflow or may have been
# deleted.
RESOURCEUNAVAILABLE_BACKUPSTATUSINVALID = 'ResourceUnavailable.BackupStatusInvalid'

# Failed to call the backend API.
RESOURCEUNAVAILABLE_CALLOSSERROR = 'ResourceUnavailable.CallOssError'

# Failed to get security group information.
RESOURCEUNAVAILABLE_GETSECURITYERROR = 'ResourceUnavailable.GetSecurityError'

# Instance configuration error.
RESOURCEUNAVAILABLE_INSTANCECONFERROR = 'ResourceUnavailable.InstanceConfError'

# The instance has already been reclaimed.
RESOURCEUNAVAILABLE_INSTANCEDELETED = 'ResourceUnavailable.InstanceDeleted'

# The instance has expired (is isolated).
RESOURCEUNAVAILABLE_INSTANCEISOLATED = 'ResourceUnavailable.InstanceIsolated'

# The Redis instance is already locked by another process.
RESOURCEUNAVAILABLE_INSTANCELOCKEDERROR = 'ResourceUnavailable.InstanceLockedError'

# The order does not exist.
RESOURCEUNAVAILABLE_INSTANCENODEAL = 'ResourceUnavailable.InstanceNoDeal'

# The instance status does not support the operation.
RESOURCEUNAVAILABLE_INSTANCENOTSUPPORTOPERATION = 'ResourceUnavailable.InstanceNotSupportOperation'

# Instance state error.
RESOURCEUNAVAILABLE_INSTANCESTATEERROR = 'ResourceUnavailable.InstanceStateError'

# Abnormal Redis status; the corresponding workflow cannot run.
RESOURCEUNAVAILABLE_INSTANCESTATUSABNORMAL = 'ResourceUnavailable.InstanceStatusAbnormal'

# Abnormal instance status; the corresponding operation cannot be performed.
RESOURCEUNAVAILABLE_INSTANCESTATUSERROR = 'ResourceUnavailable.InstanceStatusError'

# The instance is already locked by another process.
RESOURCEUNAVAILABLE_INSTANCEUNLOCKEDERROR = 'ResourceUnavailable.InstanceUnLockedError'

# Insufficient VIP resources in the VPC network.
RESOURCEUNAVAILABLE_NOENOUGHVIPINVPC = 'ResourceUnavailable.NoEnoughVipInVPC'

# The requested region does not provide the Redis service yet.
RESOURCEUNAVAILABLE_NOREDISSERVICE = 'ResourceUnavailable.NoRedisService'

# The requested region does not yet provide the requested type of Redis
# service.
RESOURCEUNAVAILABLE_NOTYPEIDREDISSERVICE = 'ResourceUnavailable.NoTypeIdRedisService'

# The corresponding type is sold out in this region.
RESOURCEUNAVAILABLE_SALEOUT = 'ResourceUnavailable.SaleOut'

# The product has not been integrated with security groups yet.
RESOURCEUNAVAILABLE_SECURITYGROUPNOTSUPPORTED = 'ResourceUnavailable.SecurityGroupNotSupported'

# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'

# No CAM permission.
UNAUTHORIZEDOPERATION_NOCAMAUTHED = 'UnauthorizedOperation.NoCAMAuthed'

# The user is not in the whitelist.
UNAUTHORIZEDOPERATION_USERNOTINWHITELIST = 'UnauthorizedOperation.UserNotInWhiteList'

# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'

# Redis cluster edition does not allow security group access.
UNSUPPORTEDOPERATION_CLUSTERINSTANCEACCESSEDDENY = 'UnsupportedOperation.ClusterInstanceAccessedDeny'

# The replication group is under inspection.
UNSUPPORTEDOPERATION_INSPECTION = 'UnsupportedOperation.Inspection'

# The current operation is not supported.
UNSUPPORTEDOPERATION_INSTANCENOTOPERATION = 'UnsupportedOperation.InstanceNotOperation'

# Invalid auto-renew flag.
UNSUPPORTEDOPERATION_ISAUTORENEWERROR = 'UnsupportedOperation.IsAutoRenewError'

# The instance (proxy) version is too low.
UNSUPPORTEDOPERATION_LIMITPROXYVERSION = 'UnsupportedOperation.LimitProxyVersion'

# Only cluster edition instances support exporting backups.
UNSUPPORTEDOPERATION_ONLYCLUSTERINSTANCECANEXPORTBACKUP = 'UnsupportedOperation.OnlyClusterInstanceCanExportBackup'
| {
"content_hash": "51d5916b04149466e92a696b79e06729",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 115,
"avg_line_length": 30.667763157894736,
"alnum_prop": 0.8545532553898959,
"repo_name": "tzpBingo/github-trending",
"id": "45142b1383400f19df945dad724615a477cae7cf",
"size": "12032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/redis/v20180412/errorcodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
import functools
import inspect
import os
import re
import sys
import threading
from collections import OrderedDict
import six
# noinspection PyUnresolvedReferences
from cheap_repr import cheap_repr, find_repr_function, try_register_repr
from snoop.utils import my_cheap_repr, ArgDefaultDict, iscoroutinefunction, \
truncate_list, ensure_tuple, is_comprehension_frame, no_args_decorator, pp_name_prefix, NO_BIRDSEYE, \
_register_cheap_reprs, PY34
from .formatting import Event, Source
from .variables import CommonVariable, Exploding, BaseVariable
# Raise cheap_repr's default truncation limits so values shown in trace
# output are not cut short; effectively disable repr suppression.
find_repr_function(six.text_type).maxparts = 100
find_repr_function(six.binary_type).maxparts = 100
find_repr_function(object).maxparts = 100
find_repr_function(int).maxparts = 999999
cheap_repr.suppression_threshold = 999999
class FrameInfo(object):
    """Per-frame bookkeeping for the tracer.

    Remembers the reprs of a frame's variables between trace events so
    that update_variables() can report only the values that changed.
    """

    def __init__(self, frame):
        self.frame = frame
        # Maps variable source (name or watched expression) -> last repr.
        self.local_reprs = {}
        self.last_line_no = frame.f_lineno
        # For comprehension frames: name -> list of reprs accumulated over
        # iterations; reported only on 'return'/'exception' events.
        self.comprehension_variables = OrderedDict()
        self.source = Source.for_frame(frame)
        code = frame.f_code
        self.is_generator = code.co_flags & inspect.CO_GENERATOR
        self.had_exception = False
        if is_comprehension_frame(frame):
            # Code names look like '<listcomp>'; render e.g.
            # 'List comprehension' for display.
            self.comprehension_type = (
                re.match(r'<(\w+)comp>', code.co_name).group(1).title()
                + u' comprehension'
            )
        else:
            self.comprehension_type = ''
        self.is_ipython_cell = (
            code.co_name == '<module>' and
            code.co_filename.startswith('<ipython-input-')
        )

    def update_variables(self, watch, watch_extras, event, whitelist):
        """Return a list of (source, repr) pairs that changed since last call.

        In comprehension frames the per-iteration reprs are accumulated
        instead, and the collected history is returned only for 'return'
        and 'exception' events (an empty list otherwise).
        """
        self.last_line_no = self.frame.f_lineno
        old_local_reprs = self.local_reprs
        self.local_reprs = OrderedDict(
            (source, my_cheap_repr(value))
            for source, value in
            self.get_local_reprs(watch, watch_extras, whitelist)
        )

        if self.comprehension_type:
            for name, value_repr in self.local_reprs.items():
                values = self.comprehension_variables.setdefault(name, [])
                # Record only when the value actually changed; cap the
                # history (truncate_list elides the middle of long lists).
                if not values or values[-1] != value_repr:
                    values.append(value_repr)
                    values[:] = truncate_list(values, 11)
            if event in ('return', 'exception'):
                return [
                    (name, ', '.join(values))
                    for name, values in self.comprehension_variables.items()
                ]
            else:
                return []

        variables = []
        for name, value_repr in self.local_reprs.items():
            if name not in old_local_reprs or old_local_reprs[name] != value_repr:
                variables.append((name, value_repr))
        return variables

    def get_local_reprs(self, watch, watch_extras, whitelist):
        """Yield (source, value) pairs for the frame's locals and watches."""
        frame = self.frame
        code = frame.f_code

        # Locals, filtered by the whitelist (if given) and excluding
        # snoop's internal `pp`-helper names.
        var_names = [
            key for key in frame.f_locals
            if whitelist is None or key in whitelist
            if not key.startswith(pp_name_prefix)
        ]

        # Order names as the code object declares them; names not declared
        # there keep their relative order after the declared ones.
        vars_order = code.co_varnames + code.co_cellvars + code.co_freevars + tuple(var_names)
        var_names.sort(key=vars_order.index)

        result_items = [
            (key, frame.f_locals[key])
            for key in var_names
        ]

        for variable in watch:
            result_items += sorted(variable.items(frame))

        for source, value in result_items:
            yield source, value

            # Each watch_extra may derive an additional (source, value)
            # pair from the one just yielded; failures are deliberately
            # swallowed so a broken extra cannot kill the trace.
            for extra in watch_extras:
                try:
                    pair = extra(source, value)
                except Exception:
                    pass
                else:
                    if pair is not None:
                        assert len(pair) == 2, "Watch extra must return pair or None"
                        yield pair
# Per-thread storage; holds the stack of trace functions that tracing
# temporarily replaces (see Tracer.__enter__/__exit__).
thread_global = threading.local()

# Directories whose frames count as tracer-internal and are excluded from
# tracing; seeded with the directory containing this package's own code.
internal_directories = (os.path.dirname((lambda: 0).__code__.co_filename),)

try:
    # noinspection PyUnresolvedReferences
    import birdseye
except ImportError:
    pass
else:
    # If birdseye is installed, treat its code as internal too.
    internal_directories += (os.path.dirname(birdseye.__file__),)
class TracerMeta(type):
    """Metaclass letting the Tracer *class itself* act as a decorator and a
    context manager, by delegating to a shared no-argument default instance.
    """

    def __new__(mcs, *args, **kwargs):
        result = super(TracerMeta, mcs).__new__(mcs, *args, **kwargs)
        # Each class using this metaclass gets a default instance that
        # backs bare `@snoop` / `with snoop:` usage.
        result.default = result()
        return result

    def __call__(cls, *args, **kwargs):
        # Bare decoration (a single callable, no keywords) goes straight to
        # the default instance; otherwise construct a new instance normally.
        if no_args_decorator(args, kwargs):
            return cls.default(args[0])
        else:
            return super(TracerMeta, cls).__call__(*args, **kwargs)

    def __enter__(self):
        # `with snoop:` on the class — delegate to the default instance.
        # context=1 skips this metaclass frame when locating the caller.
        return self.default.__enter__(context=1)

    def __exit__(self, *args):
        return self.default.__exit__(*args, context=1)
@six.add_metaclass(TracerMeta)
class Tracer(object):
    """Decorator / context manager that traces execution line by line.

    NOTE(review): ``self.config`` is read throughout but never assigned here —
    presumably attached by the owning Config object after construction; verify
    against the rest of the module.
    """

    def __init__(
            self,
            watch=(),
            watch_explode=(),
            depth=1,
    ):
        # Normalize watch specs: plain strings become CommonVariable /
        # Exploding wrappers; BaseVariable instances pass through untouched.
        self.watch = [
            v if isinstance(v, BaseVariable) else CommonVariable(v)
            for v in ensure_tuple(watch)
        ] + [
            v if isinstance(v, BaseVariable) else Exploding(v)
            for v in ensure_tuple(watch_explode)
        ]
        # Lazily creates a FrameInfo per traced frame.
        self.frame_infos = ArgDefaultDict(FrameInfo)
        self.depth = depth
        assert self.depth >= 1
        # Code objects of decorated functions and frames entered via `with`.
        self.target_codes = set()
        self.target_frames = set()
        self.variable_whitelist = None

    def __call__(self, function):
        """Decorate *function* so every call is traced."""
        if iscoroutinefunction(function):
            raise NotImplementedError("coroutines are not supported, sorry!")

        self.target_codes.add(function.__code__)

        @functools.wraps(function)
        def simple_wrapper(*args, **kwargs):
            with self:
                return function(*args, **kwargs)

        @functools.wraps(function)
        def generator_wrapper(*args, **kwargs):
            # Re-enter tracing around each resumption of the generator, and
            # relay values/exceptions between caller and generator manually.
            gen = function(*args, **kwargs)
            method, incoming = gen.send, None
            while True:
                with self:
                    try:
                        outgoing = method(incoming)
                    except StopIteration:
                        return
                try:
                    method, incoming = gen.send, (yield outgoing)
                except Exception as e:
                    method, incoming = gen.throw, e

        if inspect.isgeneratorfunction(function):
            return generator_wrapper
        else:
            return simple_wrapper

    def __enter__(self, context=0):
        """Start tracing; *context* skips wrapper stack frames when locating the caller."""
        if not self.config.enabled:
            return
        self.config.thread_local.__dict__.setdefault('depth', -1)
        calling_frame = sys._getframe(context + 1)
        if not self._is_internal_frame(calling_frame):
            calling_frame.f_trace = self.trace
            self.target_frames.add(calling_frame)
            self.config.last_frame = calling_frame
            self.trace(calling_frame, 'enter', None)

        # Save whatever trace function was installed so __exit__ can restore it.
        stack = thread_global.__dict__.setdefault('original_trace_functions', [])
        stack.append(sys.gettrace())
        sys.settrace(self.trace)

    def __exit__(self, exc_type, exc_value, exc_traceback, context=0):
        if not self.config.enabled:
            return
        previous_trace = thread_global.original_trace_functions.pop()
        sys.settrace(previous_trace)
        calling_frame = sys._getframe(context + 1)
        # NOTE(review): PY34 guard — on Python 3.4, assigning None to f_trace
        # apparently misbehaves; confirm against the module's PY34 constant.
        if not (PY34 and previous_trace is None):
            calling_frame.f_trace = previous_trace
        self.trace(calling_frame, 'exit', None)
        self.target_frames.discard(calling_frame)
        self.frame_infos.pop(calling_frame, None)

    def _is_internal_frame(self, frame):
        # Frames from this package (or birdseye) are never traced.
        return frame.f_code.co_filename.startswith(internal_directories)

    def _is_traced_frame(self, frame):
        return frame.f_code in self.target_codes or frame in self.target_frames

    def trace(self, frame, event, arg):
        """sys.settrace callback; also invoked manually with synthetic
        'enter'/'exit' events from __enter__/__exit__."""
        if not self._is_traced_frame(frame):
            if (
                    self.depth == 1
                    or self._is_internal_frame(frame)
            ) and not is_comprehension_frame(frame):
                return None
            else:
                # Walk up the stack (skipping comprehension frames, which
                # don't count toward depth) looking for a traced ancestor
                # within self.depth levels.
                candidate = frame
                i = 0
                while True:
                    if is_comprehension_frame(candidate):
                        candidate = candidate.f_back
                        continue
                    i += 1
                    if self._is_traced_frame(candidate):
                        break
                    candidate = candidate.f_back
                    if i >= self.depth or candidate is None or self._is_internal_frame(candidate):
                        return None

        thread_local = self.config.thread_local
        frame_info = self.frame_infos[frame]
        if event in ('call', 'enter'):
            thread_local.depth += 1
        elif self.config.last_frame and self.config.last_frame is not frame:
            # Control switched frames (e.g. generator resumed): emit a
            # line-only marker for the frame we're returning to.
            line_no = frame_info.last_line_no
            trace_event = Event(frame_info, event, arg, thread_local.depth, line_no=line_no)
            line = self.config.formatter.format_line_only(trace_event)
            self.config.write(line)

        if event == 'exception':
            frame_info.had_exception = True

        self.config.last_frame = frame
        trace_event = Event(frame_info, event, arg, thread_local.depth)
        # genexpr frames only report variables at return/exception to avoid
        # spamming one event per produced item.
        if not (frame.f_code.co_name == '<genexpr>' and event not in ('return', 'exception')):
            trace_event.variables = frame_info.update_variables(
                self.watch,
                self.config.watch_extras,
                event,
                self.variable_whitelist,
            )

        if event in ('return', 'exit'):
            del self.frame_infos[frame]
            thread_local.depth -= 1

        formatted = self.config.formatter.format(trace_event)
        self.config.write(formatted)
        # Returning self.trace keeps tracing active for nested scopes.
        return self.trace

    @staticmethod
    def load_ipython_extension(ipython_shell):
        """Register the %%snoop cell magic with an IPython shell."""
        from snoop.ipython import SnoopMagics
        ipython_shell.register_magics(SnoopMagics)
class Spy(object):
    """Decorator combining birdseye's ``eye`` with snoop's tracer.

    Raises at call time (not import time) when birdseye is unavailable, so
    the rest of the package works without it.
    """

    def __init__(self, config):
        self.config = config

    def __call__(self, *args, **kwargs):
        if NO_BIRDSEYE:
            raise Exception("birdseye doesn't support this version of Python")
        try:
            import birdseye
        except ImportError:
            raise Exception("You must install birdseye separately to use spy: pip install birdseye")

        # Decorator without parentheses
        if no_args_decorator(args, kwargs):
            return self._trace(args[0])

        # Decorator with parentheses and perhaps arguments
        def configured(func):
            return self._trace(func, *args, **kwargs)

        return configured

    def _trace(self, func, *args, **kwargs):
        # noinspection PyUnresolvedReferences
        from birdseye import eye

        # birdseye first, then snoop wraps the birdseye-instrumented function.
        traced = self.config.snoop(*args, **kwargs)(eye(func))
        _register_cheap_reprs()  # Override birdseye in case it's outdated

        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            # Fall through to the raw function when tracing is disabled.
            target = traced if self.config.enabled else func
            return target(*func_args, **func_kwargs)

        return wrapper
| {
"content_hash": "f613c63e94f2487373521ba3d215a2a8",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 106,
"avg_line_length": 34.246200607902736,
"alnum_prop": 0.5695393627407473,
"repo_name": "alexmojaki/snoop",
"id": "528cf687270b3a15bfacf697ab2f78c1d665a4a5",
"size": "11267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snoop/tracer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76278"
},
{
"name": "Shell",
"bytes": "615"
}
],
"symlink_target": ""
} |
""" BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
Model from official source: https://github.com/microsoft/unilm/tree/master/beit
and
https://github.com/microsoft/unilm/tree/master/beit2
@inproceedings{beit,
title={{BEiT}: {BERT} Pre-Training of Image Transformers},
author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei},
booktitle={International Conference on Learning Representations},
year={2022},
url={https://openreview.net/forum?id=p-BhZSz59o4}
}
@article{beitv2,
title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers},
author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei},
year={2022},
eprint={2208.06366},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
At this point only the 1k fine-tuned classification weights and model configs have been added,
see original source above for pre-training models and procedure.
Modifications by / Copyright 2021 Ross Wightman, original copyrights below
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from functools import partial
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_
from .registry import register_model
from .vision_transformer import checkpoint_filter_fn
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
# Pretrained weight URLs and per-model input/normalization metadata, keyed by
# model entry-point name. BEiT v1 uses 0.5 mean/std; BEiT v2 uses the standard
# ImageNet statistics. *_in22k entries classify into the 21841-class 22k set.
default_cfgs = {
    'beit_base_patch16_224': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth'),
    'beit_base_patch16_384': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth',
        input_size=(3, 384, 384), crop_pct=1.0,
    ),
    'beit_base_patch16_224_in22k': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth',
        num_classes=21841,
    ),
    'beit_large_patch16_224': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth'),
    'beit_large_patch16_384': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth',
        input_size=(3, 384, 384), crop_pct=1.0,
    ),
    'beit_large_patch16_512': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth',
        input_size=(3, 512, 512), crop_pct=1.0,
    ),
    'beit_large_patch16_224_in22k': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth',
        num_classes=21841,
    ),
    'beitv2_base_patch16_224': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_base_patch16_224_in22k': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth',
        num_classes=21841,
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_large_patch16_224': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth',
        crop_pct=0.95,
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
    'beitv2_large_patch16_224_in22k': _cfg(
        url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth',
        num_classes=21841,
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
    ),
}
def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor:
    """Build the relative-position index table used by BEiT attention.

    Returns a (Wh*Ww + 1, Wh*Ww + 1) integer tensor: entry [i, j] indexes into
    the learned bias table for the relative offset between tokens i and j.
    Row/column 0 belong to the cls token, which gets three dedicated indices
    (cls->token, token->cls, cls->cls) at the end of the table.
    """
    wh, ww = window_size[0], window_size[1]
    num_relative_distance = (2 * wh - 1) * (2 * ww - 1) + 3
    window_area = wh * ww

    # Pairwise (dy, dx) offsets between all patch positions in the window.
    grid = torch.stack(torch.meshgrid([torch.arange(wh), torch.arange(ww)]))  # 2, Wh, Ww
    flat = torch.flatten(grid, 1)  # 2, Wh*Ww
    rel = flat[:, :, None] - flat[:, None, :]  # 2, Wh*Ww, Wh*Ww
    rel = rel.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2

    # Shift offsets to start at 0, then flatten (dy, dx) into a single index.
    rel[:, :, 0] += wh - 1
    rel[:, :, 1] += ww - 1
    rel[:, :, 0] *= 2 * ww - 1

    index = torch.zeros(size=(window_area + 1,) * 2, dtype=rel.dtype)
    index[1:, 1:] = rel.sum(-1)  # Wh*Ww, Wh*Ww
    index[0, 0:] = num_relative_distance - 3  # cls -> any token
    index[0:, 0] = num_relative_distance - 2  # any token -> cls
    index[0, 0] = num_relative_distance - 1  # cls -> cls
    return index
class Attention(nn.Module):
    """Multi-head self-attention with optional BEiT-style relative position bias.

    Differs from the vanilla ViT attention in two ways: the key projection has
    no learnable bias (a zero buffer stands in so q/k/v biases concatenate),
    and an optional per-window relative position bias is added to the logits.
    """

    def __init__(
            self, dim, num_heads=8, qkv_bias=False, attn_drop=0.,
            proj_drop=0., window_size=None, attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = head_dim ** -0.5

        # Fused qkv projection; bias handled manually below so k stays bias-free.
        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            # Non-persistent zero buffer: k has no learnable bias in BEiT.
            self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.k_bias = None
            self.v_bias = None

        if window_size:
            self.window_size = window_size
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            self.register_buffer("relative_position_index", gen_relative_position_index(window_size))
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def _get_rel_pos_bias(self):
        # Gather per-pair biases from the learned table and reshape for
        # broadcasting over the batch dimension.
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1] + 1,
                self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        return relative_position_bias.unsqueeze(0)

    def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
        """x: (B, N, C). *shared_rel_pos_bias* comes from a model-level
        RelativePositionBias module when per-block tables are disabled."""
        B, N, C = x.shape
        qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        if self.relative_position_bias_table is not None:
            attn = attn + self._get_rel_pos_bias()
        if shared_rel_pos_bias is not None:
            attn = attn + shared_rel_pos_bias

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Pre-norm transformer block with optional LayerScale (gamma_1/gamma_2)
    residual scaling, as used by BEiT."""

    def __init__(
            self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
            drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
            window_size=None, attn_head_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
            window_size=window_size, attn_head_dim=attn_head_dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        # LayerScale: learnable per-channel residual scaling, enabled when
        # init_values is truthy.
        if init_values:
            self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
            self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
class RelativePositionBias(nn.Module):
    """Model-level relative position bias shared across all blocks
    (used when ``use_shared_rel_pos_bias`` is enabled on Beit)."""

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        self.window_area = window_size[0] * window_size[1]
        num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))
        # trunc_normal_(self.relative_position_bias_table, std=.02)
        self.register_buffer("relative_position_index", gen_relative_position_index(window_size))

    def forward(self):
        # Look up per-pair biases and move heads to the leading dimension.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_area + 1, self.window_area + 1, -1)  # Wh*Ww,Wh*Ww,nH
        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
class Beit(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage

    BEiT variant of ViT: optional absolute position embedding, optional
    per-block or shared relative position bias, LayerScale, and an
    avg-pool + fc_norm classification head when global_pool='avg'.
    """

    def __init__(
            self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='avg',
            embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0.,
            attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
            init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
            head_init_scale=0.001):
        super().__init__()
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.grad_checkpointing = False

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Absolute position embedding is optional in BEiT (off for pretrained configs).
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None
        self.pos_drop = nn.Dropout(p=drop_rate)

        if use_shared_rel_pos_bias:
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.grid_size, num_heads=num_heads)
        else:
            self.rel_pos_bias = None

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values, window_size=self.patch_embed.grid_size if use_rel_pos_bias else None)
            for i in range(depth)])

        # avg pooling uses a post-pool fc_norm instead of a final trunk norm.
        use_fc_norm = self.global_pool == 'avg'
        self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_fc_norm else None
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        # trunc_normal_(self.mask_token, std=.02)

        self.fix_init_weight()
        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=.02)
            # Shrink head weights so fine-tuning starts near-uniform.
            self.head.weight.data.mul_(head_init_scale)
            self.head.bias.data.mul_(head_init_scale)

    def fix_init_weight(self):
        # Depth-dependent rescaling of residual-branch outputs (BEiT recipe).
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Position embeddings, cls token, and all relative bias tables are
        # excluded from weight decay.
        nwd = {'pos_embed', 'cls_token'}
        for n, _ in self.named_parameters():
            if 'relative_position_bias_table' in n:
                nwd.add(n)
        return nwd

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        # Parameter grouping for layer-wise LR decay / freezing.
        matcher = dict(
            stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias',  # stem and embed
            blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
        )
        return matcher

    @torch.jit.ignore
    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=None):
        self.num_classes = num_classes
        if global_pool is not None:
            self.global_pool = global_pool
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        # Shared bias (if any) is computed once and passed to every block.
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
            else:
                x = blk(x, shared_rel_pos_bias=rel_pos_bias)
        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        if self.fc_norm is not None:
            # avg pool over patch tokens (cls token excluded), then norm.
            x = x[:, 1:].mean(dim=1)
            x = self.fc_norm(x)
        else:
            x = x[:, 0]
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x
def _beit_checkpoint_filter_fn(state_dict, model):
    """Unwrap the BEiT v2 'module' nesting, then apply the standard ViT
    checkpoint filter (key remapping, pos-embed resize, etc.)."""
    # beit v2 didn't strip module
    sd = state_dict['module'] if 'module' in state_dict else state_dict
    return checkpoint_filter_fn(sd, model)
def _create_beit(variant, pretrained=False, **kwargs):
    """Instantiate a Beit model for *variant*, optionally loading pretrained
    weights through the shared timm builder."""
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Beit models.')

    # FIXME an updated filter fn needed to interpolate rel pos emb if fine tuning to diff model sizes
    return build_model_with_cfg(
        Beit, variant, pretrained,
        pretrained_filter_fn=_beit_checkpoint_filter_fn,
        **kwargs)
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs):
    """BEiT base model, patch size 16, 224x224 input."""
    return _create_beit(
        'beit_base_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs)
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs):
    """BEiT base model, patch size 16, 384x384 input."""
    return _create_beit(
        'beit_base_patch16_384', pretrained=pretrained,
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs)
@register_model
def beit_base_patch16_224_in22k(pretrained=False, **kwargs):
    """BEiT base model, patch size 16, 224x224 input, ImageNet-22k head."""
    return _create_beit(
        'beit_base_patch16_224_in22k', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs)
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs):
    """BEiT large model, patch size 16, 224x224 input."""
    return _create_beit(
        'beit_large_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs):
    """BEiT large model, patch size 16, 384x384 input."""
    return _create_beit(
        'beit_large_patch16_384', pretrained=pretrained,
        img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs):
    """BEiT large model, patch size 16, 512x512 input."""
    return _create_beit(
        'beit_large_patch16_512', pretrained=pretrained,
        img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beit_large_patch16_224_in22k(pretrained=False, **kwargs):
    """BEiT large model, patch size 16, 224x224 input, ImageNet-22k head."""
    return _create_beit(
        'beit_large_patch16_224_in22k', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beitv2_base_patch16_224(pretrained=False, **kwargs):
    """BEiT v2 base model, patch size 16, 224x224 input."""
    return _create_beit(
        'beitv2_base_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beitv2_base_patch16_224_in22k(pretrained=False, **kwargs):
    """BEiT v2 base model, patch size 16, 224x224 input, ImageNet-22k head."""
    return _create_beit(
        'beitv2_base_patch16_224_in22k', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beitv2_large_patch16_224(pretrained=False, **kwargs):
    """BEiT v2 large model, patch size 16, 224x224 input."""
    return _create_beit(
        'beitv2_large_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
@register_model
def beitv2_large_patch16_224_in22k(pretrained=False, **kwargs):
    """BEiT v2 large model, patch size 16, 224x224 input, ImageNet-22k head."""
    return _create_beit(
        'beitv2_large_patch16_224_in22k', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs)
| {
"content_hash": "ab040363ab8f009625469cfc42209358",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 129,
"avg_line_length": 43.354581673306775,
"alnum_prop": 0.6368314648042639,
"repo_name": "rwightman/pytorch-image-models",
"id": "1f6bf82b577d6bfcb2524fd39645aa719c747763",
"size": "21764",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "timm/models/beit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2368284"
},
{
"name": "Shell",
"bytes": "108"
}
],
"symlink_target": ""
} |
"""
Forms for PartyList app.
"""
from django import forms
from django.forms import ModelForm
from .models import BlacklistedGuest, GreylistedGuest, Guest, Party
class GuestForm(ModelForm):
    """
    Form for adding a guest on the client.
    """

    # Guest's display name; mirrors Guest.name.
    name = forms.CharField(max_length=100)
    # Free-text gender string; mirrors Guest.gender.
    gender = forms.CharField(max_length=10)

    class Meta:
        model = Guest
        fields = ["name", "gender"]
class PartyForm(ModelForm):
    """
    Form for adding a party on the client.
    """

    # Party name; mirrors Party.name.
    name = forms.CharField(max_length=100)
    # Date the party takes place; mirrors Party.date.
    date = forms.DateField()

    class Meta:
        model = Party
        fields = ["name", "date"]
class BlacklistForm(ModelForm):
    """
    Form for adding a guest to the blacklist.
    """

    name = forms.CharField(max_length=100, label="Full Name")
    # Physical description / identifying info for door staff.
    details = forms.CharField(
        max_length=1000,
        label="Identifying Details",
        required=True,
    )
    # Why the guest was blacklisted.
    reason = forms.CharField(
        max_length=1000,
        label="Reason",
        required=True,
    )

    class Meta:
        model = BlacklistedGuest
        fields = ["name", "details", "reason"]
class GreylistForm(ModelForm):
    """
    Form for adding a guest to the greylist.
    """

    name = forms.CharField(max_length=100, label="Full Name")
    # Physical description / identifying info for door staff.
    details = forms.CharField(
        max_length=1000,
        label="Identifying Details",
        required=True,
    )
    # Why the guest was greylisted.
    reason = forms.CharField(
        max_length=1000,
        label="Reason",
        required=True,
    )

    class Meta:
        model = GreylistedGuest
        fields = ["name", "details", "reason"]
class EditPartyInfoForm(ModelForm):
    """
    Form for editing a party
    """

    name = forms.CharField(max_length=100)
    date = forms.DateField()
    # Optional uploaded jobs file; mirrors Party.jobs.
    jobs = forms.FileField(required=False)

    class Meta:
        model = Party
        fields = ["name", "date", "jobs"]
| {
"content_hash": "2a3254e874a7eed91348c1fc64843541",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 67,
"avg_line_length": 20.857142857142858,
"alnum_prop": 0.5990516332982087,
"repo_name": "sigmapi-gammaiota/sigmapi-web",
"id": "3d31c6a53f182b6f88233d7b2937a606df5278ae",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sigmapiweb/apps/PartyList/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47173"
},
{
"name": "HTML",
"bytes": "265883"
},
{
"name": "JavaScript",
"bytes": "1338629"
},
{
"name": "Python",
"bytes": "335952"
},
{
"name": "SCSS",
"bytes": "44203"
},
{
"name": "Shell",
"bytes": "3928"
}
],
"symlink_target": ""
} |
'''
Generates a Slurm script for running blast against the probes on various fasta files
@author: Andrew Robinson
'''
import sys, argparse, os, subprocess
import common
def main(argv):
    """Generate a Slurm job script for blasting probe sequences against raw
    fasta files and print it to stdout.

    argv -- full command-line argument vector (argv[0] is the program name).
    Returns 0 on success, 1 when no input files match.
    """
    parser = argparse.ArgumentParser(description='Generates a Slurm script for running blast against the probes on various fasta files')
    parser.add_argument("-j", "--cores", nargs=1, metavar='N', type=int, default=[1], help="The maximum number of cores to use, 0=exclusive. [Default: 1]")
    parser.add_argument("-p", "--partition", nargs=1, metavar="partition", default=["8hour"], choices=['bigmem', '8hour', 'compute', 'long'], help="The partition (or queue) to submit job to")
    parser.add_argument("rawfile", nargs="+", help="Files or directory of raw fastq/a sequences to process. If directory, -f filter is used to select files within.")
    parser.add_argument("-f", "--dir-filter", nargs=1, metavar='filter', default=["*.f*a"], help="A filter to match files when searching a directory. [Default: \"*.f*a\"]")
    parser.add_argument("-t", "--filename-trim", nargs=1, metavar='trim', default=[".*"], help="Bash REGEX to trim extension from end of filename. [Default: \".*\"]")
    parser.add_argument("-d", "--probe-database", nargs=1, metavar='database', default=["probes.fasta"], help="Filename of probes database fasta file [Default: probes.fasta]")
    args = parser.parse_args(argv[1:])

    common.writecmd(argv)

    # expand files, bailing out early when nothing matches
    rawfiles = common.expandFiles(args.rawfile, args.dir_filter[0])
    if len(rawfiles) == 0:
        sys.stderr.write("No RAW files found: '%s'\n" % (" ".join(args.rawfile)))
        return 1

    ## make the variable parts of script
    # (renamed from 'vars' to avoid shadowing the builtin)
    template_vars = {}
    template_vars["rawfiles"] = " ".join(rawfiles)
    if args.cores[0] == 0:
        # 0 means exclusive node access; assume 16 cores per node
        template_vars["slurmheader"] = common.makeExclusiveHeader(partition=args.partition[0])
        template_vars["cores"] = "16"
    else:
        template_vars["slurmheader"] = common.makeHeader(partition=args.partition[0], ntasks=args.cores[0])
        template_vars["cores"] = args.cores[0]
    # record tool versions so the generated script pins the right modules
    template_vars["blastversion"] = subprocess.check_output(["carlaseq_module_version", "blast+"]).rstrip()
    template_vars["biostreamtoolsversion"] = subprocess.check_output(["carlaseq_module_version", "biostreamtools-gcc"]).rstrip()
    template_vars["trim"] = args.filename_trim[0]
    template_vars["probes"] = args.probe_database[0]
    template_vars["CMD"] = " ".join(argv)

    jobscript = common.loadTemplate("blastprobes.slurm")
    # single-argument print() is valid in both Python 2 and 3
    print(jobscript.format(**template_vars))

    return 0
if __name__ == "__main__":
    # With no arguments, show the usage/help text instead of an error.
    if len(sys.argv) == 1:
        sys.argv.append("-h")
    sys.exit(main(sys.argv))
| {
"content_hash": "46f7319ff109b4785c5e4cca53806adf",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 191,
"avg_line_length": 43.90163934426229,
"alnum_prop": 0.6463778939507094,
"repo_name": "molecularbiodiversity/carlaseq",
"id": "aa082f3c973dac7962cbe8a1d8e8033426fce2db",
"size": "2718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/carlaseq_make_blastprobes_job.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29771"
},
{
"name": "Shell",
"bytes": "2297"
}
],
"symlink_target": ""
} |
import os
import glob
import math
import subprocess
import re
import sys
import string
from decimal import Decimal
from astropy.io import fits
from astropy import wcs
from dateutil import parser
def logme(message):
    """Append *message* to the global log file and echo it to stdout.

    BUG FIX: the parameter was named ``str``, shadowing the builtin; renamed
    to ``message`` (all call sites pass it positionally). Single-argument
    print() is valid in both Python 2 and 3.
    """
    log.write(message + "\n")
    print(message)
    return
# --- Script configuration and setup (edit the constants below per site) ---
#MODIFY THESE FIELDS AS NEEDED!
#input path *with* ending forward slash
input_path='./'
#output path *with* ending forward slash
output_path='./wcs/'
#log file name
log_fname = 'log.plateme.txt'
#suffix for output files, if any...
output_suffix='.wcs'
#path to astrometry.net solve_field executable
solve_field_path='/usr/local/astrometry/bin/solve-field'
#image counter
count = 0
#does output directory exist? If not, create it
# NOTE(review): bare except swallows all errors here, not only "already
# exists" — acceptable for a one-off script, but a real permission problem
# would surface later at file-write time.
try:
    os.mkdir(output_path)
except:
    pass
# log file handle is module-global; logme() writes through it
log=open(log_fname, 'a+')
#get a list of all FITS files in the input directory
im=glob.glob(input_path+'*.fits')+glob.glob(input_path+'*.fit')
#loop through all qualifying files and perform plate-solving
for new in sorted(im):
    error_flag = False
    #remove spaces from filename
    # (solve-field is invoked through a shell string below, so spaces and
    # parentheses in filenames would break the command)
    new_nospace = string.replace(new, ' ', '_')
    new_nospace = string.replace(new_nospace, '(', '')
    new_nospace = string.replace(new_nospace, ')', '')
    os.rename(new, new_nospace)
    new = new_nospace
    logme("\nSolving %s"%(new))
    #pull out RA/DEC from the FITS header, if they exist
    # NOTE(review): the file is closed before the header is read — this works
    # because astropy keeps headers in memory, but reordering would be safer.
    d1=fits.open('%s'%(new))
    d1.close()
    h1=d1[0].header
    # try RA/DEC first, fall back to the OBJCTRA/OBJCTDEC keywords
    try:
        ra=h1['RA']
        dec=h1['DEC']
    except KeyError:
        ra=h1['OBJCTRA']
        dec=h1['OBJCTDEC']
    # convert space-separated sexagesimal values to colon-separated form
    raA=''
    decA=''
    for j in range(0,len(ra)):
        if ra[j]==' ':
            raA+=':'
        else:
            raA+=ra[j]
    for j in range(0,len(dec)):
        if dec[j]==' ':
            decA+=':'
        else:
            decA+=dec[j]
    ra=raA
    dec=decA
    #plate solve this image, using RA/DEC from FITS header
    # hint the solver with the header coordinates (10 deg search radius,
    # 30 s CPU limit) to speed up the match
    output = subprocess.check_output(solve_field_path + ' --no-fits2fits --overwrite --downsample 2 --guess-scale --ra %s --dec %s --radius 10.0 --cpulimit 30 --no-plots '%(ra,dec)+'"%s"'%(new), shell=True)
    log.write(output)
    #print output
    #remove astrometry.net temporary files
    os.system("find . -name '*.xyls' -delete;")
    os.system("find . -name '*.axy' -delete;")
    os.system("find . -name '*.corr' -delete;")
    os.system("find . -name '*.match' -delete;")
    os.system("find . -name '*.rdls' -delete;")
    os.system("find . -name '*.solved' -delete;")
    os.system("find . -name '*.wcs' -delete;")
    os.system("find . -name '*.png' -delete;")
    # move the solved image (<name>.new) into the output dir as <name>.wcs.fits
    output_file = "%s"%(new.rsplit('.',1)[0])+output_suffix+".fits"
    output_file = output_file.rsplit('/',1)[1]
    output_file = output_path+output_file
    logme("Writing solution to "+output_file)
    os.system('mv %s.new %s'%(new.rsplit('.',1)[0],output_file))
    count += 1
logme("\nComplete. Processed %d of %d files."%(count, len(im)))
log.close()
"content_hash": "245fdeda3e9aa5f3eed4f3d010177dea",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 206,
"avg_line_length": 28.35238095238095,
"alnum_prop": 0.598925092374874,
"repo_name": "mcnowinski/various-and-sundry",
"id": "c425bd710ef585b456b9bcd560e11b96a0a0650a",
"size": "2977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightcurve/plateme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1682798"
},
{
"name": "C++",
"bytes": "1174494"
},
{
"name": "HTML",
"bytes": "110159"
},
{
"name": "Java",
"bytes": "1455648"
},
{
"name": "Jupyter Notebook",
"bytes": "220401"
},
{
"name": "Objective-C",
"bytes": "1815"
},
{
"name": "Python",
"bytes": "629215"
},
{
"name": "Shell",
"bytes": "45546"
}
],
"symlink_target": ""
} |
import datetime
from typing import Dict, List, Optional, TYPE_CHECKING, Union
from .. import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class AADProperties(_serialization.Model):
    """Azure Active Directory property bag; every field is optional.

    :ivar service_principal_client_id:
    :vartype service_principal_client_id: str
    :ivar tenant_id:
    :vartype tenant_id: str
    :ivar authority:
    :vartype authority: str
    :ivar audience:
    :vartype audience: str
    :ivar service_principal_object_id:
    :vartype service_principal_object_id: str
    """

    # Wire-format key and type for each model attribute; drives (de)serialization.
    _attribute_map = {
        "service_principal_client_id": {"key": "servicePrincipalClientId", "type": "str"},
        "tenant_id": {"key": "tenantId", "type": "str"},
        "authority": {"key": "authority", "type": "str"},
        "audience": {"key": "audience", "type": "str"},
        "service_principal_object_id": {"key": "servicePrincipalObjectId", "type": "str"},
    }

    def __init__(
        self,
        *,
        service_principal_client_id: Optional[str] = None,
        tenant_id: Optional[str] = None,
        authority: Optional[str] = None,
        audience: Optional[str] = None,
        service_principal_object_id: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments mirror the ivars documented on the class; all
        default to ``None``."""
        super().__init__(**kwargs)
        # Store the supplied values verbatim; no validation is performed here.
        self.tenant_id = tenant_id
        self.authority = authority
        self.audience = audience
        self.service_principal_client_id = service_principal_client_id
        self.service_principal_object_id = service_principal_object_id
class Resource(_serialization.Model):
    """Common ARM resource envelope.

    ``id``, ``name`` and ``type`` are populated by the server only and are
    ignored when sending a request; callers may supply ``location``, ``tags``
    and ``e_tag``.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    """

    # Server-populated fields are marked read-only for the serializer.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        """
        super().__init__(**kwargs)
        # Read-only fields start as None; deserialization fills them in.
        self.id = None
        self.name = None
        self.type = None
        # Caller-settable fields.
        self.e_tag = e_tag
        self.location = location
        self.tags = tags
class AADPropertiesResource(Resource):
    """ARM resource wrapper that carries an :class:`AADProperties` payload.

    Variables are only populated by the server, and will be ignored when
    sending a request. The envelope fields (``id``/``name``/``type``/
    ``location``/``tags``/``e_tag``) behave as documented on :class:`Resource`.

    :ivar properties: AADPropertiesResource properties.
    :vartype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AADProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "AADProperties"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.AADProperties"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: AADPropertiesResource properties.
        :paramtype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AADProperties
        """
        # Envelope fields are handled by the Resource base class.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class ProtectedItem(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """Base class for backup items.

    Polymorphic base: ``protected_item_type`` is the discriminator that selects
    one of the known sub-classes (AzureFileshareProtectedItem,
    AzureIaaSVMProtectedItem, AzureVmWorkloadProtectedItem, DPMProtectedItem,
    GenericProtectedItem, MabFileFolderProtectedItem, AzureSqlProtectedItem);
    use those rather than this class directly.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type (discriminator). Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up
     item. Known values: "Invalid", "AzureIaasVM", "MAB", "DPM",
     "AzureBackupServer", "AzureSql", "AzureStorage", "AzureWorkload",
     "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values:
     "Invalid", "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange",
     "Sharepoint", "VMwareVM", "SystemState", "Client", "GenericDataSource",
     "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was
     created for this backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft
     deleted data source or creation of new data source. Known values:
     "Invalid", "Default", "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is
     scheduled for deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked
     for deferred delete is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the
     deferred deleted DS is to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be
     moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on
     which LAC check will be performed.
    :vartype resource_guard_operation_requests: list[str]
    """

    _validation = {
        "protected_item_type": {"required": True},
    }

    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
    }

    # Maps wire discriminator values to the concrete model class names.
    _subtype_map = {
        "protected_item_type": {
            "AzureFileShareProtectedItem": "AzureFileshareProtectedItem",
            "AzureIaaSVMProtectedItem": "AzureIaaSVMProtectedItem",
            "AzureVmWorkloadProtectedItem": "AzureVmWorkloadProtectedItem",
            "DPMProtectedItem": "DPMProtectedItem",
            "GenericProtectedItem": "GenericProtectedItem",
            "MabFileFolderProtectedItem": "MabFileFolderProtectedItem",
            "Microsoft.Sql/servers/databases": "AzureSqlProtectedItem",
        }
    }

    def __init__(
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        **kwargs
    ):
        """Keyword arguments mirror the ivars documented on this class; all
        default to ``None``."""
        super().__init__(**kwargs)
        # Concrete sub-classes overwrite this with their discriminator value.
        self.protected_item_type: Optional[str] = None
        self.backup_management_type = backup_management_type
        self.workload_type = workload_type
        self.container_name = container_name
        self.source_resource_id = source_resource_id
        self.policy_id = policy_id
        self.last_recovery_point = last_recovery_point
        self.backup_set_name = backup_set_name
        self.create_mode = create_mode
        self.deferred_delete_time_in_utc = deferred_delete_time_in_utc
        self.is_scheduled_for_deferred_delete = is_scheduled_for_deferred_delete
        self.deferred_delete_time_remaining = deferred_delete_time_remaining
        self.is_deferred_delete_schedule_upcoming = is_deferred_delete_schedule_upcoming
        self.is_rehydrate = is_rehydrate
        self.resource_guard_operation_requests = resource_guard_operation_requests
class AzureFileshareProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """Azure File Share workload-specific backup item.

    Extends :class:`ProtectedItem` (see that class for the inherited ivars and
    keyword arguments) with file-share specific state. The discriminator value
    on the wire is ``"AzureFileShareProtectedItem"``.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type (discriminator). Required.
    :vartype protected_item_type: str
    :ivar friendly_name: Friendly name of the fileshare represented by this
     backup item.
    :vartype friendly_name: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values:
     "Invalid", "IRPending", "Protected", "ProtectionError",
     "ProtectionStopped", "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar health_status: backups running status for this backup item. Known
     values: "Passed", "ActionRequired", "ActionSuggested", "Invalid".
    :vartype health_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.HealthStatus
    :ivar last_backup_status: Last backup operation status. Possible values:
     Healthy, Unhealthy.
    :vartype last_backup_status: str
    :ivar last_backup_time: Timestamp of the last backup operation on this
     backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    :ivar extended_info: Additional information with this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureFileshareProtectedItemExtendedInfo
    """

    _validation = {
        "protected_item_type": {"required": True},
    }

    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "health_status": {"key": "healthStatus", "type": "str"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
        "extended_info": {"key": "extendedInfo", "type": "AzureFileshareProtectedItemExtendedInfo"},
    }

    def __init__(
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        health_status: Optional[Union[str, "_models.HealthStatus"]] = None,
        last_backup_status: Optional[str] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        extended_info: Optional["_models.AzureFileshareProtectedItemExtendedInfo"] = None,
        **kwargs
    ):
        """Keyword arguments mirror the ivars documented on this class and on
        :class:`ProtectedItem`; all default to ``None``."""
        # Inherited fields are handled by the ProtectedItem base class.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Fixed discriminator for this concrete type.
        self.protected_item_type: str = "AzureFileShareProtectedItem"
        self.friendly_name = friendly_name
        self.protection_status = protection_status
        self.protection_state = protection_state
        self.health_status = health_status
        self.last_backup_status = last_backup_status
        self.last_backup_time = last_backup_time
        self.kpis_healths = kpis_healths
        self.extended_info = extended_info
class AzureFileshareProtectedItemExtendedInfo(_serialization.Model):
    """Additional information about an Azure File Share backup item.

    ``resource_state`` and ``resource_state_sync_time`` are populated by the
    server only and are ignored when sending a request.

    :ivar oldest_recovery_point: The oldest backup copy available for this item
     in the service.
    :vartype oldest_recovery_point: ~datetime.datetime
    :ivar recovery_point_count: Number of available backup copies associated
     with this backup item.
    :vartype recovery_point_count: int
    :ivar policy_state: Indicates consistency of policy object and policy
     applied to this backup item.
    :vartype policy_state: str
    :ivar resource_state: Indicates the state of this resource. Possible values
     are from enum ResourceState {Invalid, Active, SoftDeleted, Deleted}.
    :vartype resource_state: str
    :ivar resource_state_sync_time: The resource state sync time for this
     backup item.
    :vartype resource_state_sync_time: ~datetime.datetime
    """

    _validation = {
        "resource_state": {"readonly": True},
        "resource_state_sync_time": {"readonly": True},
    }

    _attribute_map = {
        "oldest_recovery_point": {"key": "oldestRecoveryPoint", "type": "iso-8601"},
        "recovery_point_count": {"key": "recoveryPointCount", "type": "int"},
        "policy_state": {"key": "policyState", "type": "str"},
        "resource_state": {"key": "resourceState", "type": "str"},
        "resource_state_sync_time": {"key": "resourceStateSyncTime", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        oldest_recovery_point: Optional[datetime.datetime] = None,
        recovery_point_count: Optional[int] = None,
        policy_state: Optional[str] = None,
        **kwargs
    ):
        """Keyword arguments mirror the writable ivars documented on the class;
        all default to ``None``."""
        super().__init__(**kwargs)
        self.oldest_recovery_point = oldest_recovery_point
        self.recovery_point_count = recovery_point_count
        self.policy_state = policy_state
        # Read-only fields start as None; deserialization fills them in.
        self.resource_state = None
        self.resource_state_sync_time = None
class RecoveryPoint(_serialization.Model):
    """Base class for backup copies; workload-specific copies derive from it.

    Polymorphic base: ``object_type`` is the discriminator that selects one of
    the known sub-classes (AzureFileShareRecoveryPoint,
    AzureWorkloadRecoveryPoint, GenericRecoveryPoint, IaasVMRecoveryPoint);
    use those rather than this class directly.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for
     deciding the specific types in the polymorphic chain of types. Required.
    :vartype object_type: str
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
    }

    # Maps wire discriminator values to the concrete model class names.
    _subtype_map = {
        "object_type": {
            "AzureFileShareRecoveryPoint": "AzureFileShareRecoveryPoint",
            "AzureWorkloadRecoveryPoint": "AzureWorkloadRecoveryPoint",
            "GenericRecoveryPoint": "GenericRecoveryPoint",
            "IaasVMRecoveryPoint": "IaasVMRecoveryPoint",
        }
    }

    def __init__(self, **kwargs):
        """No keyword arguments beyond those accepted by the base model."""
        super().__init__(**kwargs)
        # Concrete sub-classes overwrite this with their discriminator value.
        self.object_type: Optional[str] = None
class AzureFileShareRecoveryPoint(RecoveryPoint):
    """Azure File Share workload specific backup copy.

    All fields other than the required ``object_type`` discriminator are
    populated by the server only and are ignored when sending a request.

    :ivar object_type: This property will be used as the discriminator for
     deciding the specific types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_point_type: Type of the backup copy. Specifies whether it is
     a crash consistent backup or app consistent.
    :vartype recovery_point_type: str
    :ivar recovery_point_time: Time at which this backup copy was created.
    :vartype recovery_point_time: ~datetime.datetime
    :ivar file_share_snapshot_uri: Contains Url to the snapshot of fileshare,
     if applicable.
    :vartype file_share_snapshot_uri: str
    :ivar recovery_point_size_in_gb: Contains recovery point size.
    :vartype recovery_point_size_in_gb: int
    """

    _validation = {
        "object_type": {"required": True},
        "recovery_point_type": {"readonly": True},
        "recovery_point_time": {"readonly": True},
        "file_share_snapshot_uri": {"readonly": True},
        "recovery_point_size_in_gb": {"readonly": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_type": {"key": "recoveryPointType", "type": "str"},
        "recovery_point_time": {"key": "recoveryPointTime", "type": "iso-8601"},
        "file_share_snapshot_uri": {"key": "fileShareSnapshotUri", "type": "str"},
        "recovery_point_size_in_gb": {"key": "recoveryPointSizeInGB", "type": "int"},
    }

    def __init__(self, **kwargs):
        """No keyword arguments beyond those accepted by the base model."""
        super().__init__(**kwargs)
        # Fixed discriminator for this concrete type.
        self.object_type: str = "AzureFileShareRecoveryPoint"
        # Read-only fields start as None; deserialization fills them in.
        self.recovery_point_type = None
        self.recovery_point_time = None
        self.file_share_snapshot_uri = None
        self.recovery_point_size_in_gb = None
class RestoreRequest(_serialization.Model):
    """Base class for restore request. Workload-specific restore requests are derived from this class.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    AzureFileShareRestoreRequest, AzureWorkloadRestoreRequest, IaasVMRestoreRequest

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: Discriminator used to resolve the concrete type in the
     polymorphic chain of restore-request types. Required.
    :vartype object_type: str
    """

    # The discriminator is the only field, and it must always be sent.
    _validation = {
        "object_type": {"required": True},
    }

    # Python attribute name -> (wire key, msrest serialization type).
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
    }

    # Wire value of objectType -> concrete model class chosen during deserialization.
    _subtype_map = {
        "object_type": {
            "AzureFileShareRestoreRequest": "AzureFileShareRestoreRequest",
            "AzureWorkloadRestoreRequest": "AzureWorkloadRestoreRequest",
            "IaasVMRestoreRequest": "IaasVMRestoreRequest",
        }
    }

    def __init__(self, **kwargs):
        """Leave the discriminator unset; concrete subclasses pin it to their own type name."""
        super().__init__(**kwargs)
        self.object_type: Optional[str] = None
class AzureFileShareRestoreRequest(RestoreRequest):
    """AzureFileShare Restore Request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
     "AlternateLocation", "RestoreDisks", and "Offline".
    :vartype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :ivar source_resource_id: Source storage account ARM Id.
    :vartype source_resource_id: str
    :ivar copy_options: Options to resolve copy conflicts. Known values are: "Invalid",
     "CreateCopy", "Skip", "Overwrite", and "FailOnConflict".
    :vartype copy_options: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CopyOptions
    :ivar restore_request_type: Restore Type (FullShareRestore or ItemLevelRestore). Known values
     are: "Invalid", "FullShareRestore", and "ItemLevelRestore".
    :vartype restore_request_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestoreRequestType
    :ivar restore_file_specs: List of Source Files/Folders(which need to recover) and
     TargetFolderPath details.
    :vartype restore_file_specs:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestoreFileSpecs]
    :ivar target_details: Target File Share Details.
    :vartype target_details:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetAFSRestoreInfo
    """

    # Only the polymorphic discriminator must be sent; every other field is optional.
    _validation = {
        "object_type": {"required": True},
    }

    # Python attribute name -> (wire key, msrest serialization type).
    # NOTE: serializer emits keys in this insertion order.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_type": {"key": "recoveryType", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "copy_options": {"key": "copyOptions", "type": "str"},
        "restore_request_type": {"key": "restoreRequestType", "type": "str"},
        "restore_file_specs": {"key": "restoreFileSpecs", "type": "[RestoreFileSpecs]"},
        "target_details": {"key": "targetDetails", "type": "TargetAFSRestoreInfo"},
    }

    def __init__(
        self,
        *,
        recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
        source_resource_id: Optional[str] = None,
        copy_options: Optional[Union[str, "_models.CopyOptions"]] = None,
        restore_request_type: Optional[Union[str, "_models.RestoreRequestType"]] = None,
        restore_file_specs: Optional[List["_models.RestoreFileSpecs"]] = None,
        target_details: Optional["_models.TargetAFSRestoreInfo"] = None,
        **kwargs
    ):
        """
        :keyword recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
         "AlternateLocation", "RestoreDisks", and "Offline".
        :paramtype recovery_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
        :keyword source_resource_id: Source storage account ARM Id.
        :paramtype source_resource_id: str
        :keyword copy_options: Options to resolve copy conflicts. Known values are: "Invalid",
         "CreateCopy", "Skip", "Overwrite", and "FailOnConflict".
        :paramtype copy_options: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CopyOptions
        :keyword restore_request_type: Restore Type (FullShareRestore or ItemLevelRestore). Known
         values are: "Invalid", "FullShareRestore", and "ItemLevelRestore".
        :paramtype restore_request_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestoreRequestType
        :keyword restore_file_specs: List of Source Files/Folders(which need to recover) and
         TargetFolderPath details.
        :paramtype restore_file_specs:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestoreFileSpecs]
        :keyword target_details: Target File Share Details.
        :paramtype target_details:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetAFSRestoreInfo
        """
        super().__init__(**kwargs)
        # Pin the polymorphic discriminator to this concrete type.
        self.object_type = "AzureFileShareRestoreRequest"  # type: str
        self.recovery_type = recovery_type
        self.source_resource_id = source_resource_id
        self.copy_options = copy_options
        self.restore_request_type = restore_request_type
        self.restore_file_specs = restore_file_specs
        self.target_details = target_details
class AzureIaaSVMProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """IaaS VM workload-specific backup item.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    AzureIaaSClassicComputeVMProtectedItem, AzureIaaSComputeVMProtectedItem

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the VM represented by this backup item.
    :vartype friendly_name: str
    :ivar virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this
     item.
    :vartype virtual_machine_id: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar health_status: Health status of protected item. Known values are: "Passed",
     "ActionRequired", "ActionSuggested", and "Invalid".
    :vartype health_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.HealthStatus
    :ivar health_details: Health details on this backup item.
    :vartype health_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMHealthDetails]
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    :ivar last_backup_status: Last backup operation status.
    :vartype last_backup_status: str
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar protected_item_data_id: Data ID of the protected item.
    :vartype protected_item_data_id: str
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMProtectedItemExtendedInfo
    :ivar extended_properties: Extended Properties for Azure IaasVM Backup.
    :vartype extended_properties:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ExtendedProperties
    """

    # Only the polymorphic discriminator must be sent; every other field is optional.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Python attribute name -> (wire key, msrest serialization type).
    # NOTE: serializer emits keys in this insertion order.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "virtual_machine_id": {"key": "virtualMachineId", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "health_status": {"key": "healthStatus", "type": "str"},
        "health_details": {"key": "healthDetails", "type": "[AzureIaaSVMHealthDetails]"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "protected_item_data_id": {"key": "protectedItemDataId", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureIaaSVMProtectedItemExtendedInfo"},
        "extended_properties": {"key": "extendedProperties", "type": "ExtendedProperties"},
    }

    # Polymorphic dispatch: the wire value of protectedItemType (an ARM resource
    # type here) selects the concrete model class during deserialization.
    _subtype_map = {
        "protected_item_type": {
            "Microsoft.ClassicCompute/virtualMachines": "AzureIaaSClassicComputeVMProtectedItem",
            "Microsoft.Compute/virtualMachines": "AzureIaaSComputeVMProtectedItem",
        }
    }

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        virtual_machine_id: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        health_status: Optional[Union[str, "_models.HealthStatus"]] = None,
        health_details: Optional[List["_models.AzureIaaSVMHealthDetails"]] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        last_backup_status: Optional[str] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        protected_item_data_id: Optional[str] = None,
        extended_info: Optional["_models.AzureIaaSVMProtectedItemExtendedInfo"] = None,
        extended_properties: Optional["_models.ExtendedProperties"] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the VM represented by this backup item.
        :paramtype friendly_name: str
        :keyword virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this
         item.
        :paramtype virtual_machine_id: str
        :keyword protection_status: Backup status of this backup item.
        :paramtype protection_status: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword health_status: Health status of protected item. Known values are: "Passed",
         "ActionRequired", "ActionSuggested", and "Invalid".
        :paramtype health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.HealthStatus
        :keyword health_details: Health details on this backup item.
        :paramtype health_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMHealthDetails]
        :keyword kpis_healths: Health details of different KPIs.
        :paramtype kpis_healths: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
        :keyword last_backup_status: Last backup operation status.
        :paramtype last_backup_status: str
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword protected_item_data_id: Data ID of the protected item.
        :paramtype protected_item_data_id: str
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMProtectedItemExtendedInfo
        :keyword extended_properties: Extended Properties for Azure IaasVM Backup.
        :paramtype extended_properties:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ExtendedProperties
        """
        # Fields shared with every ProtectedItem go to the base class.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Discriminator for this intermediate base; concrete subclasses override
        # it with their ARM resource-type string (see _subtype_map).
        self.protected_item_type = "AzureIaaSVMProtectedItem"  # type: str
        # IaaS-VM-specific fields kept on this class.
        self.friendly_name = friendly_name
        self.virtual_machine_id = virtual_machine_id
        self.protection_status = protection_status
        self.protection_state = protection_state
        self.health_status = health_status
        self.health_details = health_details
        self.kpis_healths = kpis_healths
        self.last_backup_status = last_backup_status
        self.last_backup_time = last_backup_time
        self.protected_item_data_id = protected_item_data_id
        self.extended_info = extended_info
        self.extended_properties = extended_properties
class AzureIaaSClassicComputeVMProtectedItem(AzureIaaSVMProtectedItem):  # pylint: disable=too-many-instance-attributes
    """IaaS VM workload-specific backup item representing the Classic Compute VM.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the VM represented by this backup item.
    :vartype friendly_name: str
    :ivar virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this
     item.
    :vartype virtual_machine_id: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar health_status: Health status of protected item. Known values are: "Passed",
     "ActionRequired", "ActionSuggested", and "Invalid".
    :vartype health_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.HealthStatus
    :ivar health_details: Health details on this backup item.
    :vartype health_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMHealthDetails]
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    :ivar last_backup_status: Last backup operation status.
    :vartype last_backup_status: str
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar protected_item_data_id: Data ID of the protected item.
    :vartype protected_item_data_id: str
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMProtectedItemExtendedInfo
    :ivar extended_properties: Extended Properties for Azure IaasVM Backup.
    :vartype extended_properties:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ExtendedProperties
    """

    # Only the polymorphic discriminator must be sent; every other field is optional.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Python attribute name -> (wire key, msrest serialization type).
    # Mirrors the base-class map; repeated here as required by the code generator.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "virtual_machine_id": {"key": "virtualMachineId", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "health_status": {"key": "healthStatus", "type": "str"},
        "health_details": {"key": "healthDetails", "type": "[AzureIaaSVMHealthDetails]"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "protected_item_data_id": {"key": "protectedItemDataId", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureIaaSVMProtectedItemExtendedInfo"},
        "extended_properties": {"key": "extendedProperties", "type": "ExtendedProperties"},
    }

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        virtual_machine_id: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        health_status: Optional[Union[str, "_models.HealthStatus"]] = None,
        health_details: Optional[List["_models.AzureIaaSVMHealthDetails"]] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        last_backup_status: Optional[str] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        protected_item_data_id: Optional[str] = None,
        extended_info: Optional["_models.AzureIaaSVMProtectedItemExtendedInfo"] = None,
        extended_properties: Optional["_models.ExtendedProperties"] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the VM represented by this backup item.
        :paramtype friendly_name: str
        :keyword virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this
         item.
        :paramtype virtual_machine_id: str
        :keyword protection_status: Backup status of this backup item.
        :paramtype protection_status: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword health_status: Health status of protected item. Known values are: "Passed",
         "ActionRequired", "ActionSuggested", and "Invalid".
        :paramtype health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.HealthStatus
        :keyword health_details: Health details on this backup item.
        :paramtype health_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMHealthDetails]
        :keyword kpis_healths: Health details of different KPIs.
        :paramtype kpis_healths: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
        :keyword last_backup_status: Last backup operation status.
        :paramtype last_backup_status: str
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword protected_item_data_id: Data ID of the protected item.
        :paramtype protected_item_data_id: str
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMProtectedItemExtendedInfo
        :keyword extended_properties: Extended Properties for Azure IaasVM Backup.
        :paramtype extended_properties:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ExtendedProperties
        """
        # This subclass adds no fields of its own; everything is forwarded to the base.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            friendly_name=friendly_name,
            virtual_machine_id=virtual_machine_id,
            protection_status=protection_status,
            protection_state=protection_state,
            health_status=health_status,
            health_details=health_details,
            kpis_healths=kpis_healths,
            last_backup_status=last_backup_status,
            last_backup_time=last_backup_time,
            protected_item_data_id=protected_item_data_id,
            extended_info=extended_info,
            extended_properties=extended_properties,
            **kwargs
        )
        # Pin the discriminator to the Classic Compute ARM resource type
        # (matches the base class's _subtype_map entry).
        self.protected_item_type = "Microsoft.ClassicCompute/virtualMachines"  # type: str
class AzureIaaSComputeVMProtectedItem(AzureIaaSVMProtectedItem):  # pylint: disable=too-many-instance-attributes
    """IaaS VM workload-specific backup item representing the Azure Resource Manager VM.

    This subclass exists to pin the ``protectedItemType`` discriminator to
    ``"Microsoft.Compute/virtualMachines"``; every keyword argument and
    attribute (backup policy, protection/health state, deferred-delete
    bookkeeping, extended info, ...) is inherited unchanged from
    :class:`~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMProtectedItem`,
    which documents them in full.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    """

    _validation = {
        "protected_item_type": {"required": True},
    }

    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "virtual_machine_id": {"key": "virtualMachineId", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "health_status": {"key": "healthStatus", "type": "str"},
        "health_details": {"key": "healthDetails", "type": "[AzureIaaSVMHealthDetails]"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "protected_item_data_id": {"key": "protectedItemDataId", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureIaaSVMProtectedItemExtendedInfo"},
        "extended_properties": {"key": "extendedProperties", "type": "ExtendedProperties"},
    }

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        virtual_machine_id: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        health_status: Optional[Union[str, "_models.HealthStatus"]] = None,
        health_details: Optional[List["_models.AzureIaaSVMHealthDetails"]] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        last_backup_status: Optional[str] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        protected_item_data_id: Optional[str] = None,
        extended_info: Optional["_models.AzureIaaSVMProtectedItemExtendedInfo"] = None,
        extended_properties: Optional["_models.ExtendedProperties"] = None,
        **kwargs
    ):
        """Accept the same keyword arguments as ``AzureIaaSVMProtectedItem`` (see the
        base class for the per-parameter documentation) and forward them all to the
        base initializer, then fix the discriminator for ARM virtual machines.
        """
        # Collect every explicitly-declared keyword and hand the whole set,
        # together with any extra kwargs, to the base-class initializer.
        inherited = {
            "backup_management_type": backup_management_type,
            "workload_type": workload_type,
            "container_name": container_name,
            "source_resource_id": source_resource_id,
            "policy_id": policy_id,
            "last_recovery_point": last_recovery_point,
            "backup_set_name": backup_set_name,
            "create_mode": create_mode,
            "deferred_delete_time_in_utc": deferred_delete_time_in_utc,
            "is_scheduled_for_deferred_delete": is_scheduled_for_deferred_delete,
            "deferred_delete_time_remaining": deferred_delete_time_remaining,
            "is_deferred_delete_schedule_upcoming": is_deferred_delete_schedule_upcoming,
            "is_rehydrate": is_rehydrate,
            "resource_guard_operation_requests": resource_guard_operation_requests,
            "friendly_name": friendly_name,
            "virtual_machine_id": virtual_machine_id,
            "protection_status": protection_status,
            "protection_state": protection_state,
            "health_status": health_status,
            "health_details": health_details,
            "kpis_healths": kpis_healths,
            "last_backup_status": last_backup_status,
            "last_backup_time": last_backup_time,
            "protected_item_data_id": protected_item_data_id,
            "extended_info": extended_info,
            "extended_properties": extended_properties,
        }
        super().__init__(**inherited, **kwargs)
        # Discriminator value identifying an ARM (Microsoft.Compute) VM item.
        self.protected_item_type = "Microsoft.Compute/virtualMachines"  # type: str
class AzureIaaSVMErrorInfo(_serialization.Model):
    """Azure IaaS VM workload-specific error information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar error_code: Error code.
    :vartype error_code: int
    :ivar error_title: Title: Typically, the entity that the error pertains to.
    :vartype error_title: str
    :ivar error_string: Localized error string.
    :vartype error_string: str
    :ivar recommendations: List of localized recommendations for above error code.
    :vartype recommendations: list[str]
    """

    # Every field is produced by the service; clients never send them.
    _validation = {
        "error_code": {"readonly": True},
        "error_title": {"readonly": True},
        "error_string": {"readonly": True},
        "recommendations": {"readonly": True},
    }

    _attribute_map = {
        "error_code": {"key": "errorCode", "type": "int"},
        "error_title": {"key": "errorTitle", "type": "str"},
        "error_string": {"key": "errorString", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        """Initialize all server-populated fields to ``None``; deserialization fills them in."""
        super().__init__(**kwargs)
        self.error_code = self.error_title = None
        self.error_string = self.recommendations = None
class ResourceHealthDetails(_serialization.Model):
    """Health Details for backup items.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Health Code.
    :vartype code: int
    :ivar title: Health Title.
    :vartype title: str
    :ivar message: Health Message.
    :vartype message: str
    :ivar recommendations: Health Recommended Actions.
    :vartype recommendations: list[str]
    """

    # All four fields are read-only: the service reports them, callers never set them.
    _validation = {
        "code": {"readonly": True},
        "title": {"readonly": True},
        "message": {"readonly": True},
        "recommendations": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "int"},
        "title": {"key": "title", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        """Initialize all server-populated fields to ``None``; deserialization fills them in."""
        super().__init__(**kwargs)
        self.code = self.title = None
        self.message = self.recommendations = None
class AzureIaaSVMHealthDetails(ResourceHealthDetails):
    """Azure IaaS VM workload-specific Health Details.

    Structurally identical to :class:`ResourceHealthDetails`; kept as a distinct
    type so the IaaS-VM flavour of the payload round-trips under its own name.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Health Code.
    :vartype code: int
    :ivar title: Health Title.
    :vartype title: str
    :ivar message: Health Message.
    :vartype message: str
    :ivar recommendations: Health Recommended Actions.
    :vartype recommendations: list[str]
    """

    # Same read-only contract as the base class.
    _validation = {
        "code": {"readonly": True},
        "title": {"readonly": True},
        "message": {"readonly": True},
        "recommendations": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "int"},
        "title": {"key": "title", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        """Delegate entirely to :class:`ResourceHealthDetails`; adds no fields of its own."""
        super().__init__(**kwargs)
class Job(_serialization.Model):
    """Defines workload agnostic properties for a job.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    AzureIaaSVMJob, AzureStorageJob, AzureWorkloadJob, DpmJob, MabJob

    All required parameters must be populated in order to send to Azure.

    :ivar entity_friendly_name: Friendly name of the entity on which the current job is executing.
    :vartype entity_friendly_name: str
    :ivar backup_management_type: Backup management type to execute the current job. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar operation: The operation name.
    :vartype operation: str
    :ivar status: Job status.
    :vartype status: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar activity_id: ActivityId of job.
    :vartype activity_id: str
    :ivar job_type: This property will be used as the discriminator for deciding the specific types
     in the polymorphic chain of types. Required.
    :vartype job_type: str
    """

    # Only the polymorphic discriminator is mandatory on the wire.
    _validation = {
        "job_type": {"required": True},
    }

    _attribute_map = {
        "entity_friendly_name": {"key": "entityFriendlyName", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "activity_id": {"key": "activityId", "type": "str"},
        "job_type": {"key": "jobType", "type": "str"},
    }

    # Maps the "jobType" discriminator value to the concrete model class name.
    _subtype_map = {
        "job_type": {
            "AzureIaaSVMJob": "AzureIaaSVMJob",
            "AzureStorageJob": "AzureStorageJob",
            "AzureWorkloadJob": "AzureWorkloadJob",
            "DpmJob": "DpmJob",
            "MabJob": "MabJob",
        }
    }

    def __init__(
        self,
        *,
        entity_friendly_name: Optional[str] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        activity_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword entity_friendly_name: Friendly name of the entity on which the current job is
         executing.
        :paramtype entity_friendly_name: str
        :keyword backup_management_type: Backup management type to execute the current job. Known
         values are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql",
         "AzureStorage", "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword operation: The operation name.
        :paramtype operation: str
        :keyword status: Job status.
        :paramtype status: str
        :keyword start_time: The start time.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: The end time.
        :paramtype end_time: ~datetime.datetime
        :keyword activity_id: ActivityId of job.
        :paramtype activity_id: str
        """
        super().__init__(**kwargs)
        self.entity_friendly_name = entity_friendly_name
        self.backup_management_type = backup_management_type
        self.operation = operation
        self.status = status
        self.start_time = start_time
        self.end_time = end_time
        self.activity_id = activity_id
        # Concrete sub-classes overwrite this with their discriminator string.
        self.job_type = None  # type: Optional[str]
class AzureIaaSVMJob(Job):  # pylint: disable=too-many-instance-attributes
    """Azure IaaS VM workload-specific job object.

    Extends :class:`Job` (which documents ``entity_friendly_name``,
    ``backup_management_type``, ``operation``, ``status``, ``start_time``,
    ``end_time`` and ``activity_id``) with VM-backup specific details.

    All required parameters must be populated in order to send to Azure.

    :ivar job_type: This property will be used as the discriminator for deciding the specific types
     in the polymorphic chain of types. Required.
    :vartype job_type: str
    :ivar duration: Time elapsed during the execution of this job.
    :vartype duration: ~datetime.timedelta
    :ivar actions_info: Gets or sets the state/actions applicable on this job like cancel/retry.
    :vartype actions_info: list[str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
    :ivar error_details: Error details on execution of this job.
    :vartype error_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMErrorInfo]
    :ivar virtual_machine_version: Specifies whether the backup item is a Classic or an Azure
     Resource Manager VM.
    :vartype virtual_machine_version: str
    :ivar extended_info: Additional information for this job.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMJobExtendedInfo
    """

    _validation = {
        "job_type": {"required": True},
    }

    _attribute_map = {
        "entity_friendly_name": {"key": "entityFriendlyName", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "activity_id": {"key": "activityId", "type": "str"},
        "job_type": {"key": "jobType", "type": "str"},
        "duration": {"key": "duration", "type": "duration"},
        "actions_info": {"key": "actionsInfo", "type": "[str]"},
        "error_details": {"key": "errorDetails", "type": "[AzureIaaSVMErrorInfo]"},
        "virtual_machine_version": {"key": "virtualMachineVersion", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureIaaSVMJobExtendedInfo"},
    }

    def __init__(
        self,
        *,
        entity_friendly_name: Optional[str] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        activity_id: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        actions_info: Optional[List[Union[str, "_models.JobSupportedAction"]]] = None,
        error_details: Optional[List["_models.AzureIaaSVMErrorInfo"]] = None,
        virtual_machine_version: Optional[str] = None,
        extended_info: Optional["_models.AzureIaaSVMJobExtendedInfo"] = None,
        **kwargs
    ):
        """Forward the workload-agnostic keywords to :class:`Job` and record the
        VM-specific ones on this instance.

        :keyword duration: Time elapsed during the execution of this job.
        :paramtype duration: ~datetime.timedelta
        :keyword actions_info: Gets or sets the state/actions applicable on this job like
         cancel/retry.
        :paramtype actions_info: list[str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
        :keyword error_details: Error details on execution of this job.
        :paramtype error_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMErrorInfo]
        :keyword virtual_machine_version: Specifies whether the backup item is a Classic or an
         Azure Resource Manager VM.
        :paramtype virtual_machine_version: str
        :keyword extended_info: Additional information for this job.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMJobExtendedInfo
        """
        # Base-class (workload-agnostic) keywords, forwarded untouched.
        common = {
            "entity_friendly_name": entity_friendly_name,
            "backup_management_type": backup_management_type,
            "operation": operation,
            "status": status,
            "start_time": start_time,
            "end_time": end_time,
            "activity_id": activity_id,
        }
        super().__init__(**common, **kwargs)
        # Discriminator for the polymorphic Job hierarchy.
        self.job_type = "AzureIaaSVMJob"  # type: str
        self.duration = duration
        self.actions_info = actions_info
        self.error_details = error_details
        self.virtual_machine_version = virtual_machine_version
        self.extended_info = extended_info
class AzureIaaSVMJobExtendedInfo(_serialization.Model):
    """Azure IaaS VM workload-specific additional information for job.

    :ivar tasks_list: List of tasks associated with this job.
    :vartype tasks_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureIaaSVMJobTaskDetails]
    :ivar property_bag: Job properties.
    :vartype property_bag: dict[str, str]
    :ivar internal_property_bag: Job internal properties.
    :vartype internal_property_bag: dict[str, str]
    :ivar progress_percentage: Indicates progress of the job. Null if it has not started or
     completed.
    :vartype progress_percentage: float
    :ivar estimated_remaining_duration: Time remaining for execution of this job.
    :vartype estimated_remaining_duration: str
    :ivar dynamic_error_message: Non localized error message on job execution.
    :vartype dynamic_error_message: str
    """

    _attribute_map = {
        "tasks_list": {"key": "tasksList", "type": "[AzureIaaSVMJobTaskDetails]"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "internal_property_bag": {"key": "internalPropertyBag", "type": "{str}"},
        "progress_percentage": {"key": "progressPercentage", "type": "float"},
        "estimated_remaining_duration": {"key": "estimatedRemainingDuration", "type": "str"},
        "dynamic_error_message": {"key": "dynamicErrorMessage", "type": "str"},
    }

    def __init__(
        self,
        *,
        tasks_list: Optional[List["_models.AzureIaaSVMJobTaskDetails"]] = None,
        property_bag: Optional[Dict[str, str]] = None,
        internal_property_bag: Optional[Dict[str, str]] = None,
        progress_percentage: Optional[float] = None,
        estimated_remaining_duration: Optional[str] = None,
        dynamic_error_message: Optional[str] = None,
        **kwargs
    ):
        """Store each optional keyword on the instance; see the class docstring for
        the meaning of each field.
        """
        super().__init__(**kwargs)
        # Plain value holders -- no validation beyond the serializer's type map.
        self.tasks_list = tasks_list
        self.property_bag = property_bag
        self.internal_property_bag = internal_property_bag
        self.progress_percentage = progress_percentage
        self.estimated_remaining_duration = estimated_remaining_duration
        self.dynamic_error_message = dynamic_error_message
class AzureIaaSVMJobTaskDetails(_serialization.Model):
    """Azure IaaS VM workload-specific job task details.

    :ivar task_id: The task display name.
    :vartype task_id: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar instance_id: The instanceId.
    :vartype instance_id: str
    :ivar duration: Time elapsed for task.
    :vartype duration: ~datetime.timedelta
    :ivar status: The status.
    :vartype status: str
    :ivar progress_percentage: Progress of the task.
    :vartype progress_percentage: float
    :ivar task_execution_details: Details about execution of the task.
     eg: number of bytes transferred etc.
    :vartype task_execution_details: str
    """

    _attribute_map = {
        "task_id": {"key": "taskId", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "instance_id": {"key": "instanceId", "type": "str"},
        "duration": {"key": "duration", "type": "duration"},
        "status": {"key": "status", "type": "str"},
        "progress_percentage": {"key": "progressPercentage", "type": "float"},
        "task_execution_details": {"key": "taskExecutionDetails", "type": "str"},
    }

    def __init__(
        self,
        *,
        task_id: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        instance_id: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        status: Optional[str] = None,
        progress_percentage: Optional[float] = None,
        task_execution_details: Optional[str] = None,
        **kwargs
    ):
        """Store each optional keyword on the instance; see the class docstring for
        the meaning of each field.
        """
        super().__init__(**kwargs)
        # Plain value holders -- the serializer handles typing via _attribute_map.
        self.task_id = task_id
        self.start_time = start_time
        self.end_time = end_time
        self.instance_id = instance_id
        self.duration = duration
        self.status = status
        self.progress_percentage = progress_percentage
        self.task_execution_details = task_execution_details
class AzureIaaSVMProtectedItemExtendedInfo(_serialization.Model):
    """Additional information on Azure IaaS VM specific backup item.

    :ivar oldest_recovery_point: The oldest backup copy available for this backup item.
    :vartype oldest_recovery_point: ~datetime.datetime
    :ivar recovery_point_count: Number of backup copies available for this backup item.
    :vartype recovery_point_count: int
    :ivar policy_inconsistent: Specifies if backup policy associated with the backup item is
     inconsistent.
    :vartype policy_inconsistent: bool
    """

    _attribute_map = {
        "oldest_recovery_point": {"key": "oldestRecoveryPoint", "type": "iso-8601"},
        "recovery_point_count": {"key": "recoveryPointCount", "type": "int"},
        "policy_inconsistent": {"key": "policyInconsistent", "type": "bool"},
    }

    def __init__(
        self,
        *,
        oldest_recovery_point: Optional[datetime.datetime] = None,
        recovery_point_count: Optional[int] = None,
        policy_inconsistent: Optional[bool] = None,
        **kwargs
    ):
        """Store each optional keyword on the instance; see the class docstring for
        the meaning of each field.
        """
        super().__init__(**kwargs)
        # Plain value holders -- populated either by the caller or by deserialization.
        self.oldest_recovery_point = oldest_recovery_point
        self.recovery_point_count = recovery_point_count
        self.policy_inconsistent = policy_inconsistent
class AzureSqlProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """Azure SQL workload-specific backup item.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar protected_item_data_id: Internal ID of a backup item. Used by Azure SQL Backup engine to
     contact Recovery Services.
    :vartype protected_item_data_id: str
    :ivar protection_state: Backup state of the backed up item. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemState
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureSqlProtectedItemExtendedInfo
    """

    # protected_item_type is the polymorphic discriminator and must always be present on the wire.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery when (de)serializing.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "protected_item_data_id": {"key": "protectedItemDataId", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureSqlProtectedItemExtendedInfo"},
    }

    def __init__(
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        protected_item_data_id: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectedItemState"]] = None,
        extended_info: Optional["_models.AzureSqlProtectedItemExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword protected_item_data_id: Internal ID of a backup item. Used by Azure SQL Backup engine
         to contact Recovery Services.
        :paramtype protected_item_data_id: str
        :keyword protection_state: Backup state of the backed up item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemState
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureSqlProtectedItemExtendedInfo
        """
        # All shared ProtectedItem fields are forwarded to the base class; only the
        # SQL-specific fields are stored on this subclass.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Fixed polymorphic discriminator value identifying this subtype to the service.
        self.protected_item_type = "Microsoft.Sql/servers/databases"  # type: str
        self.protected_item_data_id = protected_item_data_id
        self.protection_state = protection_state
        self.extended_info = extended_info
class AzureSqlProtectedItemExtendedInfo(_serialization.Model):
    """Extra details recorded for an Azure SQL protected item.

    :ivar oldest_recovery_point: The oldest backup copy available for this item in the service.
    :vartype oldest_recovery_point: ~datetime.datetime
    :ivar recovery_point_count: Number of available backup copies associated with this backup item.
    :vartype recovery_point_count: int
    :ivar policy_state: State of the backup policy associated with this backup item.
    :vartype policy_state: str
    """

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery.
    _attribute_map = {
        "oldest_recovery_point": {"key": "oldestRecoveryPoint", "type": "iso-8601"},
        "recovery_point_count": {"key": "recoveryPointCount", "type": "int"},
        "policy_state": {"key": "policyState", "type": "str"},
    }

    def __init__(
        self,
        *,
        oldest_recovery_point: Optional[datetime.datetime] = None,
        recovery_point_count: Optional[int] = None,
        policy_state: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword oldest_recovery_point: The oldest backup copy available for this item in the
         service.
        :paramtype oldest_recovery_point: ~datetime.datetime
        :keyword recovery_point_count: Number of available backup copies associated with this
         backup item.
        :paramtype recovery_point_count: int
        :keyword policy_state: State of the backup policy associated with this backup item.
        :paramtype policy_state: str
        """
        super().__init__(**kwargs)
        # Plain data holder: each optional field is stored exactly as given.
        self.policy_state = policy_state
        self.recovery_point_count = recovery_point_count
        self.oldest_recovery_point = oldest_recovery_point
class AzureStorageErrorInfo(_serialization.Model):
    """Error information specific to Azure storage backup operations.

    :ivar error_code: Error code.
    :vartype error_code: int
    :ivar error_string: Localized error string.
    :vartype error_string: str
    :ivar recommendations: List of localized recommendations for above error code.
    :vartype recommendations: list[str]
    """

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery.
    _attribute_map = {
        "error_code": {"key": "errorCode", "type": "int"},
        "error_string": {"key": "errorString", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        error_code: Optional[int] = None,
        error_string: Optional[str] = None,
        recommendations: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword error_code: Error code.
        :paramtype error_code: int
        :keyword error_string: Localized error string.
        :paramtype error_string: str
        :keyword recommendations: List of localized recommendations for above error code.
        :paramtype recommendations: list[str]
        """
        super().__init__(**kwargs)
        # Plain data holder: each optional field is stored exactly as given.
        self.recommendations = recommendations
        self.error_string = error_string
        self.error_code = error_code
class AzureStorageJob(Job):  # pylint: disable=too-many-instance-attributes
    """Azure storage specific job.

    All required parameters must be populated in order to send to Azure.

    :ivar entity_friendly_name: Friendly name of the entity on which the current job is executing.
    :vartype entity_friendly_name: str
    :ivar backup_management_type: Backup management type to execute the current job. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar operation: The operation name.
    :vartype operation: str
    :ivar status: Job status.
    :vartype status: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar activity_id: ActivityId of job.
    :vartype activity_id: str
    :ivar job_type: This property will be used as the discriminator for deciding the specific types
     in the polymorphic chain of types. Required.
    :vartype job_type: str
    :ivar duration: Time elapsed during the execution of this job.
    :vartype duration: ~datetime.timedelta
    :ivar actions_info: Gets or sets the state/actions applicable on this job like cancel/retry.
    :vartype actions_info: list[str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
    :ivar error_details: Error details on execution of this job.
    :vartype error_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureStorageErrorInfo]
    :ivar storage_account_name: Specifies friendly name of the storage account.
    :vartype storage_account_name: str
    :ivar storage_account_version: Specifies whether the Storage account is a Classic or an Azure
     Resource Manager Storage account.
    :vartype storage_account_version: str
    :ivar extended_info: Additional information about the job.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureStorageJobExtendedInfo
    """

    # job_type is the polymorphic discriminator and must always be present on the wire.
    _validation = {
        "job_type": {"required": True},
    }

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery when (de)serializing.
    _attribute_map = {
        "entity_friendly_name": {"key": "entityFriendlyName", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "activity_id": {"key": "activityId", "type": "str"},
        "job_type": {"key": "jobType", "type": "str"},
        "duration": {"key": "duration", "type": "duration"},
        "actions_info": {"key": "actionsInfo", "type": "[str]"},
        "error_details": {"key": "errorDetails", "type": "[AzureStorageErrorInfo]"},
        "storage_account_name": {"key": "storageAccountName", "type": "str"},
        "storage_account_version": {"key": "storageAccountVersion", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureStorageJobExtendedInfo"},
    }

    def __init__(
        self,
        *,
        entity_friendly_name: Optional[str] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        activity_id: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        actions_info: Optional[List[Union[str, "_models.JobSupportedAction"]]] = None,
        error_details: Optional[List["_models.AzureStorageErrorInfo"]] = None,
        storage_account_name: Optional[str] = None,
        storage_account_version: Optional[str] = None,
        extended_info: Optional["_models.AzureStorageJobExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword entity_friendly_name: Friendly name of the entity on which the current job is
         executing.
        :paramtype entity_friendly_name: str
        :keyword backup_management_type: Backup management type to execute the current job. Known
         values are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql",
         "AzureStorage", "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword operation: The operation name.
        :paramtype operation: str
        :keyword status: Job status.
        :paramtype status: str
        :keyword start_time: The start time.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: The end time.
        :paramtype end_time: ~datetime.datetime
        :keyword activity_id: ActivityId of job.
        :paramtype activity_id: str
        :keyword duration: Time elapsed during the execution of this job.
        :paramtype duration: ~datetime.timedelta
        :keyword actions_info: Gets or sets the state/actions applicable on this job like cancel/retry.
        :paramtype actions_info: list[str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
        :keyword error_details: Error details on execution of this job.
        :paramtype error_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureStorageErrorInfo]
        :keyword storage_account_name: Specifies friendly name of the storage account.
        :paramtype storage_account_name: str
        :keyword storage_account_version: Specifies whether the Storage account is a Classic or an
         Azure Resource Manager Storage account.
        :paramtype storage_account_version: str
        :keyword extended_info: Additional information about the job.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureStorageJobExtendedInfo
        """
        # Common Job fields go to the base class; storage-specific fields stay on this subclass.
        super().__init__(
            entity_friendly_name=entity_friendly_name,
            backup_management_type=backup_management_type,
            operation=operation,
            status=status,
            start_time=start_time,
            end_time=end_time,
            activity_id=activity_id,
            **kwargs
        )
        # Fixed polymorphic discriminator value identifying this subtype to the service.
        self.job_type = "AzureStorageJob"  # type: str
        self.duration = duration
        self.actions_info = actions_info
        self.error_details = error_details
        self.storage_account_name = storage_account_name
        self.storage_account_version = storage_account_version
        self.extended_info = extended_info
class AzureStorageJobExtendedInfo(_serialization.Model):
    """Additional, Azure Storage workload-specific information attached to a job.

    :ivar tasks_list: List of tasks for this job.
    :vartype tasks_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureStorageJobTaskDetails]
    :ivar property_bag: Job properties.
    :vartype property_bag: dict[str, str]
    :ivar dynamic_error_message: Non localized error message on job execution.
    :vartype dynamic_error_message: str
    """

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery.
    _attribute_map = {
        "tasks_list": {"key": "tasksList", "type": "[AzureStorageJobTaskDetails]"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "dynamic_error_message": {"key": "dynamicErrorMessage", "type": "str"},
    }

    def __init__(
        self,
        *,
        tasks_list: Optional[List["_models.AzureStorageJobTaskDetails"]] = None,
        property_bag: Optional[Dict[str, str]] = None,
        dynamic_error_message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tasks_list: List of tasks for this job.
        :paramtype tasks_list:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureStorageJobTaskDetails]
        :keyword property_bag: Job properties.
        :paramtype property_bag: dict[str, str]
        :keyword dynamic_error_message: Non localized error message on job execution.
        :paramtype dynamic_error_message: str
        """
        super().__init__(**kwargs)
        # Plain data holder: each optional field is stored exactly as given.
        self.dynamic_error_message = dynamic_error_message
        self.property_bag = property_bag
        self.tasks_list = tasks_list
class AzureStorageJobTaskDetails(_serialization.Model):
    """Task-level details for an Azure storage workload job.

    :ivar task_id: The task display name.
    :vartype task_id: str
    :ivar status: The status.
    :vartype status: str
    """

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery.
    _attribute_map = {
        "task_id": {"key": "taskId", "type": "str"},
        "status": {"key": "status", "type": "str"},
    }

    def __init__(self, *, task_id: Optional[str] = None, status: Optional[str] = None, **kwargs):
        """
        :keyword task_id: The task display name.
        :paramtype task_id: str
        :keyword status: The status.
        :paramtype status: str
        """
        super().__init__(**kwargs)
        # Plain data holder: each optional field is stored exactly as given.
        self.status = status
        self.task_id = task_id
class AzureVmWorkloadProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """Azure VM workload-specific protected item.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    AzureVmWorkloadSAPAseDatabaseProtectedItem, AzureVmWorkloadSAPHanaDatabaseProtectedItem,
    AzureVmWorkloadSQLDatabaseProtectedItem

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the DB represented by this backup item.
    :vartype friendly_name: str
    :ivar server_name: Host/Cluster Name for instance or AG.
    :vartype server_name: str
    :ivar parent_name: Parent name of the DB such as Instance or Availability Group.
    :vartype parent_name: str
    :ivar parent_type: Parent type of protected item, example: for a DB, standalone server or
     distributed.
    :vartype parent_type: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
     Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
    :vartype last_backup_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar last_backup_error_detail: Error details in last backup.
    :vartype last_backup_error_detail:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
    :ivar protected_item_data_source_id: Data ID of the protected item.
    :vartype protected_item_data_source_id: str
    :ivar protected_item_health_status: Health status of the backup item, evaluated based on last
     heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable", and
     "IRPending".
    :vartype protected_item_health_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    """

    # protected_item_type is the polymorphic discriminator and must always be present on the wire.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Python attribute -> REST (camelCase) key and wire type; consumed by the
    # _serialization.Model base machinery when (de)serializing.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "server_name": {"key": "serverName", "type": "str"},
        "parent_name": {"key": "parentName", "type": "str"},
        "parent_type": {"key": "parentType", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "last_backup_error_detail": {"key": "lastBackupErrorDetail", "type": "ErrorDetail"},
        "protected_item_data_source_id": {"key": "protectedItemDataSourceId", "type": "str"},
        "protected_item_health_status": {"key": "protectedItemHealthStatus", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureVmWorkloadProtectedItemExtendedInfo"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
    }

    # Routes deserialization to the concrete subclass keyed on the protectedItemType
    # discriminator value received from the service.
    _subtype_map = {
        "protected_item_type": {
            "AzureVmWorkloadSAPAseDatabase": "AzureVmWorkloadSAPAseDatabaseProtectedItem",
            "AzureVmWorkloadSAPHanaDatabase": "AzureVmWorkloadSAPHanaDatabaseProtectedItem",
            "AzureVmWorkloadSQLDatabase": "AzureVmWorkloadSQLDatabaseProtectedItem",
        }
    }

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        server_name: Optional[str] = None,
        parent_name: Optional[str] = None,
        parent_type: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        last_backup_status: Optional[Union[str, "_models.LastBackupStatus"]] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        last_backup_error_detail: Optional["_models.ErrorDetail"] = None,
        protected_item_data_source_id: Optional[str] = None,
        protected_item_health_status: Optional[Union[str, "_models.ProtectedItemHealthStatus"]] = None,
        extended_info: Optional["_models.AzureVmWorkloadProtectedItemExtendedInfo"] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the DB represented by this backup item.
        :paramtype friendly_name: str
        :keyword server_name: Host/Cluster Name for instance or AG.
        :paramtype server_name: str
        :keyword parent_name: Parent name of the DB such as Instance or Availability Group.
        :paramtype parent_name: str
        :keyword parent_type: Parent type of protected item, example: for a DB, standalone server or
         distributed.
        :paramtype parent_type: str
        :keyword protection_status: Backup status of this backup item.
        :paramtype protection_status: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
         Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
        :paramtype last_backup_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword last_backup_error_detail: Error details in last backup.
        :paramtype last_backup_error_detail:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
        :keyword protected_item_data_source_id: Data ID of the protected item.
        :paramtype protected_item_data_source_id: str
        :keyword protected_item_health_status: Health status of the backup item, evaluated based on
         last heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable",
         and "IRPending".
        :paramtype protected_item_health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
        :keyword kpis_healths: Health details of different KPIs.
        :paramtype kpis_healths: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
        """
        # All shared ProtectedItem fields are forwarded to the base class; only the
        # VM-workload-specific fields are stored on this class.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Discriminator for this intermediate base; the concrete subclasses overwrite it
        # with their own value (see _subtype_map above).
        self.protected_item_type = "AzureVmWorkloadProtectedItem"  # type: str
        self.friendly_name = friendly_name
        self.server_name = server_name
        self.parent_name = parent_name
        self.parent_type = parent_type
        self.protection_status = protection_status
        self.protection_state = protection_state
        self.last_backup_status = last_backup_status
        self.last_backup_time = last_backup_time
        self.last_backup_error_detail = last_backup_error_detail
        self.protected_item_data_source_id = protected_item_data_source_id
        self.protected_item_health_status = protected_item_health_status
        self.extended_info = extended_info
        self.kpis_healths = kpis_healths
class AzureVmWorkloadProtectedItemExtendedInfo(_serialization.Model):
    """Extra backup-item details for an Azure Workload (SQL-family) protected item.
    :ivar oldest_recovery_point: Timestamp of the oldest backup copy still available for
     this backup item.
    :vartype oldest_recovery_point: ~datetime.datetime
    :ivar recovery_point_count: How many backup copies exist for this backup item.
    :vartype recovery_point_count: int
    :ivar policy_state: Whether the policy object is consistent with the policy actually
     applied to this backup item.
    :vartype policy_state: str
    """
    # Serialization metadata: python attribute -> JSON key and wire type.
    _attribute_map = {
        "oldest_recovery_point": {"key": "oldestRecoveryPoint", "type": "iso-8601"},
        "recovery_point_count": {"key": "recoveryPointCount", "type": "int"},
        "policy_state": {"key": "policyState", "type": "str"},
    }
    def __init__(
        self,
        *,
        oldest_recovery_point: Optional[datetime.datetime] = None,
        recovery_point_count: Optional[int] = None,
        policy_state: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword oldest_recovery_point: Timestamp of the oldest backup copy still available
         for this backup item.
        :paramtype oldest_recovery_point: ~datetime.datetime
        :keyword recovery_point_count: How many backup copies exist for this backup item.
        :paramtype recovery_point_count: int
        :keyword policy_state: Whether the policy object is consistent with the policy
         actually applied to this backup item.
        :paramtype policy_state: str
        """
        # Let the base Model consume any additional serializer kwargs first.
        super().__init__(**kwargs)
        # The assignments below are independent; order is irrelevant.
        self.policy_state = policy_state
        self.recovery_point_count = recovery_point_count
        self.oldest_recovery_point = oldest_recovery_point
class AzureVmWorkloadSAPAseDatabaseProtectedItem(
    AzureVmWorkloadProtectedItem
): # pylint: disable=too-many-instance-attributes
    """Azure VM workload-specific protected item representing SAP ASE Database.
    All required parameters must be populated in order to send to Azure.
    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
    are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
    "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
    "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
    "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
    "SAPAseDatabase".
    :vartype workload_type: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
    backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
    creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
    deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
    is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
    to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
    be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the DB represented by this backup item.
    :vartype friendly_name: str
    :ivar server_name: Host/Cluster Name for instance or AG.
    :vartype server_name: str
    :ivar parent_name: Parent name of the DB such as Instance or Availability Group.
    :vartype parent_name: str
    :ivar parent_type: Parent type of protected item, example: for a DB, standalone server or
    distributed.
    :vartype parent_type: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
    "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
    Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
    :vartype last_backup_status: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar last_backup_error_detail: Error details in last backup.
    :vartype last_backup_error_detail:
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
    :ivar protected_item_data_source_id: Data ID of the protected item.
    :vartype protected_item_data_source_id: str
    :ivar protected_item_health_status: Health status of the backup item, evaluated based on last
    heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable", and
    "IRPending".
    :vartype protected_item_health_status: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    """
    # protected_item_type is the polymorphic discriminator and must always be sent.
    _validation = {
        "protected_item_type": {"required": True},
    }
    # Serialization metadata: python attribute -> JSON key and wire type. Do not edit
    # by hand; these keys are the REST API contract.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "server_name": {"key": "serverName", "type": "str"},
        "parent_name": {"key": "parentName", "type": "str"},
        "parent_type": {"key": "parentType", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "last_backup_error_detail": {"key": "lastBackupErrorDetail", "type": "ErrorDetail"},
        "protected_item_data_source_id": {"key": "protectedItemDataSourceId", "type": "str"},
        "protected_item_health_status": {"key": "protectedItemHealthStatus", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureVmWorkloadProtectedItemExtendedInfo"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
    }
    def __init__( # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        server_name: Optional[str] = None,
        parent_name: Optional[str] = None,
        parent_type: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        last_backup_status: Optional[Union[str, "_models.LastBackupStatus"]] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        last_backup_error_detail: Optional["_models.ErrorDetail"] = None,
        protected_item_data_source_id: Optional[str] = None,
        protected_item_health_status: Optional[Union[str, "_models.ProtectedItemHealthStatus"]] = None,
        extended_info: Optional["_models.AzureVmWorkloadProtectedItemExtendedInfo"] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the DB represented by this backup item.
        :paramtype friendly_name: str
        :keyword server_name: Host/Cluster Name for instance or AG.
        :paramtype server_name: str
        :keyword parent_name: Parent name of the DB such as Instance or Availability Group.
        :paramtype parent_name: str
        :keyword parent_type: Parent type of protected item, example: for a DB, standalone server or
         distributed.
        :paramtype parent_type: str
        :keyword protection_status: Backup status of this backup item.
        :paramtype protection_status: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
         Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
        :paramtype last_backup_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword last_backup_error_detail: Error details in last backup.
        :paramtype last_backup_error_detail:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
        :keyword protected_item_data_source_id: Data ID of the protected item.
        :paramtype protected_item_data_source_id: str
        :keyword protected_item_health_status: Health status of the backup item, evaluated based on
         last heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable",
         and "IRPending".
        :paramtype protected_item_health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
        :keyword kpis_healths: Health details of different KPIs.
        :paramtype kpis_healths: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
        """
        # Every field is forwarded to AzureVmWorkloadProtectedItem, which stores them;
        # this subclass only overrides the discriminator below.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            friendly_name=friendly_name,
            server_name=server_name,
            parent_name=parent_name,
            parent_type=parent_type,
            protection_status=protection_status,
            protection_state=protection_state,
            last_backup_status=last_backup_status,
            last_backup_time=last_backup_time,
            last_backup_error_detail=last_backup_error_detail,
            protected_item_data_source_id=protected_item_data_source_id,
            protected_item_health_status=protected_item_health_status,
            extended_info=extended_info,
            kpis_healths=kpis_healths,
            **kwargs
        )
        # Fixed discriminator value identifying this model on the wire.
        self.protected_item_type = "AzureVmWorkloadSAPAseDatabase"  # type: str
class AzureVmWorkloadSAPHanaDatabaseProtectedItem(
    AzureVmWorkloadProtectedItem
): # pylint: disable=too-many-instance-attributes
    """Azure VM workload-specific protected item representing SAP HANA Database.
    All required parameters must be populated in order to send to Azure.
    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
    are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
    "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
    "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
    "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
    "SAPAseDatabase".
    :vartype workload_type: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
    backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
    creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
    deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
    is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
    to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
    be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the DB represented by this backup item.
    :vartype friendly_name: str
    :ivar server_name: Host/Cluster Name for instance or AG.
    :vartype server_name: str
    :ivar parent_name: Parent name of the DB such as Instance or Availability Group.
    :vartype parent_name: str
    :ivar parent_type: Parent type of protected item, example: for a DB, standalone server or
    distributed.
    :vartype parent_type: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
    "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
    Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
    :vartype last_backup_status: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar last_backup_error_detail: Error details in last backup.
    :vartype last_backup_error_detail:
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
    :ivar protected_item_data_source_id: Data ID of the protected item.
    :vartype protected_item_data_source_id: str
    :ivar protected_item_health_status: Health status of the backup item, evaluated based on last
    heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable", and
    "IRPending".
    :vartype protected_item_health_status: str or
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
    ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    """
    # protected_item_type is the polymorphic discriminator and must always be sent.
    _validation = {
        "protected_item_type": {"required": True},
    }
    # Serialization metadata: python attribute -> JSON key and wire type. Do not edit
    # by hand; these keys are the REST API contract.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "server_name": {"key": "serverName", "type": "str"},
        "parent_name": {"key": "parentName", "type": "str"},
        "parent_type": {"key": "parentType", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "last_backup_error_detail": {"key": "lastBackupErrorDetail", "type": "ErrorDetail"},
        "protected_item_data_source_id": {"key": "protectedItemDataSourceId", "type": "str"},
        "protected_item_health_status": {"key": "protectedItemHealthStatus", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureVmWorkloadProtectedItemExtendedInfo"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
    }
    def __init__( # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        server_name: Optional[str] = None,
        parent_name: Optional[str] = None,
        parent_type: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        last_backup_status: Optional[Union[str, "_models.LastBackupStatus"]] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        last_backup_error_detail: Optional["_models.ErrorDetail"] = None,
        protected_item_data_source_id: Optional[str] = None,
        protected_item_health_status: Optional[Union[str, "_models.ProtectedItemHealthStatus"]] = None,
        extended_info: Optional["_models.AzureVmWorkloadProtectedItemExtendedInfo"] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the DB represented by this backup item.
        :paramtype friendly_name: str
        :keyword server_name: Host/Cluster Name for instance or AG.
        :paramtype server_name: str
        :keyword parent_name: Parent name of the DB such as Instance or Availability Group.
        :paramtype parent_name: str
        :keyword parent_type: Parent type of protected item, example: for a DB, standalone server or
         distributed.
        :paramtype parent_type: str
        :keyword protection_status: Backup status of this backup item.
        :paramtype protection_status: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
         Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
        :paramtype last_backup_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword last_backup_error_detail: Error details in last backup.
        :paramtype last_backup_error_detail:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
        :keyword protected_item_data_source_id: Data ID of the protected item.
        :paramtype protected_item_data_source_id: str
        :keyword protected_item_health_status: Health status of the backup item, evaluated based on
         last heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable",
         and "IRPending".
        :paramtype protected_item_health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
        :keyword kpis_healths: Health details of different KPIs.
        :paramtype kpis_healths: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
        """
        # Every field is forwarded to AzureVmWorkloadProtectedItem, which stores them;
        # this subclass only overrides the discriminator below.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            friendly_name=friendly_name,
            server_name=server_name,
            parent_name=parent_name,
            parent_type=parent_type,
            protection_status=protection_status,
            protection_state=protection_state,
            last_backup_status=last_backup_status,
            last_backup_time=last_backup_time,
            last_backup_error_detail=last_backup_error_detail,
            protected_item_data_source_id=protected_item_data_source_id,
            protected_item_health_status=protected_item_health_status,
            extended_info=extended_info,
            kpis_healths=kpis_healths,
            **kwargs
        )
        # Fixed discriminator value identifying this model on the wire.
        self.protected_item_type = "AzureVmWorkloadSAPHanaDatabase"  # type: str
class AzureVmWorkloadSQLDatabaseProtectedItem(
    AzureVmWorkloadProtectedItem
):  # pylint: disable=too-many-instance-attributes
    """Azure VM workload-specific protected item representing SQL Database.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the DB represented by this backup item.
    :vartype friendly_name: str
    :ivar server_name: Host/Cluster Name for instance or AG.
    :vartype server_name: str
    :ivar parent_name: Parent name of the DB such as Instance or Availability Group.
    :vartype parent_name: str
    :ivar parent_type: Parent type of protected item, example: for a DB, standalone server or
     distributed.
    :vartype parent_type: str
    :ivar protection_status: Backup status of this backup item.
    :vartype protection_status: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
     Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
    :vartype last_backup_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar last_backup_error_detail: Error details in last backup.
    :vartype last_backup_error_detail:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
    :ivar protected_item_data_source_id: Data ID of the protected item.
    :vartype protected_item_data_source_id: str
    :ivar protected_item_health_status: Health status of the backup item, evaluated based on last
     heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable", and
     "IRPending".
    :vartype protected_item_health_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
    :ivar extended_info: Additional information for this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
    :ivar kpis_healths: Health details of different KPIs.
    :vartype kpis_healths: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
    """

    # Client-side validation rules: protected_item_type must be populated
    # before the payload is serialized (it is set automatically in __init__).
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Maps each Python attribute to its wire (JSON) key and serialization type.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "server_name": {"key": "serverName", "type": "str"},
        "parent_name": {"key": "parentName", "type": "str"},
        "parent_type": {"key": "parentType", "type": "str"},
        "protection_status": {"key": "protectionStatus", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "last_backup_error_detail": {"key": "lastBackupErrorDetail", "type": "ErrorDetail"},
        "protected_item_data_source_id": {"key": "protectedItemDataSourceId", "type": "str"},
        "protected_item_health_status": {"key": "protectedItemHealthStatus", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "AzureVmWorkloadProtectedItemExtendedInfo"},
        "kpis_healths": {"key": "kpisHealths", "type": "{KPIResourceHealthDetails}"},
    }

    def __init__(  # pylint: disable=too-many-locals
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        server_name: Optional[str] = None,
        parent_name: Optional[str] = None,
        parent_type: Optional[str] = None,
        protection_status: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        last_backup_status: Optional[Union[str, "_models.LastBackupStatus"]] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        last_backup_error_detail: Optional["_models.ErrorDetail"] = None,
        protected_item_data_source_id: Optional[str] = None,
        protected_item_health_status: Optional[Union[str, "_models.ProtectedItemHealthStatus"]] = None,
        extended_info: Optional["_models.AzureVmWorkloadProtectedItemExtendedInfo"] = None,
        kpis_healths: Optional[Dict[str, "_models.KPIResourceHealthDetails"]] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the DB represented by this backup item.
        :paramtype friendly_name: str
        :keyword server_name: Host/Cluster Name for instance or AG.
        :paramtype server_name: str
        :keyword parent_name: Parent name of the DB such as Instance or Availability Group.
        :paramtype parent_name: str
        :keyword parent_type: Parent type of protected item, example: for a DB, standalone server or
         distributed.
        :paramtype parent_type: str
        :keyword protection_status: Backup status of this backup item.
        :paramtype protection_status: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
         Known values are: "Invalid", "Healthy", "Unhealthy", and "IRPending".
        :paramtype last_backup_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.LastBackupStatus
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword last_backup_error_detail: Error details in last backup.
        :paramtype last_backup_error_detail:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorDetail
        :keyword protected_item_data_source_id: Data ID of the protected item.
        :paramtype protected_item_data_source_id: str
        :keyword protected_item_health_status: Health status of the backup item, evaluated based on
         last heartbeat received. Known values are: "Invalid", "Healthy", "Unhealthy", "NotReachable",
         and "IRPending".
        :paramtype protected_item_health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemHealthStatus
        :keyword extended_info: Additional information for this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureVmWorkloadProtectedItemExtendedInfo
        :keyword kpis_healths: Health details of different KPIs.
        :paramtype kpis_healths: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KPIResourceHealthDetails]
        """
        # All shared fields are handled by the AzureVmWorkloadProtectedItem base.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            friendly_name=friendly_name,
            server_name=server_name,
            parent_name=parent_name,
            parent_type=parent_type,
            protection_status=protection_status,
            protection_state=protection_state,
            last_backup_status=last_backup_status,
            last_backup_time=last_backup_time,
            last_backup_error_detail=last_backup_error_detail,
            protected_item_data_source_id=protected_item_data_source_id,
            protected_item_health_status=protected_item_health_status,
            extended_info=extended_info,
            kpis_healths=kpis_healths,
            **kwargs
        )
        # Fixed wire value identifying this model as a SQL Database protected item.
        self.protected_item_type = "AzureVmWorkloadSQLDatabase"  # type: str
class AzureWorkloadErrorInfo(_serialization.Model):
    """Error information specific to an Azure storage workload operation.

    :ivar error_code: Numeric error code.
    :vartype error_code: int
    :ivar error_string: Localized message describing the error.
    :vartype error_string: str
    :ivar error_title: Title, typically naming the entity the error pertains to.
    :vartype error_title: str
    :ivar recommendations: Localized recommendations for resolving the error code above.
    :vartype recommendations: list[str]
    :ivar additional_details: Extra details accompanying the error code above.
    :vartype additional_details: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        "error_code": {"key": "errorCode", "type": "int"},
        "error_string": {"key": "errorString", "type": "str"},
        "error_title": {"key": "errorTitle", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
        "additional_details": {"key": "additionalDetails", "type": "str"},
    }

    def __init__(
        self,
        *,
        error_code: Optional[int] = None,
        error_string: Optional[str] = None,
        error_title: Optional[str] = None,
        recommendations: Optional[List[str]] = None,
        additional_details: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword error_code: Numeric error code.
        :paramtype error_code: int
        :keyword error_string: Localized message describing the error.
        :paramtype error_string: str
        :keyword error_title: Title, typically naming the entity the error pertains to.
        :paramtype error_title: str
        :keyword recommendations: Localized recommendations for resolving the error code above.
        :paramtype recommendations: list[str]
        :keyword additional_details: Extra details accompanying the error code above.
        :paramtype additional_details: str
        """
        super().__init__(**kwargs)
        self.additional_details = additional_details
        self.recommendations = recommendations
        self.error_title = error_title
        self.error_string = error_string
        self.error_code = error_code
class AzureWorkloadJob(Job):  # pylint: disable=too-many-instance-attributes
    """Azure storage specific job.

    All required parameters must be populated in order to send to Azure.

    :ivar entity_friendly_name: Friendly name of the entity on which the current job is executing.
    :vartype entity_friendly_name: str
    :ivar backup_management_type: Backup management type to execute the current job. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar operation: The operation name.
    :vartype operation: str
    :ivar status: Job status.
    :vartype status: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar activity_id: ActivityId of job.
    :vartype activity_id: str
    :ivar job_type: This property will be used as the discriminator for deciding the specific types
     in the polymorphic chain of types. Required.
    :vartype job_type: str
    :ivar workload_type: Workload type of the job.
    :vartype workload_type: str
    :ivar duration: Time elapsed during the execution of this job.
    :vartype duration: ~datetime.timedelta
    :ivar actions_info: Gets or sets the state/actions applicable on this job like cancel/retry.
    :vartype actions_info: list[str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
    :ivar error_details: Error details on execution of this job.
    :vartype error_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadErrorInfo]
    :ivar extended_info: Additional information about the job.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadJobExtendedInfo
    """

    # Client-side validation rules: job_type must be populated before the
    # payload is serialized (it is set automatically in __init__).
    _validation = {
        "job_type": {"required": True},
    }

    # Maps each Python attribute to its wire (JSON) key and serialization type.
    _attribute_map = {
        "entity_friendly_name": {"key": "entityFriendlyName", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "activity_id": {"key": "activityId", "type": "str"},
        "job_type": {"key": "jobType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "duration": {"key": "duration", "type": "duration"},
        "actions_info": {"key": "actionsInfo", "type": "[str]"},
        "error_details": {"key": "errorDetails", "type": "[AzureWorkloadErrorInfo]"},
        "extended_info": {"key": "extendedInfo", "type": "AzureWorkloadJobExtendedInfo"},
    }

    def __init__(
        self,
        *,
        entity_friendly_name: Optional[str] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        activity_id: Optional[str] = None,
        workload_type: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        actions_info: Optional[List[Union[str, "_models.JobSupportedAction"]]] = None,
        error_details: Optional[List["_models.AzureWorkloadErrorInfo"]] = None,
        extended_info: Optional["_models.AzureWorkloadJobExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword entity_friendly_name: Friendly name of the entity on which the current job is
         executing.
        :paramtype entity_friendly_name: str
        :keyword backup_management_type: Backup management type to execute the current job. Known
         values are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql",
         "AzureStorage", "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword operation: The operation name.
        :paramtype operation: str
        :keyword status: Job status.
        :paramtype status: str
        :keyword start_time: The start time.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: The end time.
        :paramtype end_time: ~datetime.datetime
        :keyword activity_id: ActivityId of job.
        :paramtype activity_id: str
        :keyword workload_type: Workload type of the job.
        :paramtype workload_type: str
        :keyword duration: Time elapsed during the execution of this job.
        :paramtype duration: ~datetime.timedelta
        :keyword actions_info: Gets or sets the state/actions applicable on this job like cancel/retry.
        :paramtype actions_info: list[str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
        :keyword error_details: Error details on execution of this job.
        :paramtype error_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadErrorInfo]
        :keyword extended_info: Additional information about the job.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadJobExtendedInfo
        """
        # Common job fields are handled by the Job base class.
        super().__init__(
            entity_friendly_name=entity_friendly_name,
            backup_management_type=backup_management_type,
            operation=operation,
            status=status,
            start_time=start_time,
            end_time=end_time,
            activity_id=activity_id,
            **kwargs
        )
        # job_type is the polymorphic discriminator (see docstring); fixed here.
        self.job_type = "AzureWorkloadJob"  # type: str
        self.workload_type = workload_type
        self.duration = duration
        self.actions_info = actions_info
        self.error_details = error_details
        self.extended_info = extended_info
class AzureWorkloadJobExtendedInfo(_serialization.Model):
    """Extra, workload-specific details attached to an Azure VM workload job.

    :ivar tasks_list: Tasks that make up this job.
    :vartype tasks_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadJobTaskDetails]
    :ivar property_bag: Job properties as name/value pairs.
    :vartype property_bag: dict[str, str]
    :ivar dynamic_error_message: Non localized error message from the job execution.
    :vartype dynamic_error_message: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        "tasks_list": {"key": "tasksList", "type": "[AzureWorkloadJobTaskDetails]"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "dynamic_error_message": {"key": "dynamicErrorMessage", "type": "str"},
    }

    def __init__(
        self,
        *,
        tasks_list: Optional[List["_models.AzureWorkloadJobTaskDetails"]] = None,
        property_bag: Optional[Dict[str, str]] = None,
        dynamic_error_message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tasks_list: Tasks that make up this job.
        :paramtype tasks_list:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadJobTaskDetails]
        :keyword property_bag: Job properties as name/value pairs.
        :paramtype property_bag: dict[str, str]
        :keyword dynamic_error_message: Non localized error message from the job execution.
        :paramtype dynamic_error_message: str
        """
        super().__init__(**kwargs)
        self.dynamic_error_message = dynamic_error_message
        self.property_bag = property_bag
        self.tasks_list = tasks_list
class AzureWorkloadJobTaskDetails(_serialization.Model):
    """A single task within an Azure VM workload job.

    :ivar task_id: Display name of the task.
    :vartype task_id: str
    :ivar status: Current status of the task.
    :vartype status: str
    """

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        "task_id": {"key": "taskId", "type": "str"},
        "status": {"key": "status", "type": "str"},
    }

    def __init__(self, *, task_id: Optional[str] = None, status: Optional[str] = None, **kwargs):
        """
        :keyword task_id: Display name of the task.
        :paramtype task_id: str
        :keyword status: Current status of the task.
        :paramtype status: str
        """
        super().__init__(**kwargs)
        self.status = status
        self.task_id = task_id
class AzureWorkloadRecoveryPoint(RecoveryPoint):
    """Workload-specific recovery point, encapsulating full/diff recovery points.

    Prefer one of the known sub-classes over this base type directly:
    AzureWorkloadPointInTimeRecoveryPoint, AzureWorkloadSAPHanaRecoveryPoint,
    AzureWorkloadSQLRecoveryPoint.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: Discriminator used to resolve the concrete type in the polymorphic
     chain. Required.
    :vartype object_type: str
    :ivar recovery_point_time_in_utc: UTC time at which the recovery point was created.
    :vartype recovery_point_time_in_utc: ~datetime.datetime
    :ivar type: Type of restore point. Known values are: "Invalid", "Full", "Log", "Differential",
     and "Incremental".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointType
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar recovery_point_move_readiness_info: Eligibility of the recovery point to be moved to
     another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    """

    # object_type is mandatory; the timestamp and type are server-populated (read-only).
    _validation = {
        "object_type": {"required": True},
        "recovery_point_time_in_utc": {"readonly": True},
        "type": {"readonly": True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_time_in_utc": {"key": "recoveryPointTimeInUTC", "type": "iso-8601"},
        "type": {"key": "type", "type": "str"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
    }

    # Discriminator value -> concrete model class, used for polymorphic deserialization.
    _subtype_map = {
        "object_type": {
            "AzureWorkloadPointInTimeRecoveryPoint": "AzureWorkloadPointInTimeRecoveryPoint",
            "AzureWorkloadSAPHanaRecoveryPoint": "AzureWorkloadSAPHanaRecoveryPoint",
            "AzureWorkloadSQLRecoveryPoint": "AzureWorkloadSQLRecoveryPoint",
        }
    }

    def __init__(
        self,
        *,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        **kwargs
    ):
        """
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword recovery_point_move_readiness_info: Eligibility of the recovery point to be moved
         to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        """
        super().__init__(**kwargs)
        self.object_type = "AzureWorkloadRecoveryPoint"  # type: str
        # Server-populated (read-only) fields start out unset on the client.
        self.recovery_point_time_in_utc = None
        self.type = None
        self.recovery_point_move_readiness_info = recovery_point_move_readiness_info
        self.recovery_point_tier_details = recovery_point_tier_details
class AzureWorkloadPointInTimeRecoveryPoint(AzureWorkloadRecoveryPoint):
    """Recovery point specific to a point-in-time restore.

    Prefer the known sub-class over this type directly:
    AzureWorkloadSAPHanaPointInTimeRecoveryPoint.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: Discriminator used to resolve the concrete type in the polymorphic
     chain. Required.
    :vartype object_type: str
    :ivar recovery_point_time_in_utc: UTC time at which the recovery point was created.
    :vartype recovery_point_time_in_utc: ~datetime.datetime
    :ivar type: Type of restore point. Known values are: "Invalid", "Full", "Log", "Differential",
     and "Incremental".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointType
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar recovery_point_move_readiness_info: Eligibility of the recovery point to be moved to
     another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    :ivar time_ranges: List of log ranges.
    :vartype time_ranges:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.PointInTimeRange]
    """

    # object_type is mandatory; the timestamp and type are server-populated (read-only).
    _validation = {
        "object_type": {"required": True},
        "recovery_point_time_in_utc": {"readonly": True},
        "type": {"readonly": True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_time_in_utc": {"key": "recoveryPointTimeInUTC", "type": "iso-8601"},
        "type": {"key": "type", "type": "str"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
        "time_ranges": {"key": "timeRanges", "type": "[PointInTimeRange]"},
    }

    # Discriminator value -> concrete model class, used for polymorphic deserialization.
    _subtype_map = {
        "object_type": {
            "AzureWorkloadSAPHanaPointInTimeRecoveryPoint": "AzureWorkloadSAPHanaPointInTimeRecoveryPoint",
        }
    }

    def __init__(
        self,
        *,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        time_ranges: Optional[List["_models.PointInTimeRange"]] = None,
        **kwargs
    ):
        """
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword recovery_point_move_readiness_info: Eligibility of the recovery point to be moved
         to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        :keyword time_ranges: List of log ranges.
        :paramtype time_ranges:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.PointInTimeRange]
        """
        # Tier details and move-readiness info are recorded by the base class.
        super().__init__(
            recovery_point_tier_details=recovery_point_tier_details,
            recovery_point_move_readiness_info=recovery_point_move_readiness_info,
            **kwargs
        )
        self.time_ranges = time_ranges
        self.object_type = "AzureWorkloadPointInTimeRecoveryPoint"  # type: str
class AzureWorkloadRestoreRequest(RestoreRequest):
    """AzureWorkload-specific restore request.

    Prefer one of the known sub-classes over this base type directly:
    AzureWorkloadPointInTimeRestoreRequest, AzureWorkloadSAPHanaRestoreRequest,
    AzureWorkloadSQLRestoreRequest.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: Discriminator used to resolve the concrete type in the polymorphic
     chain. Required.
    :vartype object_type: str
    :ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
     "AlternateLocation", "RestoreDisks", and "Offline".
    :vartype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :ivar source_resource_id: Fully qualified ARM ID of the VM on which the workload being
     recovered was running.
    :vartype source_resource_id: str
    :ivar property_bag: Workload-specific property bag.
    :vartype property_bag: dict[str, str]
    :ivar target_info: Details of the target database.
    :vartype target_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
    :ivar recovery_mode: Whether the current recovery mode is file restore or database restore.
     Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
    :vartype recovery_mode: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
    :ivar target_virtual_machine_id: Complete ARM Id of the target VM, e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :vartype target_virtual_machine_id: str
    """

    # object_type (the polymorphic discriminator) is the only mandatory field.
    _validation = {
        "object_type": {"required": True},
    }

    # Wire-format mapping: attribute name -> JSON key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_type": {"key": "recoveryType", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "target_info": {"key": "targetInfo", "type": "TargetRestoreInfo"},
        "recovery_mode": {"key": "recoveryMode", "type": "str"},
        "target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
    }

    # Discriminator value -> concrete model class, used for polymorphic deserialization.
    _subtype_map = {
        "object_type": {
            "AzureWorkloadPointInTimeRestoreRequest": "AzureWorkloadPointInTimeRestoreRequest",
            "AzureWorkloadSAPHanaRestoreRequest": "AzureWorkloadSAPHanaRestoreRequest",
            "AzureWorkloadSQLRestoreRequest": "AzureWorkloadSQLRestoreRequest",
        }
    }

    def __init__(
        self,
        *,
        recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
        source_resource_id: Optional[str] = None,
        property_bag: Optional[Dict[str, str]] = None,
        target_info: Optional["_models.TargetRestoreInfo"] = None,
        recovery_mode: Optional[Union[str, "_models.RecoveryMode"]] = None,
        target_virtual_machine_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword recovery_type: Type of this recovery. Known values are: "Invalid",
         "OriginalLocation", "AlternateLocation", "RestoreDisks", and "Offline".
        :paramtype recovery_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
        :keyword source_resource_id: Fully qualified ARM ID of the VM on which the workload being
         recovered was running.
        :paramtype source_resource_id: str
        :keyword property_bag: Workload-specific property bag.
        :paramtype property_bag: dict[str, str]
        :keyword target_info: Details of the target database.
        :paramtype target_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
        :keyword recovery_mode: Whether the current recovery mode is file restore or database
         restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
        :paramtype recovery_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
        :keyword target_virtual_machine_id: Complete ARM Id of the target VM, e.g.
         /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
        :paramtype target_virtual_machine_id: str
        """
        super().__init__(**kwargs)
        self.object_type = "AzureWorkloadRestoreRequest"  # type: str
        self.target_virtual_machine_id = target_virtual_machine_id
        self.recovery_mode = recovery_mode
        self.target_info = target_info
        self.property_bag = property_bag
        self.source_resource_id = source_resource_id
        self.recovery_type = recovery_type
# NOTE(review): the original summary said "SAP Hana -specific", which is a copy-paste
# from the SAP Hana variant (AzureWorkloadSAPHanaPointInTimeRestoreRequest); this class
# is the workload-generic point-in-time request, so the summary below was reworded.
class AzureWorkloadPointInTimeRestoreRequest(AzureWorkloadRestoreRequest):
    """AzureWorkload-specific restore. Specifically for PointInTime/Log restore.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
     "AlternateLocation", "RestoreDisks", and "Offline".
    :vartype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :ivar source_resource_id: Fully qualified ARM ID of the VM on which workload that was running
     is being recovered.
    :vartype source_resource_id: str
    :ivar property_bag: Workload specific property bag.
    :vartype property_bag: dict[str, str]
    :ivar target_info: Details of target database.
    :vartype target_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
    :ivar recovery_mode: Defines whether the current recovery mode is file restore or database
     restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
    :vartype recovery_mode: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
    :ivar target_virtual_machine_id: This is the complete ARM Id of the target VM
     For e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :vartype target_virtual_machine_id: str
    :ivar point_in_time: PointInTime value.
    :vartype point_in_time: ~datetime.datetime
    """
    # object_type is the polymorphic discriminator and must always be present.
    _validation = {
        "object_type": {"required": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_type": {"key": "recoveryType", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "target_info": {"key": "targetInfo", "type": "TargetRestoreInfo"},
        "recovery_mode": {"key": "recoveryMode", "type": "str"},
        "target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
        "point_in_time": {"key": "pointInTime", "type": "iso-8601"},
    }
    def __init__(
        self,
        *,
        recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
        source_resource_id: Optional[str] = None,
        property_bag: Optional[Dict[str, str]] = None,
        target_info: Optional["_models.TargetRestoreInfo"] = None,
        recovery_mode: Optional[Union[str, "_models.RecoveryMode"]] = None,
        target_virtual_machine_id: Optional[str] = None,
        point_in_time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
         "AlternateLocation", "RestoreDisks", and "Offline".
        :paramtype recovery_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
        :keyword source_resource_id: Fully qualified ARM ID of the VM on which workload that was
         running is being recovered.
        :paramtype source_resource_id: str
        :keyword property_bag: Workload specific property bag.
        :paramtype property_bag: dict[str, str]
        :keyword target_info: Details of target database.
        :paramtype target_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
        :keyword recovery_mode: Defines whether the current recovery mode is file restore or database
         restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
        :paramtype recovery_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
        :keyword target_virtual_machine_id: This is the complete ARM Id of the target VM
         For e.g.
         /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
        :paramtype target_virtual_machine_id: str
        :keyword point_in_time: PointInTime value.
        :paramtype point_in_time: ~datetime.datetime
        """
        super().__init__(
            recovery_type=recovery_type,
            source_resource_id=source_resource_id,
            property_bag=property_bag,
            target_info=target_info,
            recovery_mode=recovery_mode,
            target_virtual_machine_id=target_virtual_machine_id,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadPointInTimeRestoreRequest"  # type: str
        self.point_in_time = point_in_time
class AzureWorkloadSAPHanaPointInTimeRecoveryPoint(AzureWorkloadPointInTimeRecoveryPoint):
    """Recovery point specific to PointInTime in SAPHana.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_point_time_in_utc: UTC time at which recovery point was created.
    :vartype recovery_point_time_in_utc: ~datetime.datetime
    :ivar type: Type of restore point. Known values are: "Invalid", "Full", "Log", "Differential",
     and "Incremental".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointType
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    :ivar time_ranges: List of log ranges.
    :vartype time_ranges:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.PointInTimeRange]
    """
    # object_type is the required polymorphic discriminator; the timestamp and
    # restore-point type are populated by the service and read-only on the client.
    _validation = {
        "object_type": {"required": True},
        "recovery_point_time_in_utc": {"readonly": True},
        "type": {"readonly": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_time_in_utc": {"key": "recoveryPointTimeInUTC", "type": "iso-8601"},
        "type": {"key": "type", "type": "str"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
        "time_ranges": {"key": "timeRanges", "type": "[PointInTimeRange]"},
    }
    def __init__(
        self,
        *,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        time_ranges: Optional[List["_models.PointInTimeRange"]] = None,
        **kwargs
    ):
        """
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        :keyword time_ranges: List of log ranges.
        :paramtype time_ranges:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.PointInTimeRange]
        """
        super().__init__(
            recovery_point_tier_details=recovery_point_tier_details,
            recovery_point_move_readiness_info=recovery_point_move_readiness_info,
            time_ranges=time_ranges,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadSAPHanaPointInTimeRecoveryPoint"  # type: str
class AzureWorkloadSAPHanaRestoreRequest(AzureWorkloadRestoreRequest):
    """AzureWorkload SAP Hana-specific restore.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    AzureWorkloadSAPHanaPointInTimeRestoreRequest

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
     "AlternateLocation", "RestoreDisks", and "Offline".
    :vartype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :ivar source_resource_id: Fully qualified ARM ID of the VM on which workload that was running
     is being recovered.
    :vartype source_resource_id: str
    :ivar property_bag: Workload specific property bag.
    :vartype property_bag: dict[str, str]
    :ivar target_info: Details of target database.
    :vartype target_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
    :ivar recovery_mode: Defines whether the current recovery mode is file restore or database
     restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
    :vartype recovery_mode: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
    :ivar target_virtual_machine_id: This is the complete ARM Id of the target VM
     For e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :vartype target_virtual_machine_id: str
    """
    # object_type is the polymorphic discriminator and must always be present.
    _validation = {
        "object_type": {"required": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_type": {"key": "recoveryType", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "target_info": {"key": "targetInfo", "type": "TargetRestoreInfo"},
        "recovery_mode": {"key": "recoveryMode", "type": "str"},
        "target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
    }
    # Discriminator value -> model class name, used to resolve the concrete
    # subclass during polymorphic deserialization.
    _subtype_map = {
        "object_type": {
            "AzureWorkloadSAPHanaPointInTimeRestoreRequest": "AzureWorkloadSAPHanaPointInTimeRestoreRequest"
        }
    }
    def __init__(
        self,
        *,
        recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
        source_resource_id: Optional[str] = None,
        property_bag: Optional[Dict[str, str]] = None,
        target_info: Optional["_models.TargetRestoreInfo"] = None,
        recovery_mode: Optional[Union[str, "_models.RecoveryMode"]] = None,
        target_virtual_machine_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
         "AlternateLocation", "RestoreDisks", and "Offline".
        :paramtype recovery_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
        :keyword source_resource_id: Fully qualified ARM ID of the VM on which workload that was
         running is being recovered.
        :paramtype source_resource_id: str
        :keyword property_bag: Workload specific property bag.
        :paramtype property_bag: dict[str, str]
        :keyword target_info: Details of target database.
        :paramtype target_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
        :keyword recovery_mode: Defines whether the current recovery mode is file restore or database
         restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
        :paramtype recovery_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
        :keyword target_virtual_machine_id: This is the complete ARM Id of the target VM
         For e.g.
         /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
        :paramtype target_virtual_machine_id: str
        """
        super().__init__(
            recovery_type=recovery_type,
            source_resource_id=source_resource_id,
            property_bag=property_bag,
            target_info=target_info,
            recovery_mode=recovery_mode,
            target_virtual_machine_id=target_virtual_machine_id,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadSAPHanaRestoreRequest"  # type: str
class AzureWorkloadSAPHanaPointInTimeRestoreRequest(AzureWorkloadSAPHanaRestoreRequest):
    """AzureWorkload SAP Hana -specific restore. Specifically for PointInTime/Log restore.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
     "AlternateLocation", "RestoreDisks", and "Offline".
    :vartype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :ivar source_resource_id: Fully qualified ARM ID of the VM on which workload that was running
     is being recovered.
    :vartype source_resource_id: str
    :ivar property_bag: Workload specific property bag.
    :vartype property_bag: dict[str, str]
    :ivar target_info: Details of target database.
    :vartype target_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
    :ivar recovery_mode: Defines whether the current recovery mode is file restore or database
     restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
    :vartype recovery_mode: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
    :ivar target_virtual_machine_id: This is the complete ARM Id of the target VM
     For e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :vartype target_virtual_machine_id: str
    :ivar point_in_time: PointInTime value.
    :vartype point_in_time: ~datetime.datetime
    """
    # object_type is the polymorphic discriminator and must always be present.
    _validation = {
        "object_type": {"required": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_type": {"key": "recoveryType", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "target_info": {"key": "targetInfo", "type": "TargetRestoreInfo"},
        "recovery_mode": {"key": "recoveryMode", "type": "str"},
        "target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
        "point_in_time": {"key": "pointInTime", "type": "iso-8601"},
    }
    def __init__(
        self,
        *,
        recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
        source_resource_id: Optional[str] = None,
        property_bag: Optional[Dict[str, str]] = None,
        target_info: Optional["_models.TargetRestoreInfo"] = None,
        recovery_mode: Optional[Union[str, "_models.RecoveryMode"]] = None,
        target_virtual_machine_id: Optional[str] = None,
        point_in_time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
         "AlternateLocation", "RestoreDisks", and "Offline".
        :paramtype recovery_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
        :keyword source_resource_id: Fully qualified ARM ID of the VM on which workload that was
         running is being recovered.
        :paramtype source_resource_id: str
        :keyword property_bag: Workload specific property bag.
        :paramtype property_bag: dict[str, str]
        :keyword target_info: Details of target database.
        :paramtype target_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
        :keyword recovery_mode: Defines whether the current recovery mode is file restore or database
         restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
        :paramtype recovery_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
        :keyword target_virtual_machine_id: This is the complete ARM Id of the target VM
         For e.g.
         /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
        :paramtype target_virtual_machine_id: str
        :keyword point_in_time: PointInTime value.
        :paramtype point_in_time: ~datetime.datetime
        """
        super().__init__(
            recovery_type=recovery_type,
            source_resource_id=source_resource_id,
            property_bag=property_bag,
            target_info=target_info,
            recovery_mode=recovery_mode,
            target_virtual_machine_id=target_virtual_machine_id,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadSAPHanaPointInTimeRestoreRequest"  # type: str
        self.point_in_time = point_in_time
class AzureWorkloadSAPHanaRecoveryPoint(AzureWorkloadRecoveryPoint):
    """SAPHana specific recoverypoint, specifically encapsulates full/diff recoverypoints.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_point_time_in_utc: UTC time at which recovery point was created.
    :vartype recovery_point_time_in_utc: ~datetime.datetime
    :ivar type: Type of restore point. Known values are: "Invalid", "Full", "Log", "Differential",
     and "Incremental".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointType
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    """
    # object_type is the required polymorphic discriminator; the timestamp and
    # restore-point type are populated by the service and read-only on the client.
    _validation = {
        "object_type": {"required": True},
        "recovery_point_time_in_utc": {"readonly": True},
        "type": {"readonly": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_time_in_utc": {"key": "recoveryPointTimeInUTC", "type": "iso-8601"},
        "type": {"key": "type", "type": "str"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
    }
    def __init__(
        self,
        *,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        **kwargs
    ):
        """
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        """
        super().__init__(
            recovery_point_tier_details=recovery_point_tier_details,
            recovery_point_move_readiness_info=recovery_point_move_readiness_info,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadSAPHanaRecoveryPoint"  # type: str
class AzureWorkloadSQLRecoveryPoint(AzureWorkloadRecoveryPoint):
    """SQL specific recoverypoint, specifically encapsulates full/diff recoverypoint along with extended info.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    AzureWorkloadSQLPointInTimeRecoveryPoint

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_point_time_in_utc: UTC time at which recovery point was created.
    :vartype recovery_point_time_in_utc: ~datetime.datetime
    :ivar type: Type of restore point. Known values are: "Invalid", "Full", "Log", "Differential",
     and "Incremental".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointType
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    :ivar extended_info: Extended Info that provides data directory details. Will be populated in
     two cases:
     When a specific recovery point is accessed using GetRecoveryPoint
     Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadSQLRecoveryPointExtendedInfo
    """
    # object_type is the required polymorphic discriminator; the timestamp and
    # restore-point type are populated by the service and read-only on the client.
    _validation = {
        "object_type": {"required": True},
        "recovery_point_time_in_utc": {"readonly": True},
        "type": {"readonly": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_time_in_utc": {"key": "recoveryPointTimeInUTC", "type": "iso-8601"},
        "type": {"key": "type", "type": "str"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
        "extended_info": {"key": "extendedInfo", "type": "AzureWorkloadSQLRecoveryPointExtendedInfo"},
    }
    # Discriminator value -> model class name, used to resolve the concrete
    # subclass during polymorphic deserialization.
    _subtype_map = {
        "object_type": {"AzureWorkloadSQLPointInTimeRecoveryPoint": "AzureWorkloadSQLPointInTimeRecoveryPoint"}
    }
    def __init__(
        self,
        *,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        extended_info: Optional["_models.AzureWorkloadSQLRecoveryPointExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        :keyword extended_info: Extended Info that provides data directory details. Will be populated
         in two cases:
         When a specific recovery point is accessed using GetRecoveryPoint
         Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadSQLRecoveryPointExtendedInfo
        """
        super().__init__(
            recovery_point_tier_details=recovery_point_tier_details,
            recovery_point_move_readiness_info=recovery_point_move_readiness_info,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadSQLRecoveryPoint"  # type: str
        self.extended_info = extended_info
class AzureWorkloadSQLPointInTimeRecoveryPoint(AzureWorkloadSQLRecoveryPoint):
    """Recovery point specific to PointInTime.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_point_time_in_utc: UTC time at which recovery point was created.
    :vartype recovery_point_time_in_utc: ~datetime.datetime
    :ivar type: Type of restore point. Known values are: "Invalid", "Full", "Log", "Differential",
     and "Incremental".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointType
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    :ivar extended_info: Extended Info that provides data directory details. Will be populated in
     two cases:
     When a specific recovery point is accessed using GetRecoveryPoint
     Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadSQLRecoveryPointExtendedInfo
    :ivar time_ranges: List of log ranges.
    :vartype time_ranges:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.PointInTimeRange]
    """
    # object_type is the required polymorphic discriminator; the timestamp and
    # restore-point type are populated by the service and read-only on the client.
    _validation = {
        "object_type": {"required": True},
        "recovery_point_time_in_utc": {"readonly": True},
        "type": {"readonly": True},
    }
    # Python attribute name -> wire (JSON) key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_time_in_utc": {"key": "recoveryPointTimeInUTC", "type": "iso-8601"},
        "type": {"key": "type", "type": "str"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
        "extended_info": {"key": "extendedInfo", "type": "AzureWorkloadSQLRecoveryPointExtendedInfo"},
        "time_ranges": {"key": "timeRanges", "type": "[PointInTimeRange]"},
    }
    def __init__(
        self,
        *,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        extended_info: Optional["_models.AzureWorkloadSQLRecoveryPointExtendedInfo"] = None,
        time_ranges: Optional[List["_models.PointInTimeRange"]] = None,
        **kwargs
    ):
        """
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        :keyword extended_info: Extended Info that provides data directory details. Will be populated
         in two cases:
         When a specific recovery point is accessed using GetRecoveryPoint
         Or when ListRecoveryPoints is called for Log RP only with ExtendedInfo query filter.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.AzureWorkloadSQLRecoveryPointExtendedInfo
        :keyword time_ranges: List of log ranges.
        :paramtype time_ranges:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.PointInTimeRange]
        """
        super().__init__(
            recovery_point_tier_details=recovery_point_tier_details,
            recovery_point_move_readiness_info=recovery_point_move_readiness_info,
            extended_info=extended_info,
            **kwargs
        )
        # Override the base discriminator with this concrete type's value.
        self.object_type = "AzureWorkloadSQLPointInTimeRecoveryPoint"  # type: str
        self.time_ranges = time_ranges
class AzureWorkloadSQLRestoreRequest(AzureWorkloadRestoreRequest):
"""AzureWorkload SQL -specific restore. Specifically for full/diff restore.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AzureWorkloadSQLPointInTimeRestoreRequest
All required parameters must be populated in order to send to Azure.
:ivar object_type: This property will be used as the discriminator for deciding the specific
types in the polymorphic chain of types. Required.
:vartype object_type: str
:ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
"AlternateLocation", "RestoreDisks", and "Offline".
:vartype recovery_type: str or
~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
:ivar source_resource_id: Fully qualified ARM ID of the VM on which workload that was running
is being recovered.
:vartype source_resource_id: str
:ivar property_bag: Workload specific property bag.
:vartype property_bag: dict[str, str]
:ivar target_info: Details of target database.
:vartype target_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
:ivar recovery_mode: Defines whether the current recovery mode is file restore or database
restore. Known values are: "Invalid", "FileRecovery", and "WorkloadRecovery".
:vartype recovery_mode: str or
~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
:ivar target_virtual_machine_id: This is the complete ARM Id of the target VM
For e.g.
/subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
:vartype target_virtual_machine_id: str
:ivar should_use_alternate_target_location: Default option set to true. If this is set to
false, alternate data directory must be provided.
:vartype should_use_alternate_target_location: bool
:ivar is_non_recoverable: SQL specific property where user can chose to set no-recovery when
restore operation is tried.
:vartype is_non_recoverable: bool
:ivar alternate_directory_paths: Data directory details.
:vartype alternate_directory_paths:
list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryMapping]
"""
_validation = {
"object_type": {"required": True},
}
_attribute_map = {
"object_type": {"key": "objectType", "type": "str"},
"recovery_type": {"key": "recoveryType", "type": "str"},
"source_resource_id": {"key": "sourceResourceId", "type": "str"},
"property_bag": {"key": "propertyBag", "type": "{str}"},
"target_info": {"key": "targetInfo", "type": "TargetRestoreInfo"},
"recovery_mode": {"key": "recoveryMode", "type": "str"},
"target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
"should_use_alternate_target_location": {"key": "shouldUseAlternateTargetLocation", "type": "bool"},
"is_non_recoverable": {"key": "isNonRecoverable", "type": "bool"},
"alternate_directory_paths": {"key": "alternateDirectoryPaths", "type": "[SQLDataDirectoryMapping]"},
}
_subtype_map = {
"object_type": {"AzureWorkloadSQLPointInTimeRestoreRequest": "AzureWorkloadSQLPointInTimeRestoreRequest"}
}
def __init__(
    self,
    *,
    recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
    source_resource_id: Optional[str] = None,
    property_bag: Optional[Dict[str, str]] = None,
    target_info: Optional["_models.TargetRestoreInfo"] = None,
    recovery_mode: Optional[Union[str, "_models.RecoveryMode"]] = None,
    target_virtual_machine_id: Optional[str] = None,
    should_use_alternate_target_location: Optional[bool] = None,
    is_non_recoverable: Optional[bool] = None,
    alternate_directory_paths: Optional[List["_models.SQLDataDirectoryMapping"]] = None,
    **kwargs
):
    """Build an Azure Workload SQL restore request.

    :keyword recovery_type: Type of this recovery. Known values: "Invalid",
     "OriginalLocation", "AlternateLocation", "RestoreDisks", and "Offline".
    :paramtype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :keyword source_resource_id: Fully qualified ARM ID of the VM on which the
     workload being recovered was running.
    :paramtype source_resource_id: str
    :keyword property_bag: Workload specific property bag.
    :paramtype property_bag: dict[str, str]
    :keyword target_info: Details of the target database.
    :paramtype target_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
    :keyword recovery_mode: Whether the current recovery mode is file restore or
     database restore. Known values: "Invalid", "FileRecovery", and
     "WorkloadRecovery".
    :paramtype recovery_mode: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
    :keyword target_virtual_machine_id: Complete ARM Id of the target VM, e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :paramtype target_virtual_machine_id: str
    :keyword should_use_alternate_target_location: Defaults to true; when set to
     false an alternate data directory must be provided.
    :paramtype should_use_alternate_target_location: bool
    :keyword is_non_recoverable: SQL specific flag letting the user choose
     no-recovery when the restore operation is attempted.
    :paramtype is_non_recoverable: bool
    :keyword alternate_directory_paths: Data directory details.
    :paramtype alternate_directory_paths:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryMapping]
    """
    # The generic restore settings are owned by the base restore request.
    super().__init__(
        recovery_type=recovery_type,
        source_resource_id=source_resource_id,
        property_bag=property_bag,
        target_info=target_info,
        recovery_mode=recovery_mode,
        target_virtual_machine_id=target_virtual_machine_id,
        **kwargs
    )
    # Serializer discriminator for the polymorphic restore-request chain.
    self.object_type = "AzureWorkloadSQLRestoreRequest"  # type: str
    # SQL-specific restore options.
    self.should_use_alternate_target_location = should_use_alternate_target_location
    self.is_non_recoverable = is_non_recoverable
    self.alternate_directory_paths = alternate_directory_paths
class AzureWorkloadSQLPointInTimeRestoreRequest(
    AzureWorkloadSQLRestoreRequest
):  # pylint: disable=too-many-instance-attributes
    """AzureWorkload SQL-specific restore request for PointInTime/Log restore.

    Extends the plain SQL restore request with a single extra value,
    ``point_in_time``, identifying the log point to restore to.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: Discriminator deciding the specific type in the
     polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_type: Type of this recovery. Known values: "Invalid",
     "OriginalLocation", "AlternateLocation", "RestoreDisks", and "Offline".
    :vartype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :ivar source_resource_id: Fully qualified ARM ID of the VM on which the
     workload being recovered was running.
    :vartype source_resource_id: str
    :ivar property_bag: Workload specific property bag.
    :vartype property_bag: dict[str, str]
    :ivar target_info: Details of the target database.
    :vartype target_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
    :ivar recovery_mode: Whether the current recovery mode is file restore or
     database restore. Known values: "Invalid", "FileRecovery", and
     "WorkloadRecovery".
    :vartype recovery_mode: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
    :ivar target_virtual_machine_id: Complete ARM Id of the target VM, e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :vartype target_virtual_machine_id: str
    :ivar should_use_alternate_target_location: Defaults to true; when set to
     false an alternate data directory must be provided.
    :vartype should_use_alternate_target_location: bool
    :ivar is_non_recoverable: SQL specific flag letting the user choose
     no-recovery when the restore operation is attempted.
    :vartype is_non_recoverable: bool
    :ivar alternate_directory_paths: Data directory details.
    :vartype alternate_directory_paths:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryMapping]
    :ivar point_in_time: PointInTime value.
    :vartype point_in_time: ~datetime.datetime
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_type": {"key": "recoveryType", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "target_info": {"key": "targetInfo", "type": "TargetRestoreInfo"},
        "recovery_mode": {"key": "recoveryMode", "type": "str"},
        "target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
        "should_use_alternate_target_location": {"key": "shouldUseAlternateTargetLocation", "type": "bool"},
        "is_non_recoverable": {"key": "isNonRecoverable", "type": "bool"},
        "alternate_directory_paths": {"key": "alternateDirectoryPaths", "type": "[SQLDataDirectoryMapping]"},
        "point_in_time": {"key": "pointInTime", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
        source_resource_id: Optional[str] = None,
        property_bag: Optional[Dict[str, str]] = None,
        target_info: Optional["_models.TargetRestoreInfo"] = None,
        recovery_mode: Optional[Union[str, "_models.RecoveryMode"]] = None,
        target_virtual_machine_id: Optional[str] = None,
        should_use_alternate_target_location: Optional[bool] = None,
        is_non_recoverable: Optional[bool] = None,
        alternate_directory_paths: Optional[List["_models.SQLDataDirectoryMapping"]] = None,
        point_in_time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword recovery_type: Type of this recovery. Known values: "Invalid",
         "OriginalLocation", "AlternateLocation", "RestoreDisks", and "Offline".
        :paramtype recovery_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
        :keyword source_resource_id: Fully qualified ARM ID of the VM on which the
         workload being recovered was running.
        :paramtype source_resource_id: str
        :keyword property_bag: Workload specific property bag.
        :paramtype property_bag: dict[str, str]
        :keyword target_info: Details of the target database.
        :paramtype target_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.TargetRestoreInfo
        :keyword recovery_mode: Whether the current recovery mode is file restore
         or database restore. Known values: "Invalid", "FileRecovery", and
         "WorkloadRecovery".
        :paramtype recovery_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryMode
        :keyword target_virtual_machine_id: Complete ARM Id of the target VM, e.g.
         /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
        :paramtype target_virtual_machine_id: str
        :keyword should_use_alternate_target_location: Defaults to true; when set
         to false an alternate data directory must be provided.
        :paramtype should_use_alternate_target_location: bool
        :keyword is_non_recoverable: SQL specific flag letting the user choose
         no-recovery when the restore operation is attempted.
        :paramtype is_non_recoverable: bool
        :keyword alternate_directory_paths: Data directory details.
        :paramtype alternate_directory_paths:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryMapping]
        :keyword point_in_time: PointInTime value.
        :paramtype point_in_time: ~datetime.datetime
        """
        # Everything except point_in_time is handled by the SQL restore base.
        super().__init__(
            recovery_type=recovery_type,
            source_resource_id=source_resource_id,
            property_bag=property_bag,
            target_info=target_info,
            recovery_mode=recovery_mode,
            target_virtual_machine_id=target_virtual_machine_id,
            should_use_alternate_target_location=should_use_alternate_target_location,
            is_non_recoverable=is_non_recoverable,
            alternate_directory_paths=alternate_directory_paths,
            **kwargs
        )
        # Override the discriminator set by the base class.
        self.object_type = "AzureWorkloadSQLPointInTimeRestoreRequest"  # type: str
        self.point_in_time = point_in_time
class AzureWorkloadSQLRecoveryPointExtendedInfo(_serialization.Model):
    """Extended info for a SQL recovery point.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar data_directory_time_in_utc: UTC time at which the data directory info
     was captured.
    :vartype data_directory_time_in_utc: ~datetime.datetime
    :ivar data_directory_paths: Data directory paths used during the restore
     operation.
    :vartype data_directory_paths:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectory]
    """

    _validation = {
        "data_directory_time_in_utc": {"readonly": True},
        "data_directory_paths": {"readonly": True},
    }

    _attribute_map = {
        "data_directory_time_in_utc": {"key": "dataDirectoryTimeInUTC", "type": "iso-8601"},
        "data_directory_paths": {"key": "dataDirectoryPaths", "type": "[SQLDataDirectory]"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # Both fields are read-only and populated by the service on responses.
        self.data_directory_time_in_utc = None
        self.data_directory_paths = None
class BackupManagementUsage(_serialization.Model):
    """Backup management usages of a vault.

    :ivar unit: Unit in which the usage is measured. Known values: "Count",
     "Bytes", "Seconds", "Percent", "CountPerSecond", and "BytesPerSecond".
    :vartype unit: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.UsagesUnit
    :ivar quota_period: Quota period of the usage.
    :vartype quota_period: str
    :ivar next_reset_time: Next time the usage counter resets.
    :vartype next_reset_time: ~datetime.datetime
    :ivar current_value: Current value of the usage.
    :vartype current_value: int
    :ivar limit: Limit of the usage.
    :vartype limit: int
    :ivar name: Name of the usage.
    :vartype name: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.NameInfo
    """

    _attribute_map = {
        "unit": {"key": "unit", "type": "str"},
        "quota_period": {"key": "quotaPeriod", "type": "str"},
        "next_reset_time": {"key": "nextResetTime", "type": "iso-8601"},
        "current_value": {"key": "currentValue", "type": "int"},
        "limit": {"key": "limit", "type": "int"},
        "name": {"key": "name", "type": "NameInfo"},
    }

    def __init__(
        self,
        *,
        unit: Optional[Union[str, "_models.UsagesUnit"]] = None,
        quota_period: Optional[str] = None,
        next_reset_time: Optional[datetime.datetime] = None,
        current_value: Optional[int] = None,
        limit: Optional[int] = None,
        name: Optional["_models.NameInfo"] = None,
        **kwargs
    ):
        """
        :keyword unit: Unit in which the usage is measured. Known values: "Count",
         "Bytes", "Seconds", "Percent", "CountPerSecond", and "BytesPerSecond".
        :paramtype unit: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.UsagesUnit
        :keyword quota_period: Quota period of the usage.
        :paramtype quota_period: str
        :keyword next_reset_time: Next time the usage counter resets.
        :paramtype next_reset_time: ~datetime.datetime
        :keyword current_value: Current value of the usage.
        :paramtype current_value: int
        :keyword limit: Limit of the usage.
        :paramtype limit: int
        :keyword name: Name of the usage.
        :paramtype name: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.NameInfo
        """
        super().__init__(**kwargs)
        # Plain value object: store everything exactly as given.
        self.unit, self.quota_period = unit, quota_period
        self.next_reset_time, self.current_value = next_reset_time, current_value
        self.limit, self.name = limit, name
class BackupManagementUsageList(_serialization.Model):
    """Collection of backup management usages for a vault.

    :ivar value: The list of backup management usages for the given vault.
    :vartype value:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementUsage]
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BackupManagementUsage]"},
    }

    def __init__(self, *, value: Optional[List["_models.BackupManagementUsage"]] = None, **kwargs):
        """
        :keyword value: The list of backup management usages for the given vault.
        :paramtype value:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementUsage]
        """
        super().__init__(**kwargs)
        # Single-field wrapper used as a list response envelope.
        self.value = value
class BackupResourceConfig(_serialization.Model):
    """The resource storage details.

    :ivar storage_model_type: Storage type. Known values: "Invalid",
     "GeoRedundant", "LocallyRedundant", "ZoneRedundant", and
     "ReadAccessGeoZoneRedundant".
    :vartype storage_model_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.StorageType
    :ivar storage_type: Storage type. Known values: "Invalid", "GeoRedundant",
     "LocallyRedundant", "ZoneRedundant", and "ReadAccessGeoZoneRedundant".
    :vartype storage_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.StorageType
    :ivar storage_type_state: Locked or Unlocked. Once a machine is registered
     against a resource, the storageTypeState is always Locked. Known values:
     "Invalid", "Locked", and "Unlocked".
    :vartype storage_type_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.StorageTypeState
    :ivar cross_region_restore_flag: Opt-in details of the Cross Region Restore
     feature.
    :vartype cross_region_restore_flag: bool
    """

    _attribute_map = {
        "storage_model_type": {"key": "storageModelType", "type": "str"},
        "storage_type": {"key": "storageType", "type": "str"},
        "storage_type_state": {"key": "storageTypeState", "type": "str"},
        "cross_region_restore_flag": {"key": "crossRegionRestoreFlag", "type": "bool"},
    }

    def __init__(
        self,
        *,
        storage_model_type: Optional[Union[str, "_models.StorageType"]] = None,
        storage_type: Optional[Union[str, "_models.StorageType"]] = None,
        storage_type_state: Optional[Union[str, "_models.StorageTypeState"]] = None,
        cross_region_restore_flag: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword storage_model_type: Storage type. Known values: "Invalid",
         "GeoRedundant", "LocallyRedundant", "ZoneRedundant", and
         "ReadAccessGeoZoneRedundant".
        :paramtype storage_model_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.StorageType
        :keyword storage_type: Storage type. Known values: "Invalid",
         "GeoRedundant", "LocallyRedundant", "ZoneRedundant", and
         "ReadAccessGeoZoneRedundant".
        :paramtype storage_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.StorageType
        :keyword storage_type_state: Locked or Unlocked. Once a machine is
         registered against a resource, the storageTypeState is always Locked.
         Known values: "Invalid", "Locked", and "Unlocked".
        :paramtype storage_type_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.StorageTypeState
        :keyword cross_region_restore_flag: Opt-in details of the Cross Region
         Restore feature.
        :paramtype cross_region_restore_flag: bool
        """
        super().__init__(**kwargs)
        # Plain value object: store everything exactly as given.
        self.storage_model_type, self.storage_type = storage_model_type, storage_type
        self.storage_type_state = storage_type_state
        self.cross_region_restore_flag = cross_region_restore_flag
class BackupResourceConfigResource(Resource):
    """ARM resource wrapper around :class:`BackupResourceConfig`.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id representing the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: BackupResourceConfigResource properties.
    :vartype properties:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupResourceConfig
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "BackupResourceConfig"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.BackupResourceConfig"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: BackupResourceConfigResource properties.
        :paramtype properties:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupResourceConfig
        """
        # Generic ARM envelope fields live on the Resource base class.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class BEKDetails(_serialization.Model):
    """BEK is bitlocker encryption key.

    :ivar secret_url: Secret is BEK.
    :vartype secret_url: str
    :ivar secret_vault_id: ID of the Key Vault where this Secret is stored.
    :vartype secret_vault_id: str
    :ivar secret_data: BEK data.
    :vartype secret_data: str
    """

    _attribute_map = {
        "secret_url": {"key": "secretUrl", "type": "str"},
        "secret_vault_id": {"key": "secretVaultId", "type": "str"},
        "secret_data": {"key": "secretData", "type": "str"},
    }

    def __init__(
        self,
        *,
        secret_url: Optional[str] = None,
        secret_vault_id: Optional[str] = None,
        secret_data: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword secret_url: Secret is BEK.
        :paramtype secret_url: str
        :keyword secret_vault_id: ID of the Key Vault where this Secret is stored.
        :paramtype secret_vault_id: str
        :keyword secret_data: BEK data.
        :paramtype secret_data: str
        """
        super().__init__(**kwargs)
        # Plain value object: store everything exactly as given.
        self.secret_url, self.secret_vault_id = secret_url, secret_vault_id
        self.secret_data = secret_data
class BMSAADPropertiesQueryObject(_serialization.Model):
    """Filters to list backup items.

    :ivar backup_management_type: Backup management type for the backed up item.
     Known values: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer",
     "AzureSql", "AzureStorage", "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    """

    _attribute_map = {
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
    }

    def __init__(
        self, *, backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None, **kwargs
    ):
        """
        :keyword backup_management_type: Backup management type for the backed up
         item. Known values: "Invalid", "AzureIaasVM", "MAB", "DPM",
         "AzureBackupServer", "AzureSql", "AzureStorage", "AzureWorkload", and
         "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        """
        super().__init__(**kwargs)
        # Single optional filter field.
        self.backup_management_type = backup_management_type
class BMSBackupSummariesQueryObject(_serialization.Model):
    """Query parameters to fetch backup summaries.

    :ivar type: Backup management type for this container. Known values:
     "Invalid", "BackupProtectedItemCountSummary", and
     "BackupProtectionContainerCountSummary".
    :vartype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.Type
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, *, type: Optional[Union[str, "_models.Type"]] = None, **kwargs):
        """
        :keyword type: Backup management type for this container. Known values:
         "Invalid", "BackupProtectedItemCountSummary", and
         "BackupProtectionContainerCountSummary".
        :paramtype type: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.Type
        """
        super().__init__(**kwargs)
        # NOTE: the parameter name "type" shadows the builtin, but it is part of
        # the generated public interface and must be kept.
        self.type = type
class BMSRPQueryObject(_serialization.Model):
    """Filters to list backup copies.

    :ivar start_date: Only include backup copies created after this time.
    :vartype start_date: ~datetime.datetime
    :ivar end_date: Only include backup copies created before this time.
    :vartype end_date: ~datetime.datetime
    :ivar restore_point_query_type: RestorePoint type. Known values: "Invalid",
     "Full", "Log", "Differential", "FullAndDifferential", "All", and
     "Incremental".
    :vartype restore_point_query_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointQueryType
    :ivar extended_info: In Get Recovery Point, tells whether extended
     information about the recovery point is requested.
    :vartype extended_info: bool
    :ivar move_ready_rp_only: Whether the RP can be moved to another tier.
    :vartype move_ready_rp_only: bool
    """

    _attribute_map = {
        "start_date": {"key": "startDate", "type": "iso-8601"},
        "end_date": {"key": "endDate", "type": "iso-8601"},
        "restore_point_query_type": {"key": "restorePointQueryType", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "bool"},
        "move_ready_rp_only": {"key": "moveReadyRPOnly", "type": "bool"},
    }

    def __init__(
        self,
        *,
        start_date: Optional[datetime.datetime] = None,
        end_date: Optional[datetime.datetime] = None,
        restore_point_query_type: Optional[Union[str, "_models.RestorePointQueryType"]] = None,
        extended_info: Optional[bool] = None,
        move_ready_rp_only: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword start_date: Only include backup copies created after this time.
        :paramtype start_date: ~datetime.datetime
        :keyword end_date: Only include backup copies created before this time.
        :paramtype end_date: ~datetime.datetime
        :keyword restore_point_query_type: RestorePoint type. Known values:
         "Invalid", "Full", "Log", "Differential", "FullAndDifferential", "All",
         and "Incremental".
        :paramtype restore_point_query_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestorePointQueryType
        :keyword extended_info: In Get Recovery Point, tells whether extended
         information about the recovery point is requested.
        :paramtype extended_info: bool
        :keyword move_ready_rp_only: Whether the RP can be moved to another tier.
        :paramtype move_ready_rp_only: bool
        """
        super().__init__(**kwargs)
        # Plain value object: store everything exactly as given.
        self.start_date, self.end_date = start_date, end_date
        self.restore_point_query_type = restore_point_query_type
        self.extended_info, self.move_ready_rp_only = extended_info, move_ready_rp_only
class ClientScriptForConnect(_serialization.Model):
    """Client script details for file / folder restore.

    :ivar script_content: File content of the client script for file / folder
     restore.
    :vartype script_content: str
    :ivar script_extension: File extension of the client script for file /
     folder restore - .ps1 , .sh , etc.
    :vartype script_extension: str
    :ivar os_type: OS type - Windows, Linux etc. for which this file / folder
     restore client script works.
    :vartype os_type: str
    :ivar url: URL of Executable from where to source the content. If this is
     not null then ScriptContent should not be used.
    :vartype url: str
    :ivar script_name_suffix: Mandatory suffix to add to the script name given
     for download to the user; ignored when null or empty.
    :vartype script_name_suffix: str
    """

    _attribute_map = {
        "script_content": {"key": "scriptContent", "type": "str"},
        "script_extension": {"key": "scriptExtension", "type": "str"},
        "os_type": {"key": "osType", "type": "str"},
        "url": {"key": "url", "type": "str"},
        "script_name_suffix": {"key": "scriptNameSuffix", "type": "str"},
    }

    def __init__(
        self,
        *,
        script_content: Optional[str] = None,
        script_extension: Optional[str] = None,
        os_type: Optional[str] = None,
        url: Optional[str] = None,
        script_name_suffix: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword script_content: File content of the client script for file /
         folder restore.
        :paramtype script_content: str
        :keyword script_extension: File extension of the client script for file /
         folder restore - .ps1 , .sh , etc.
        :paramtype script_extension: str
        :keyword os_type: OS type - Windows, Linux etc. for which this file /
         folder restore client script works.
        :paramtype os_type: str
        :keyword url: URL of Executable from where to source the content. If this
         is not null then ScriptContent should not be used.
        :paramtype url: str
        :keyword script_name_suffix: Mandatory suffix to add to the script name
         given for download to the user; ignored when null or empty.
        :paramtype script_name_suffix: str
        """
        super().__init__(**kwargs)
        # Plain value object: store everything exactly as given.
        self.script_content, self.script_extension = script_content, script_extension
        self.os_type, self.url = os_type, url
        self.script_name_suffix = script_name_suffix
class CrossRegionRestoreRequest(_serialization.Model):
    """Request payload for triggering a cross region restore.

    :ivar cross_region_restore_access_details: Access details for cross region
     restore.
    :vartype cross_region_restore_access_details:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrrAccessToken
    :ivar restore_request: Request object for triggering restore.
    :vartype restore_request: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestoreRequest
    """

    _attribute_map = {
        "cross_region_restore_access_details": {"key": "crossRegionRestoreAccessDetails", "type": "CrrAccessToken"},
        "restore_request": {"key": "restoreRequest", "type": "RestoreRequest"},
    }

    def __init__(
        self,
        *,
        cross_region_restore_access_details: Optional["_models.CrrAccessToken"] = None,
        restore_request: Optional["_models.RestoreRequest"] = None,
        **kwargs
    ):
        """
        :keyword cross_region_restore_access_details: Access details for cross
         region restore.
        :paramtype cross_region_restore_access_details:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrrAccessToken
        :keyword restore_request: Request object for triggering restore.
        :paramtype restore_request:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RestoreRequest
        """
        super().__init__(**kwargs)
        # Pair of sub-objects forwarded verbatim to the service.
        self.cross_region_restore_access_details = cross_region_restore_access_details
        self.restore_request = restore_request
class CrossRegionRestoreRequestResource(Resource):
    """ARM resource wrapper around :class:`CrossRegionRestoreRequest`.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id representing the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: CrossRegionRestoreRequestResource properties.
    :vartype properties:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrossRegionRestoreRequest
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "CrossRegionRestoreRequest"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.CrossRegionRestoreRequest"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: CrossRegionRestoreRequestResource properties.
        :paramtype properties:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrossRegionRestoreRequest
        """
        # Generic ARM envelope fields live on the Resource base class.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class CrrAccessToken(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""CrrAccessToken.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
WorkloadCrrAccessToken
All required parameters must be populated in order to send to Azure.
:ivar object_type: Type of the specific object - used for deserializing. Required.
:vartype object_type: str
:ivar access_token_string: Access token used for authentication.
:vartype access_token_string: str
:ivar subscription_id: Subscription Id of the source vault.
:vartype subscription_id: str
:ivar resource_group_name: Resource Group name of the source vault.
:vartype resource_group_name: str
:ivar resource_name: Resource Name of the source vault.
:vartype resource_name: str
:ivar resource_id: Resource Id of the source vault.
:vartype resource_id: str
:ivar protection_container_id: Protected item container id.
:vartype protection_container_id: int
:ivar recovery_point_id: Recovery Point Id.
:vartype recovery_point_id: str
:ivar recovery_point_time: Recovery Point Time.
:vartype recovery_point_time: str
:ivar container_name: Container Unique name.
:vartype container_name: str
:ivar container_type: Container Type.
:vartype container_type: str
:ivar backup_management_type: Backup Management Type.
:vartype backup_management_type: str
:ivar datasource_type: Datasource Type.
:vartype datasource_type: str
:ivar datasource_name: Datasource Friendly Name.
:vartype datasource_name: str
:ivar datasource_id: Datasource Id.
:vartype datasource_id: str
:ivar datasource_container_name: Datasource Container Unique Name.
:vartype datasource_container_name: str
:ivar coordinator_service_stamp_id: CoordinatorServiceStampId to be used by BCM in restore
call.
:vartype coordinator_service_stamp_id: str
:ivar coordinator_service_stamp_uri: CoordinatorServiceStampUri to be used by BCM in restore
call.
:vartype coordinator_service_stamp_uri: str
:ivar protection_service_stamp_id: ProtectionServiceStampId to be used by BCM in restore call.
:vartype protection_service_stamp_id: str
:ivar protection_service_stamp_uri: ProtectionServiceStampUri to be used by BCM in restore
call.
:vartype protection_service_stamp_uri: str
:ivar token_extended_information: Extended Information about the token like FileSpec etc.
:vartype token_extended_information: str
:ivar rp_tier_information: Recovery point Tier Information.
:vartype rp_tier_information: dict[str, str]
:ivar rp_original_sa_option: Recovery point information: Original SA option.
:vartype rp_original_sa_option: bool
:ivar rp_is_managed_virtual_machine: Recovery point information: Managed virtual machine.
:vartype rp_is_managed_virtual_machine: bool
:ivar rp_vm_size_description: Recovery point information: VM size description.
:vartype rp_vm_size_description: str
:ivar b_ms_active_region: Active region name of BMS Stamp.
:vartype b_ms_active_region: str
"""
_validation = {
"object_type": {"required": True},
}
_attribute_map = {
"object_type": {"key": "objectType", "type": "str"},
"access_token_string": {"key": "accessTokenString", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group_name": {"key": "resourceGroupName", "type": "str"},
"resource_name": {"key": "resourceName", "type": "str"},
"resource_id": {"key": "resourceId", "type": "str"},
"protection_container_id": {"key": "protectionContainerId", "type": "int"},
"recovery_point_id": {"key": "recoveryPointId", "type": "str"},
"recovery_point_time": {"key": "recoveryPointTime", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
"container_type": {"key": "containerType", "type": "str"},
"backup_management_type": {"key": "backupManagementType", "type": "str"},
"datasource_type": {"key": "datasourceType", "type": "str"},
"datasource_name": {"key": "datasourceName", "type": "str"},
"datasource_id": {"key": "datasourceId", "type": "str"},
"datasource_container_name": {"key": "datasourceContainerName", "type": "str"},
"coordinator_service_stamp_id": {"key": "coordinatorServiceStampId", "type": "str"},
"coordinator_service_stamp_uri": {"key": "coordinatorServiceStampUri", "type": "str"},
"protection_service_stamp_id": {"key": "protectionServiceStampId", "type": "str"},
"protection_service_stamp_uri": {"key": "protectionServiceStampUri", "type": "str"},
"token_extended_information": {"key": "tokenExtendedInformation", "type": "str"},
"rp_tier_information": {"key": "rpTierInformation", "type": "{str}"},
"rp_original_sa_option": {"key": "rpOriginalSAOption", "type": "bool"},
"rp_is_managed_virtual_machine": {"key": "rpIsManagedVirtualMachine", "type": "bool"},
"rp_vm_size_description": {"key": "rpVMSizeDescription", "type": "str"},
"b_ms_active_region": {"key": "bMSActiveRegion", "type": "str"},
}
_subtype_map = {"object_type": {"WorkloadCrrAccessToken": "WorkloadCrrAccessToken"}}
def __init__(  # pylint: disable=too-many-locals
    self,
    *,
    access_token_string: Optional[str] = None,
    subscription_id: Optional[str] = None,
    resource_group_name: Optional[str] = None,
    resource_name: Optional[str] = None,
    resource_id: Optional[str] = None,
    protection_container_id: Optional[int] = None,
    recovery_point_id: Optional[str] = None,
    recovery_point_time: Optional[str] = None,
    container_name: Optional[str] = None,
    container_type: Optional[str] = None,
    backup_management_type: Optional[str] = None,
    datasource_type: Optional[str] = None,
    datasource_name: Optional[str] = None,
    datasource_id: Optional[str] = None,
    datasource_container_name: Optional[str] = None,
    coordinator_service_stamp_id: Optional[str] = None,
    coordinator_service_stamp_uri: Optional[str] = None,
    protection_service_stamp_id: Optional[str] = None,
    protection_service_stamp_uri: Optional[str] = None,
    token_extended_information: Optional[str] = None,
    rp_tier_information: Optional[Dict[str, str]] = None,
    rp_original_sa_option: Optional[bool] = None,
    rp_is_managed_virtual_machine: Optional[bool] = None,
    rp_vm_size_description: Optional[str] = None,
    b_ms_active_region: Optional[str] = None,
    **kwargs
):
    """
    :keyword access_token_string: Access token used for authentication.
    :paramtype access_token_string: str
    :keyword subscription_id: Subscription Id of the source vault.
    :paramtype subscription_id: str
    :keyword resource_group_name: Resource Group name of the source vault.
    :paramtype resource_group_name: str
    :keyword resource_name: Resource Name of the source vault.
    :paramtype resource_name: str
    :keyword resource_id: Resource Id of the source vault.
    :paramtype resource_id: str
    :keyword protection_container_id: Protected item container id.
    :paramtype protection_container_id: int
    :keyword recovery_point_id: Recovery Point Id.
    :paramtype recovery_point_id: str
    :keyword recovery_point_time: Recovery Point Time.
    :paramtype recovery_point_time: str
    :keyword container_name: Container Unique name.
    :paramtype container_name: str
    :keyword container_type: Container Type.
    :paramtype container_type: str
    :keyword backup_management_type: Backup Management Type.
    :paramtype backup_management_type: str
    :keyword datasource_type: Datasource Type.
    :paramtype datasource_type: str
    :keyword datasource_name: Datasource Friendly Name.
    :paramtype datasource_name: str
    :keyword datasource_id: Datasource Id.
    :paramtype datasource_id: str
    :keyword datasource_container_name: Datasource Container Unique Name.
    :paramtype datasource_container_name: str
    :keyword coordinator_service_stamp_id: CoordinatorServiceStampId to be used by BCM in restore
     call.
    :paramtype coordinator_service_stamp_id: str
    :keyword coordinator_service_stamp_uri: CoordinatorServiceStampUri to be used by BCM in restore
     call.
    :paramtype coordinator_service_stamp_uri: str
    :keyword protection_service_stamp_id: ProtectionServiceStampId to be used by BCM in restore
     call.
    :paramtype protection_service_stamp_id: str
    :keyword protection_service_stamp_uri: ProtectionServiceStampUri to be used by BCM in restore
     call.
    :paramtype protection_service_stamp_uri: str
    :keyword token_extended_information: Extended Information about the token like FileSpec etc.
    :paramtype token_extended_information: str
    :keyword rp_tier_information: Recovery point Tier Information.
    :paramtype rp_tier_information: dict[str, str]
    :keyword rp_original_sa_option: Recovery point information: Original SA option.
    :paramtype rp_original_sa_option: bool
    :keyword rp_is_managed_virtual_machine: Recovery point information: Managed virtual machine.
    :paramtype rp_is_managed_virtual_machine: bool
    :keyword rp_vm_size_description: Recovery point information: VM size description.
    :paramtype rp_vm_size_description: str
    :keyword b_ms_active_region: Active region name of BMS Stamp.
    :paramtype b_ms_active_region: str
    """
    super().__init__(**kwargs)
    # Polymorphic discriminator: intentionally left None here; the concrete
    # subtype (e.g. "WorkloadCrrAccessToken", per this class's _subtype_map)
    # supplies the actual wire value.
    self.object_type = None  # type: Optional[str]
    self.access_token_string = access_token_string
    self.subscription_id = subscription_id
    self.resource_group_name = resource_group_name
    self.resource_name = resource_name
    self.resource_id = resource_id
    self.protection_container_id = protection_container_id
    self.recovery_point_id = recovery_point_id
    self.recovery_point_time = recovery_point_time
    self.container_name = container_name
    self.container_type = container_type
    self.backup_management_type = backup_management_type
    self.datasource_type = datasource_type
    self.datasource_name = datasource_name
    self.datasource_id = datasource_id
    self.datasource_container_name = datasource_container_name
    self.coordinator_service_stamp_id = coordinator_service_stamp_id
    self.coordinator_service_stamp_uri = coordinator_service_stamp_uri
    self.protection_service_stamp_id = protection_service_stamp_id
    self.protection_service_stamp_uri = protection_service_stamp_uri
    self.token_extended_information = token_extended_information
    self.rp_tier_information = rp_tier_information
    self.rp_original_sa_option = rp_original_sa_option
    self.rp_is_managed_virtual_machine = rp_is_managed_virtual_machine
    self.rp_vm_size_description = rp_vm_size_description
    self.b_ms_active_region = b_ms_active_region
class CrrAccessTokenResource(Resource):
    """ARM resource envelope whose ``properties`` carry a CRR access token.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: CrrAccessTokenResource properties.
    :vartype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrrAccessToken
    """

    # id/name/type are server-populated, hence marked read-only.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "CrrAccessToken"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.CrrAccessToken"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: CrrAccessTokenResource properties.
        :paramtype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrrAccessToken
        """
        # The shared ARM envelope fields are owned by the Resource base class;
        # this subclass only contributes the typed payload.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class CrrJobRequest(_serialization.Model):
    """Body of a request that looks up cross-region-restore (CRR) jobs.

    :ivar resource_id: Entire ARM resource id of the resource.
    :vartype resource_id: str
    :ivar job_name: Job Name of the job to be fetched.
    :vartype job_name: str
    """

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
        "job_name": {"key": "jobName", "type": "str"},
    }

    def __init__(self, *, resource_id: Optional[str] = None, job_name: Optional[str] = None, **kwargs):
        """
        :keyword resource_id: Entire ARM resource id of the resource.
        :paramtype resource_id: str
        :keyword job_name: Job Name of the job to be fetched.
        :paramtype job_name: str
        """
        super().__init__(**kwargs)
        self.resource_id = resource_id
        self.job_name = job_name
class CrrJobRequestResource(Resource):
    """ARM resource envelope whose ``properties`` carry a CRR job request.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: CrrJobRequestResource properties.
    :vartype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrrJobRequest
    """

    # id/name/type are server-populated, hence marked read-only.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "CrrJobRequest"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.CrrJobRequest"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: CrrJobRequestResource properties.
        :paramtype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CrrJobRequest
        """
        # The shared ARM envelope fields are owned by the Resource base class;
        # this subclass only contributes the typed payload.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class DiskExclusionProperties(_serialization.Model):
    """Selects which disk LUNs take part in (or are excluded from) VM backup.

    :ivar disk_lun_list: List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection.
    :vartype disk_lun_list: list[int]
    :ivar is_inclusion_list: Flag to indicate whether DiskLunList is to be included/ excluded from
     backup.
    :vartype is_inclusion_list: bool
    """

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "disk_lun_list": {"key": "diskLunList", "type": "[int]"},
        "is_inclusion_list": {"key": "isInclusionList", "type": "bool"},
    }

    def __init__(
        self, *, disk_lun_list: Optional[List[int]] = None, is_inclusion_list: Optional[bool] = None, **kwargs
    ):
        """
        :keyword disk_lun_list: List of Disks' Logical Unit Numbers (LUN) to be used for VM
         Protection.
        :paramtype disk_lun_list: list[int]
        :keyword is_inclusion_list: Flag to indicate whether DiskLunList is to be included/ excluded
         from backup.
        :paramtype is_inclusion_list: bool
        """
        super().__init__(**kwargs)
        self.disk_lun_list = disk_lun_list
        self.is_inclusion_list = is_inclusion_list
class DiskInformation(_serialization.Model):
    """Identifying information about a single disk.

    :ivar lun: Logical Unit Number (LUN) of the disk.
    :vartype lun: int
    :ivar name: Name of the disk.
    :vartype name: str
    """

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "lun": {"key": "lun", "type": "int"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(self, *, lun: Optional[int] = None, name: Optional[str] = None, **kwargs):
        """
        :keyword lun: Logical Unit Number (LUN) of the disk.
        :paramtype lun: int
        :keyword name: Name of the disk.
        :paramtype name: str
        """
        super().__init__(**kwargs)
        self.lun = lun
        self.name = name
class DpmErrorInfo(_serialization.Model):
    """Error details specific to the DPM workload.

    :ivar error_string: Localized error string.
    :vartype error_string: str
    :ivar recommendations: List of localized recommendations for above error code.
    :vartype recommendations: list[str]
    """

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "error_string": {"key": "errorString", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(self, *, error_string: Optional[str] = None, recommendations: Optional[List[str]] = None, **kwargs):
        """
        :keyword error_string: Localized error string.
        :paramtype error_string: str
        :keyword recommendations: List of localized recommendations for above error code.
        :paramtype recommendations: list[str]
        """
        super().__init__(**kwargs)
        self.error_string = error_string
        self.recommendations = recommendations
class DpmJob(Job):  # pylint: disable=too-many-instance-attributes
    """DPM workload-specific job object.

    All required parameters must be populated in order to send to Azure.

    :ivar entity_friendly_name: Friendly name of the entity on which the current job is executing.
    :vartype entity_friendly_name: str
    :ivar backup_management_type: Backup management type to execute the current job. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar operation: The operation name.
    :vartype operation: str
    :ivar status: Job status.
    :vartype status: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar activity_id: ActivityId of job.
    :vartype activity_id: str
    :ivar job_type: This property will be used as the discriminator for deciding the specific types
     in the polymorphic chain of types. Required.
    :vartype job_type: str
    :ivar duration: Time elapsed for job.
    :vartype duration: ~datetime.timedelta
    :ivar dpm_server_name: DPM server name managing the backup item or backup job.
    :vartype dpm_server_name: str
    :ivar container_name: Name of cluster/server protecting current backup item, if any.
    :vartype container_name: str
    :ivar container_type: Type of container.
    :vartype container_type: str
    :ivar workload_type: Type of backup item.
    :vartype workload_type: str
    :ivar actions_info: The state/actions applicable on this job like cancel/retry.
    :vartype actions_info: list[str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
    :ivar error_details: The errors.
    :vartype error_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.DpmErrorInfo]
    :ivar extended_info: Additional information for this job.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DpmJobExtendedInfo
    """

    # job_type is the polymorphic discriminator (see docstring), so it is the
    # only field that must always be present on the wire.
    _validation = {
        "job_type": {"required": True},
    }

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "entity_friendly_name": {"key": "entityFriendlyName", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "activity_id": {"key": "activityId", "type": "str"},
        "job_type": {"key": "jobType", "type": "str"},
        "duration": {"key": "duration", "type": "duration"},
        "dpm_server_name": {"key": "dpmServerName", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "container_type": {"key": "containerType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "actions_info": {"key": "actionsInfo", "type": "[str]"},
        "error_details": {"key": "errorDetails", "type": "[DpmErrorInfo]"},
        "extended_info": {"key": "extendedInfo", "type": "DpmJobExtendedInfo"},
    }

    def __init__(
        self,
        *,
        entity_friendly_name: Optional[str] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        activity_id: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        dpm_server_name: Optional[str] = None,
        container_name: Optional[str] = None,
        container_type: Optional[str] = None,
        workload_type: Optional[str] = None,
        actions_info: Optional[List[Union[str, "_models.JobSupportedAction"]]] = None,
        error_details: Optional[List["_models.DpmErrorInfo"]] = None,
        extended_info: Optional["_models.DpmJobExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword entity_friendly_name: Friendly name of the entity on which the current job is
         executing.
        :paramtype entity_friendly_name: str
        :keyword backup_management_type: Backup management type to execute the current job. Known
         values are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql",
         "AzureStorage", "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword operation: The operation name.
        :paramtype operation: str
        :keyword status: Job status.
        :paramtype status: str
        :keyword start_time: The start time.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: The end time.
        :paramtype end_time: ~datetime.datetime
        :keyword activity_id: ActivityId of job.
        :paramtype activity_id: str
        :keyword duration: Time elapsed for job.
        :paramtype duration: ~datetime.timedelta
        :keyword dpm_server_name: DPM server name managing the backup item or backup job.
        :paramtype dpm_server_name: str
        :keyword container_name: Name of cluster/server protecting current backup item, if any.
        :paramtype container_name: str
        :keyword container_type: Type of container.
        :paramtype container_type: str
        :keyword workload_type: Type of backup item.
        :paramtype workload_type: str
        :keyword actions_info: The state/actions applicable on this job like cancel/retry.
        :paramtype actions_info: list[str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
        :keyword error_details: The errors.
        :paramtype error_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.DpmErrorInfo]
        :keyword extended_info: Additional information for this job.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DpmJobExtendedInfo
        """
        # Common Job fields are forwarded to the base class; DPM-specific
        # fields are assigned below.
        super().__init__(
            entity_friendly_name=entity_friendly_name,
            backup_management_type=backup_management_type,
            operation=operation,
            status=status,
            start_time=start_time,
            end_time=end_time,
            activity_id=activity_id,
            **kwargs
        )
        # Fixed discriminator value identifying this Job subtype on the wire.
        self.job_type = "DpmJob"  # type: str
        self.duration = duration
        self.dpm_server_name = dpm_server_name
        self.container_name = container_name
        self.container_type = container_type
        self.workload_type = workload_type
        self.actions_info = actions_info
        self.error_details = error_details
        self.extended_info = extended_info
class DpmJobExtendedInfo(_serialization.Model):
    """Extra detail attached to a DPM workload-specific job.

    :ivar tasks_list: List of tasks associated with this job.
    :vartype tasks_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.DpmJobTaskDetails]
    :ivar property_bag: The job properties.
    :vartype property_bag: dict[str, str]
    :ivar dynamic_error_message: Non localized error message on job execution.
    :vartype dynamic_error_message: str
    """

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "tasks_list": {"key": "tasksList", "type": "[DpmJobTaskDetails]"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "dynamic_error_message": {"key": "dynamicErrorMessage", "type": "str"},
    }

    def __init__(
        self,
        *,
        tasks_list: Optional[List["_models.DpmJobTaskDetails"]] = None,
        property_bag: Optional[Dict[str, str]] = None,
        dynamic_error_message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tasks_list: List of tasks associated with this job.
        :paramtype tasks_list:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.DpmJobTaskDetails]
        :keyword property_bag: The job properties.
        :paramtype property_bag: dict[str, str]
        :keyword dynamic_error_message: Non localized error message on job execution.
        :paramtype dynamic_error_message: str
        """
        super().__init__(**kwargs)
        self.tasks_list = tasks_list
        self.property_bag = property_bag
        self.dynamic_error_message = dynamic_error_message
class DpmJobTaskDetails(_serialization.Model):
    """Per-task detail for a DPM workload-specific job.

    :ivar task_id: The task display name.
    :vartype task_id: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar duration: Time elapsed for task.
    :vartype duration: ~datetime.timedelta
    :ivar status: The status.
    :vartype status: str
    """

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "task_id": {"key": "taskId", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "duration": {"key": "duration", "type": "duration"},
        "status": {"key": "status", "type": "str"},
    }

    def __init__(
        self,
        *,
        task_id: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        duration: Optional[datetime.timedelta] = None,
        status: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword task_id: The task display name.
        :paramtype task_id: str
        :keyword start_time: The start time.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: The end time.
        :paramtype end_time: ~datetime.datetime
        :keyword duration: Time elapsed for task.
        :paramtype duration: ~datetime.timedelta
        :keyword status: The status.
        :paramtype status: str
        """
        super().__init__(**kwargs)
        self.task_id = task_id
        self.start_time = start_time
        self.end_time = end_time
        self.duration = duration
        self.status = status
class DPMProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """Additional information on Backup engine specific backup item.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the managed item.
    :vartype friendly_name: str
    :ivar backup_engine_name: Backup Management server protecting this backup item.
    :vartype backup_engine_name: str
    :ivar protection_state: Protection state of the backup engine. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemState
    :ivar extended_info: Extended info of the backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DPMProtectedItemExtendedInfo
    """

    # protected_item_type is the only always-required field; it carries the
    # wire value that identifies this ProtectedItem subtype.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Python attribute -> JSON wire key / serialization type.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "backup_engine_name": {"key": "backupEngineName", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "DPMProtectedItemExtendedInfo"},
    }

    def __init__(
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        backup_engine_name: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectedItemState"]] = None,
        extended_info: Optional["_models.DPMProtectedItemExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the managed item.
        :paramtype friendly_name: str
        :keyword backup_engine_name: Backup Management server protecting this backup item.
        :paramtype backup_engine_name: str
        :keyword protection_state: Protection state of the backup engine. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemState
        :keyword extended_info: Extended info of the backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DPMProtectedItemExtendedInfo
        """
        # Shared ProtectedItem fields are forwarded to the base class;
        # DPM-specific fields are assigned below.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Fixed wire value identifying this protected-item type.
        self.protected_item_type = "DPMProtectedItem"  # type: str
        self.friendly_name = friendly_name
        self.backup_engine_name = backup_engine_name
        self.protection_state = protection_state
        self.extended_info = extended_info
class DPMProtectedItemExtendedInfo(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Additional information of DPM Protected item.
:ivar protectable_object_load_path: Attribute to provide information on various DBs.
:vartype protectable_object_load_path: dict[str, str]
:ivar protected: To check if backup item is disk protected.
:vartype protected: bool
:ivar is_present_on_cloud: To check if backup item is cloud protected.
:vartype is_present_on_cloud: bool
:ivar last_backup_status: Last backup status information on backup item.
:vartype last_backup_status: str
:ivar last_refreshed_at: Last refresh time on backup item.
:vartype last_refreshed_at: ~datetime.datetime
:ivar oldest_recovery_point: Oldest cloud recovery point time.
:vartype oldest_recovery_point: ~datetime.datetime
:ivar recovery_point_count: cloud recovery point count.
:vartype recovery_point_count: int
:ivar on_premise_oldest_recovery_point: Oldest disk recovery point time.
:vartype on_premise_oldest_recovery_point: ~datetime.datetime
:ivar on_premise_latest_recovery_point: latest disk recovery point time.
:vartype on_premise_latest_recovery_point: ~datetime.datetime
:ivar on_premise_recovery_point_count: disk recovery point count.
:vartype on_premise_recovery_point_count: int
:ivar is_collocated: To check if backup item is collocated.
:vartype is_collocated: bool
:ivar protection_group_name: Protection group name of the backup item.
:vartype protection_group_name: str
:ivar disk_storage_used_in_bytes: Used Disk storage in bytes.
:vartype disk_storage_used_in_bytes: str
:ivar total_disk_storage_size_in_bytes: total Disk storage in bytes.
:vartype total_disk_storage_size_in_bytes: str
"""
_attribute_map = {
"protectable_object_load_path": {"key": "protectableObjectLoadPath", "type": "{str}"},
"protected": {"key": "protected", "type": "bool"},
"is_present_on_cloud": {"key": "isPresentOnCloud", "type": "bool"},
"last_backup_status": {"key": "lastBackupStatus", "type": "str"},
"last_refreshed_at": {"key": "lastRefreshedAt", "type": "iso-8601"},
"oldest_recovery_point": {"key": "oldestRecoveryPoint", "type": "iso-8601"},
"recovery_point_count": {"key": "recoveryPointCount", "type": "int"},
"on_premise_oldest_recovery_point": {"key": "onPremiseOldestRecoveryPoint", "type": "iso-8601"},
"on_premise_latest_recovery_point": {"key": "onPremiseLatestRecoveryPoint", "type": "iso-8601"},
"on_premise_recovery_point_count": {"key": "onPremiseRecoveryPointCount", "type": "int"},
"is_collocated": {"key": "isCollocated", "type": "bool"},
"protection_group_name": {"key": "protectionGroupName", "type": "str"},
"disk_storage_used_in_bytes": {"key": "diskStorageUsedInBytes", "type": "str"},
"total_disk_storage_size_in_bytes": {"key": "totalDiskStorageSizeInBytes", "type": "str"},
}
    def __init__(
        self,
        *,
        protectable_object_load_path: Optional[Dict[str, str]] = None,
        protected: Optional[bool] = None,
        is_present_on_cloud: Optional[bool] = None,
        last_backup_status: Optional[str] = None,
        last_refreshed_at: Optional[datetime.datetime] = None,
        oldest_recovery_point: Optional[datetime.datetime] = None,
        recovery_point_count: Optional[int] = None,
        on_premise_oldest_recovery_point: Optional[datetime.datetime] = None,
        on_premise_latest_recovery_point: Optional[datetime.datetime] = None,
        on_premise_recovery_point_count: Optional[int] = None,
        is_collocated: Optional[bool] = None,
        protection_group_name: Optional[str] = None,
        disk_storage_used_in_bytes: Optional[str] = None,
        total_disk_storage_size_in_bytes: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword protectable_object_load_path: Attribute to provide information on various DBs.
        :paramtype protectable_object_load_path: dict[str, str]
        :keyword protected: To check if backup item is disk protected.
        :paramtype protected: bool
        :keyword is_present_on_cloud: To check if backup item is cloud protected.
        :paramtype is_present_on_cloud: bool
        :keyword last_backup_status: Last backup status information on backup item.
        :paramtype last_backup_status: str
        :keyword last_refreshed_at: Last refresh time on backup item.
        :paramtype last_refreshed_at: ~datetime.datetime
        :keyword oldest_recovery_point: Oldest cloud recovery point time.
        :paramtype oldest_recovery_point: ~datetime.datetime
        :keyword recovery_point_count: cloud recovery point count.
        :paramtype recovery_point_count: int
        :keyword on_premise_oldest_recovery_point: Oldest disk recovery point time.
        :paramtype on_premise_oldest_recovery_point: ~datetime.datetime
        :keyword on_premise_latest_recovery_point: latest disk recovery point time.
        :paramtype on_premise_latest_recovery_point: ~datetime.datetime
        :keyword on_premise_recovery_point_count: disk recovery point count.
        :paramtype on_premise_recovery_point_count: int
        :keyword is_collocated: To check if backup item is collocated.
        :paramtype is_collocated: bool
        :keyword protection_group_name: Protection group name of the backup item.
        :paramtype protection_group_name: str
        :keyword disk_storage_used_in_bytes: Used Disk storage in bytes.
        :paramtype disk_storage_used_in_bytes: str
        :keyword total_disk_storage_size_in_bytes: total Disk storage in bytes.
        :paramtype total_disk_storage_size_in_bytes: str
        """
        # Any unrecognized keyword arguments are forwarded to the base serialization Model.
        super().__init__(**kwargs)
        # Every keyword is stored verbatim on the instance under the same name.
        self.protectable_object_load_path = protectable_object_load_path
        self.protected = protected
        self.is_present_on_cloud = is_present_on_cloud
        self.last_backup_status = last_backup_status
        self.last_refreshed_at = last_refreshed_at
        self.oldest_recovery_point = oldest_recovery_point
        self.recovery_point_count = recovery_point_count
        self.on_premise_oldest_recovery_point = on_premise_oldest_recovery_point
        self.on_premise_latest_recovery_point = on_premise_latest_recovery_point
        self.on_premise_recovery_point_count = on_premise_recovery_point_count
        self.is_collocated = is_collocated
        self.protection_group_name = protection_group_name
        self.disk_storage_used_in_bytes = disk_storage_used_in_bytes
        self.total_disk_storage_size_in_bytes = total_disk_storage_size_in_bytes
class EncryptionDetails(_serialization.Model):
    """Encryption metadata captured for a VM that was encrypted when the backup was taken.

    :ivar encryption_enabled: Identifies whether this backup copy represents an encrypted VM at the
     time of backup.
    :vartype encryption_enabled: bool
    :ivar kek_url: Key Url.
    :vartype kek_url: str
    :ivar secret_key_url: Secret Url.
    :vartype secret_key_url: str
    :ivar kek_vault_id: ID of Key Vault where KEK is stored.
    :vartype kek_vault_id: str
    :ivar secret_key_vault_id: ID of Key Vault where Secret is stored.
    :vartype secret_key_vault_id: str
    """

    # Wire-name / serialization-type mapping consumed by the serialization layer.
    _attribute_map = {
        "encryption_enabled": {"key": "encryptionEnabled", "type": "bool"},
        "kek_url": {"key": "kekUrl", "type": "str"},
        "secret_key_url": {"key": "secretKeyUrl", "type": "str"},
        "kek_vault_id": {"key": "kekVaultId", "type": "str"},
        "secret_key_vault_id": {"key": "secretKeyVaultId", "type": "str"},
    }

    def __init__(
        self,
        *,
        encryption_enabled: Optional[bool] = None,
        kek_url: Optional[str] = None,
        secret_key_url: Optional[str] = None,
        kek_vault_id: Optional[str] = None,
        secret_key_vault_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword encryption_enabled: Identifies whether this backup copy represents an encrypted VM
         at the time of backup.
        :paramtype encryption_enabled: bool
        :keyword kek_url: Key Url.
        :paramtype kek_url: str
        :keyword secret_key_url: Secret Url.
        :paramtype secret_key_url: str
        :keyword kek_vault_id: ID of Key Vault where KEK is stored.
        :paramtype kek_vault_id: str
        :keyword secret_key_vault_id: ID of Key Vault where Secret is stored.
        :paramtype secret_key_vault_id: str
        """
        super().__init__(**kwargs)
        # Copy every keyword onto the instance under its matching attribute name.
        for attr_name, attr_value in (
            ("encryption_enabled", encryption_enabled),
            ("kek_url", kek_url),
            ("secret_key_url", secret_key_url),
            ("kek_vault_id", kek_vault_id),
            ("secret_key_vault_id", secret_key_vault_id),
        ):
            setattr(self, attr_name, attr_value)
class ErrorAdditionalInfo(_serialization.Model):
    """Additional info attached to a resource management error.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: JSON
    """

    # Both fields come back from the service only; clients never send them.
    _validation = {
        "type": {"readonly": True},
        "info": {"readonly": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "info": {"key": "info", "type": "object"},
    }

    def __init__(self, **kwargs):
        """Read-only attributes start as ``None`` and are filled in on deserialization."""
        super().__init__(**kwargs)
        self.type = self.info = None
class ErrorDetail(_serialization.Model):
    """Error Detail class which encapsulates Code, Message and Recommendations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error Message related to the Code.
    :vartype message: str
    :ivar recommendations: List of recommendation strings.
    :vartype recommendations: list[str]
    """

    # Every field is server-populated; clients never send them.
    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
        "recommendations": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        """Read-only attributes start as ``None`` and are filled in on deserialization."""
        super().__init__(**kwargs)
        self.code = self.message = self.recommendations = None
class ExtendedProperties(_serialization.Model):
    """Extended Properties for Azure IaasVM Backup.

    :ivar disk_exclusion_properties: Extended Properties for Disk Exclusion.
    :vartype disk_exclusion_properties:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DiskExclusionProperties
    """

    # Single nested model; delegated to the DiskExclusionProperties (de)serializer.
    _attribute_map = {
        "disk_exclusion_properties": {"key": "diskExclusionProperties", "type": "DiskExclusionProperties"},
    }

    def __init__(
        self,
        *,
        disk_exclusion_properties: Optional["_models.DiskExclusionProperties"] = None,
        **kwargs
    ):
        """
        :keyword disk_exclusion_properties: Extended Properties for Disk Exclusion.
        :paramtype disk_exclusion_properties:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DiskExclusionProperties
        """
        super().__init__(**kwargs)
        self.disk_exclusion_properties = disk_exclusion_properties
class GenericProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """Base class for backup items.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of the container.
    :vartype friendly_name: str
    :ivar policy_state: Indicates consistency of policy object and policy applied to this backup
     item.
    :vartype policy_state: str
    :ivar protection_state: Backup state of this backup item. Known values are: "Invalid",
     "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
    :vartype protection_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
    :ivar protected_item_id: Data Plane Service ID of the protected item.
    :vartype protected_item_id: int
    :ivar source_associations: Loosely coupled (type, value) associations (example - parent of a
     protected item).
    :vartype source_associations: dict[str, str]
    :ivar fabric_name: Name of this backup item's fabric.
    :vartype fabric_name: str
    """

    # "protected_item_type" is the polymorphic discriminator and is marked Required.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Maps each Python attribute to its JSON wire name and serialization type;
    # the "key"/"type" strings are part of the service contract.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "policy_state": {"key": "policyState", "type": "str"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "protected_item_id": {"key": "protectedItemId", "type": "int"},
        "source_associations": {"key": "sourceAssociations", "type": "{str}"},
        "fabric_name": {"key": "fabricName", "type": "str"},
    }

    def __init__(
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        policy_state: Optional[str] = None,
        protection_state: Optional[Union[str, "_models.ProtectionState"]] = None,
        protected_item_id: Optional[int] = None,
        source_associations: Optional[Dict[str, str]] = None,
        fabric_name: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of the container.
        :paramtype friendly_name: str
        :keyword policy_state: Indicates consistency of policy object and policy applied to this backup
         item.
        :paramtype policy_state: str
        :keyword protection_state: Backup state of this backup item. Known values are: "Invalid",
         "IRPending", "Protected", "ProtectionError", "ProtectionStopped", and "ProtectionPaused".
        :paramtype protection_state: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectionState
        :keyword protected_item_id: Data Plane Service ID of the protected item.
        :paramtype protected_item_id: int
        :keyword source_associations: Loosely coupled (type, value) associations (example - parent of a
         protected item).
        :paramtype source_associations: dict[str, str]
        :keyword fabric_name: Name of this backup item's fabric.
        :paramtype fabric_name: str
        """
        # Fields shared across protected-item subtypes are handled by the ProtectedItem base class.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Fixed discriminator value identifying this subtype on the wire.
        self.protected_item_type = "GenericProtectedItem"  # type: str
        self.friendly_name = friendly_name
        self.policy_state = policy_state
        self.protection_state = protection_state
        self.protected_item_id = protected_item_id
        self.source_associations = source_associations
        self.fabric_name = fabric_name
class GenericRecoveryPoint(RecoveryPoint):
    """Generic backup copy.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar friendly_name: Friendly name of the backup copy.
    :vartype friendly_name: str
    :ivar recovery_point_type: Type of the backup copy.
    :vartype recovery_point_type: str
    :ivar recovery_point_time: Time at which this backup copy was created.
    :vartype recovery_point_time: ~datetime.datetime
    :ivar recovery_point_additional_info: Additional information associated with this backup copy.
    :vartype recovery_point_additional_info: str
    """

    # Only the polymorphic discriminator is mandatory on the wire.
    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "recovery_point_type": {"key": "recoveryPointType", "type": "str"},
        "recovery_point_time": {"key": "recoveryPointTime", "type": "iso-8601"},
        "recovery_point_additional_info": {"key": "recoveryPointAdditionalInfo", "type": "str"},
    }

    def __init__(
        self,
        *,
        friendly_name: Optional[str] = None,
        recovery_point_type: Optional[str] = None,
        recovery_point_time: Optional[datetime.datetime] = None,
        recovery_point_additional_info: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword friendly_name: Friendly name of the backup copy.
        :paramtype friendly_name: str
        :keyword recovery_point_type: Type of the backup copy.
        :paramtype recovery_point_type: str
        :keyword recovery_point_time: Time at which this backup copy was created.
        :paramtype recovery_point_time: ~datetime.datetime
        :keyword recovery_point_additional_info: Additional information associated with this backup
         copy.
        :paramtype recovery_point_additional_info: str
        """
        super().__init__(**kwargs)
        # Fixed discriminator value identifying this subtype on the wire.
        self.object_type = "GenericRecoveryPoint"  # type: str
        # Remaining keywords are stored verbatim under their matching attribute names.
        for attr_name, attr_value in (
            ("friendly_name", friendly_name),
            ("recovery_point_type", recovery_point_type),
            ("recovery_point_time", recovery_point_time),
            ("recovery_point_additional_info", recovery_point_additional_info),
        ):
            setattr(self, attr_name, attr_value)
class IaasVMRecoveryPoint(RecoveryPoint):  # pylint: disable=too-many-instance-attributes
    """IaaS VM workload specific backup copy.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_point_type: Type of the backup copy.
    :vartype recovery_point_type: str
    :ivar recovery_point_time: Time at which this backup copy was created.
    :vartype recovery_point_time: ~datetime.datetime
    :ivar recovery_point_additional_info: Additional information associated with this backup copy.
    :vartype recovery_point_additional_info: str
    :ivar source_vm_storage_type: Storage type of the VM whose backup copy is created.
    :vartype source_vm_storage_type: str
    :ivar is_source_vm_encrypted: Identifies whether the VM was encrypted when the backup copy is
     created.
    :vartype is_source_vm_encrypted: bool
    :ivar key_and_secret: Required details for recovering an encrypted VM. Applicable only when
     IsSourceVMEncrypted is true.
    :vartype key_and_secret:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KeyAndSecretDetails
    :ivar is_instant_ilr_session_active: Is the session to recover items from this backup copy
     still active.
    :vartype is_instant_ilr_session_active: bool
    :ivar recovery_point_tier_details: Recovery point tier information.
    :vartype recovery_point_tier_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
    :ivar is_managed_virtual_machine: Whether VM is with Managed Disks.
    :vartype is_managed_virtual_machine: bool
    :ivar virtual_machine_size: Virtual Machine Size.
    :vartype virtual_machine_size: str
    :ivar original_storage_account_option: Original Storage Account Option.
    :vartype original_storage_account_option: bool
    :ivar os_type: OS type.
    :vartype os_type: str
    :ivar recovery_point_disk_configuration: Disk configuration.
    :vartype recovery_point_disk_configuration:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointDiskConfiguration
    :ivar zones: Identifies the zone of the VM at the time of backup. Applicable only for
     zone-pinned Vms.
    :vartype zones: list[str]
    :ivar recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
    :vartype recovery_point_move_readiness_info: dict[str,
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
    """

    # object_type is the required polymorphic discriminator; the readonly fields
    # below are populated by the server and never sent by the client.
    _validation = {
        "object_type": {"required": True},
        "recovery_point_type": {"readonly": True},
        "recovery_point_time": {"readonly": True},
        "recovery_point_additional_info": {"readonly": True},
        "source_vm_storage_type": {"readonly": True},
        "is_source_vm_encrypted": {"readonly": True},
    }

    # Maps each Python attribute to its JSON wire name and serialization type;
    # the "key"/"type" strings are part of the service contract.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_point_type": {"key": "recoveryPointType", "type": "str"},
        "recovery_point_time": {"key": "recoveryPointTime", "type": "iso-8601"},
        "recovery_point_additional_info": {"key": "recoveryPointAdditionalInfo", "type": "str"},
        "source_vm_storage_type": {"key": "sourceVMStorageType", "type": "str"},
        "is_source_vm_encrypted": {"key": "isSourceVMEncrypted", "type": "bool"},
        "key_and_secret": {"key": "keyAndSecret", "type": "KeyAndSecretDetails"},
        "is_instant_ilr_session_active": {"key": "isInstantIlrSessionActive", "type": "bool"},
        "recovery_point_tier_details": {"key": "recoveryPointTierDetails", "type": "[RecoveryPointTierInformation]"},
        "is_managed_virtual_machine": {"key": "isManagedVirtualMachine", "type": "bool"},
        "virtual_machine_size": {"key": "virtualMachineSize", "type": "str"},
        "original_storage_account_option": {"key": "originalStorageAccountOption", "type": "bool"},
        "os_type": {"key": "osType", "type": "str"},
        "recovery_point_disk_configuration": {
            "key": "recoveryPointDiskConfiguration",
            "type": "RecoveryPointDiskConfiguration",
        },
        "zones": {"key": "zones", "type": "[str]"},
        "recovery_point_move_readiness_info": {
            "key": "recoveryPointMoveReadinessInfo",
            "type": "{RecoveryPointMoveReadinessInfo}",
        },
    }

    def __init__(
        self,
        *,
        key_and_secret: Optional["_models.KeyAndSecretDetails"] = None,
        is_instant_ilr_session_active: Optional[bool] = None,
        recovery_point_tier_details: Optional[List["_models.RecoveryPointTierInformation"]] = None,
        is_managed_virtual_machine: Optional[bool] = None,
        virtual_machine_size: Optional[str] = None,
        original_storage_account_option: Optional[bool] = None,
        os_type: Optional[str] = None,
        recovery_point_disk_configuration: Optional["_models.RecoveryPointDiskConfiguration"] = None,
        zones: Optional[List[str]] = None,
        recovery_point_move_readiness_info: Optional[Dict[str, "_models.RecoveryPointMoveReadinessInfo"]] = None,
        **kwargs
    ):
        """
        :keyword key_and_secret: Required details for recovering an encrypted VM. Applicable only when
         IsSourceVMEncrypted is true.
        :paramtype key_and_secret:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KeyAndSecretDetails
        :keyword is_instant_ilr_session_active: Is the session to recover items from this backup copy
         still active.
        :paramtype is_instant_ilr_session_active: bool
        :keyword recovery_point_tier_details: Recovery point tier information.
        :paramtype recovery_point_tier_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierInformation]
        :keyword is_managed_virtual_machine: Whether VM is with Managed Disks.
        :paramtype is_managed_virtual_machine: bool
        :keyword virtual_machine_size: Virtual Machine Size.
        :paramtype virtual_machine_size: str
        :keyword original_storage_account_option: Original Storage Account Option.
        :paramtype original_storage_account_option: bool
        :keyword os_type: OS type.
        :paramtype os_type: str
        :keyword recovery_point_disk_configuration: Disk configuration.
        :paramtype recovery_point_disk_configuration:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointDiskConfiguration
        :keyword zones: Identifies the zone of the VM at the time of backup. Applicable only for
         zone-pinned Vms.
        :paramtype zones: list[str]
        :keyword recovery_point_move_readiness_info: Eligibility of RP to be moved to another tier.
        :paramtype recovery_point_move_readiness_info: dict[str,
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointMoveReadinessInfo]
        """
        super().__init__(**kwargs)
        # Fixed discriminator value identifying this subtype on the wire.
        self.object_type = "IaasVMRecoveryPoint"  # type: str
        # Read-only (server-populated) fields are initialized to None client-side.
        self.recovery_point_type = None
        self.recovery_point_time = None
        self.recovery_point_additional_info = None
        self.source_vm_storage_type = None
        self.is_source_vm_encrypted = None
        self.key_and_secret = key_and_secret
        self.is_instant_ilr_session_active = is_instant_ilr_session_active
        self.recovery_point_tier_details = recovery_point_tier_details
        self.is_managed_virtual_machine = is_managed_virtual_machine
        self.virtual_machine_size = virtual_machine_size
        self.original_storage_account_option = original_storage_account_option
        self.os_type = os_type
        self.recovery_point_disk_configuration = recovery_point_disk_configuration
        self.zones = zones
        self.recovery_point_move_readiness_info = recovery_point_move_readiness_info
class IaasVMRestoreRequest(RestoreRequest): # pylint: disable=too-many-instance-attributes
"""IaaS VM workload-specific restore.
All required parameters must be populated in order to send to Azure.
:ivar object_type: This property will be used as the discriminator for deciding the specific
types in the polymorphic chain of types. Required.
:vartype object_type: str
:ivar recovery_point_id: ID of the backup copy to be recovered.
:vartype recovery_point_id: str
:ivar recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
"AlternateLocation", "RestoreDisks", and "Offline".
:vartype recovery_type: str or
~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
:ivar source_resource_id: Fully qualified ARM ID of the VM which is being recovered.
:vartype source_resource_id: str
:ivar target_virtual_machine_id: This is the complete ARM Id of the VM that will be created.
For e.g.
/subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
:vartype target_virtual_machine_id: str
:ivar target_resource_group_id: This is the ARM Id of the resource group that you want to
create for this Virtual machine and other artifacts.
For e.g. /subscriptions/{subId}/resourcegroups/{rg}.
:vartype target_resource_group_id: str
:ivar storage_account_id: Fully qualified ARM ID of the storage account to which the VM has to
be restored.
:vartype storage_account_id: str
:ivar virtual_network_id: This is the virtual network Id of the vnet that will be attached to
the virtual machine.
User will be validated for join action permissions in the linked access.
:vartype virtual_network_id: str
:ivar subnet_id: Subnet ID, is the subnet ID associated with the to be restored VM. For Classic
VMs it would be
{VnetID}/Subnet/{SubnetName} and, for the Azure Resource Manager VMs it would be ARM resource
ID used to represent
the subnet.
:vartype subnet_id: str
:ivar target_domain_name_id: Fully qualified ARM ID of the domain name to be associated to the
VM being restored. This applies only to Classic
Virtual Machines.
:vartype target_domain_name_id: str
:ivar region: Region in which the virtual machine is restored.
:vartype region: str
:ivar affinity_group: Affinity group associated to VM to be restored. Used only for Classic
Compute Virtual Machines.
:vartype affinity_group: str
:ivar create_new_cloud_service: Should a new cloud service be created while restoring the VM.
If this is false, VM will be restored to the same
cloud service as it was at the time of backup.
:vartype create_new_cloud_service: bool
:ivar original_storage_account_option: Original Storage Account Option.
:vartype original_storage_account_option: bool
:ivar encryption_details: Details needed if the VM was encrypted at the time of backup.
:vartype encryption_details:
~azure.mgmt.recoveryservicesbackup.passivestamp.models.EncryptionDetails
:ivar restore_disk_lun_list: List of Disk LUNs for partial restore.
:vartype restore_disk_lun_list: list[int]
:ivar restore_with_managed_disks: Flag to denote of an Unmanaged disk VM should be restored
with Managed disks.
:vartype restore_with_managed_disks: bool
:ivar disk_encryption_set_id: DiskEncryptionSet's ID - needed if the VM needs to be encrypted
at rest during restore with customer managed key.
:vartype disk_encryption_set_id: str
:ivar zones: Target zone where the VM and its disks should be restored.
:vartype zones: list[str]
:ivar identity_info: Managed Identity information required to access customer storage account.
:vartype identity_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.IdentityInfo
:ivar identity_based_restore_details: IaaS VM workload specific restore details for restores
using managed identity.
:vartype identity_based_restore_details:
~azure.mgmt.recoveryservicesbackup.passivestamp.models.IdentityBasedRestoreDetails
"""
_validation = {
"object_type": {"required": True},
}
_attribute_map = {
"object_type": {"key": "objectType", "type": "str"},
"recovery_point_id": {"key": "recoveryPointId", "type": "str"},
"recovery_type": {"key": "recoveryType", "type": "str"},
"source_resource_id": {"key": "sourceResourceId", "type": "str"},
"target_virtual_machine_id": {"key": "targetVirtualMachineId", "type": "str"},
"target_resource_group_id": {"key": "targetResourceGroupId", "type": "str"},
"storage_account_id": {"key": "storageAccountId", "type": "str"},
"virtual_network_id": {"key": "virtualNetworkId", "type": "str"},
"subnet_id": {"key": "subnetId", "type": "str"},
"target_domain_name_id": {"key": "targetDomainNameId", "type": "str"},
"region": {"key": "region", "type": "str"},
"affinity_group": {"key": "affinityGroup", "type": "str"},
"create_new_cloud_service": {"key": "createNewCloudService", "type": "bool"},
"original_storage_account_option": {"key": "originalStorageAccountOption", "type": "bool"},
"encryption_details": {"key": "encryptionDetails", "type": "EncryptionDetails"},
"restore_disk_lun_list": {"key": "restoreDiskLunList", "type": "[int]"},
"restore_with_managed_disks": {"key": "restoreWithManagedDisks", "type": "bool"},
"disk_encryption_set_id": {"key": "diskEncryptionSetId", "type": "str"},
"zones": {"key": "zones", "type": "[str]"},
"identity_info": {"key": "identityInfo", "type": "IdentityInfo"},
"identity_based_restore_details": {"key": "identityBasedRestoreDetails", "type": "IdentityBasedRestoreDetails"},
}
def __init__(
    self,
    *,
    recovery_point_id: Optional[str] = None,
    recovery_type: Optional[Union[str, "_models.RecoveryType"]] = None,
    source_resource_id: Optional[str] = None,
    target_virtual_machine_id: Optional[str] = None,
    target_resource_group_id: Optional[str] = None,
    storage_account_id: Optional[str] = None,
    virtual_network_id: Optional[str] = None,
    subnet_id: Optional[str] = None,
    target_domain_name_id: Optional[str] = None,
    region: Optional[str] = None,
    affinity_group: Optional[str] = None,
    create_new_cloud_service: Optional[bool] = None,
    original_storage_account_option: Optional[bool] = None,
    encryption_details: Optional["_models.EncryptionDetails"] = None,
    restore_disk_lun_list: Optional[List[int]] = None,
    restore_with_managed_disks: Optional[bool] = None,
    disk_encryption_set_id: Optional[str] = None,
    zones: Optional[List[str]] = None,
    identity_info: Optional["_models.IdentityInfo"] = None,
    identity_based_restore_details: Optional["_models.IdentityBasedRestoreDetails"] = None,
    **kwargs
):
    """
    :keyword recovery_point_id: ID of the backup copy to be recovered.
    :paramtype recovery_point_id: str
    :keyword recovery_type: Type of this recovery. Known values are: "Invalid", "OriginalLocation",
     "AlternateLocation", "RestoreDisks", and "Offline".
    :paramtype recovery_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryType
    :keyword source_resource_id: Fully qualified ARM ID of the VM which is being recovered.
    :paramtype source_resource_id: str
    :keyword target_virtual_machine_id: This is the complete ARM Id of the VM that will be created.
     For e.g.
     /subscriptions/{subId}/resourcegroups/{rg}/provider/Microsoft.Compute/virtualmachines/{vm}.
    :paramtype target_virtual_machine_id: str
    :keyword target_resource_group_id: This is the ARM Id of the resource group that you want to
     create for this Virtual machine and other artifacts.
     For e.g. /subscriptions/{subId}/resourcegroups/{rg}.
    :paramtype target_resource_group_id: str
    :keyword storage_account_id: Fully qualified ARM ID of the storage account to which the VM has
     to be restored.
    :paramtype storage_account_id: str
    :keyword virtual_network_id: This is the virtual network Id of the vnet that will be attached
     to the virtual machine.
     User will be validated for join action permissions in the linked access.
    :paramtype virtual_network_id: str
    :keyword subnet_id: Subnet ID, is the subnet ID associated with the to be restored VM. For
     Classic VMs it would be
     {VnetID}/Subnet/{SubnetName} and, for the Azure Resource Manager VMs it would be ARM resource
     ID used to represent
     the subnet.
    :paramtype subnet_id: str
    :keyword target_domain_name_id: Fully qualified ARM ID of the domain name to be associated to
     the VM being restored. This applies only to Classic
     Virtual Machines.
    :paramtype target_domain_name_id: str
    :keyword region: Region in which the virtual machine is restored.
    :paramtype region: str
    :keyword affinity_group: Affinity group associated to VM to be restored. Used only for Classic
     Compute Virtual Machines.
    :paramtype affinity_group: str
    :keyword create_new_cloud_service: Should a new cloud service be created while restoring the
     VM. If this is false, VM will be restored to the same
     cloud service as it was at the time of backup.
    :paramtype create_new_cloud_service: bool
    :keyword original_storage_account_option: Original Storage Account Option.
    :paramtype original_storage_account_option: bool
    :keyword encryption_details: Details needed if the VM was encrypted at the time of backup.
    :paramtype encryption_details:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.EncryptionDetails
    :keyword restore_disk_lun_list: List of Disk LUNs for partial restore.
    :paramtype restore_disk_lun_list: list[int]
    :keyword restore_with_managed_disks: Flag to denote of an Unmanaged disk VM should be restored
     with Managed disks.
    :paramtype restore_with_managed_disks: bool
    :keyword disk_encryption_set_id: DiskEncryptionSet's ID - needed if the VM needs to be
     encrypted at rest during restore with customer managed key.
    :paramtype disk_encryption_set_id: str
    :keyword zones: Target zone where the VM and its disks should be restored.
    :paramtype zones: list[str]
    :keyword identity_info: Managed Identity information required to access customer storage
     account.
    :paramtype identity_info: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.IdentityInfo
    :keyword identity_based_restore_details: IaaS VM workload specific restore details for restores
     using managed identity.
    :paramtype identity_based_restore_details:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.IdentityBasedRestoreDetails
    """
    super().__init__(**kwargs)
    # Fixed polymorphic discriminator identifying this restore-request subtype
    # on the wire; never supplied by the caller.
    self.object_type = "IaasVMRestoreRequest"  # type: str
    self.recovery_point_id = recovery_point_id
    self.recovery_type = recovery_type
    self.source_resource_id = source_resource_id
    self.target_virtual_machine_id = target_virtual_machine_id
    self.target_resource_group_id = target_resource_group_id
    self.storage_account_id = storage_account_id
    self.virtual_network_id = virtual_network_id
    self.subnet_id = subnet_id
    self.target_domain_name_id = target_domain_name_id
    self.region = region
    self.affinity_group = affinity_group
    self.create_new_cloud_service = create_new_cloud_service
    self.original_storage_account_option = original_storage_account_option
    self.encryption_details = encryption_details
    self.restore_disk_lun_list = restore_disk_lun_list
    self.restore_with_managed_disks = restore_with_managed_disks
    self.disk_encryption_set_id = disk_encryption_set_id
    self.zones = zones
    self.identity_info = identity_info
    self.identity_based_restore_details = identity_based_restore_details
class IdentityBasedRestoreDetails(_serialization.Model):
    """Restore details specific to IaaS VM workloads when the restore is performed
    using a managed identity.

    :ivar object_type: Gets the class type.
    :vartype object_type: str
    :ivar target_storage_account_id: Fully qualified ARM ID of the target storage account.
    :vartype target_storage_account_id: str
    """

    # Maps each python attribute to its JSON wire key and serialized type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "target_storage_account_id": {"key": "targetStorageAccountId", "type": "str"},
    }

    def __init__(
        self,
        *,
        object_type: Optional[str] = None,
        target_storage_account_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword object_type: Gets the class type.
        :paramtype object_type: str
        :keyword target_storage_account_id: Fully qualified ARM ID of the target storage account.
        :paramtype target_storage_account_id: str
        """
        super().__init__(**kwargs)
        self.target_storage_account_id = target_storage_account_id
        self.object_type = object_type
class IdentityInfo(_serialization.Model):
    """Carries the Managed Identity details used for a restore operation.

    :ivar is_system_assigned_identity: To differentiate if the managed identity is system assigned
     or user assigned.
    :vartype is_system_assigned_identity: bool
    :ivar managed_identity_resource_id: Managed Identity Resource Id.
     Optional: Might not be required in the case of system assigned managed identity.
    :vartype managed_identity_resource_id: str
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "is_system_assigned_identity": {"key": "isSystemAssignedIdentity", "type": "bool"},
        "managed_identity_resource_id": {"key": "managedIdentityResourceId", "type": "str"},
    }

    def __init__(self, *, is_system_assigned_identity: Optional[bool] = None, managed_identity_resource_id: Optional[str] = None, **kwargs):
        """
        :keyword is_system_assigned_identity: To differentiate if the managed identity is system
         assigned or user assigned.
        :paramtype is_system_assigned_identity: bool
        :keyword managed_identity_resource_id: Managed Identity Resource Id.
         Optional: Might not be required in the case of system assigned managed identity.
        :paramtype managed_identity_resource_id: str
        """
        super().__init__(**kwargs)
        self.managed_identity_resource_id = managed_identity_resource_id
        self.is_system_assigned_identity = is_system_assigned_identity
class InstantItemRecoveryTarget(_serialization.Model):
    """Target information for a file / folder restore.

    :ivar client_scripts: List of client scripts.
    :vartype client_scripts:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.ClientScriptForConnect]
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "client_scripts": {"key": "clientScripts", "type": "[ClientScriptForConnect]"},
    }

    def __init__(
        self,
        *,
        client_scripts: Optional[List["_models.ClientScriptForConnect"]] = None,
        **kwargs
    ):
        """
        :keyword client_scripts: List of client scripts.
        :paramtype client_scripts:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.ClientScriptForConnect]
        """
        super().__init__(**kwargs)
        self.client_scripts = client_scripts
class JobQueryObject(_serialization.Model):
    """Filter criteria applied when listing backup jobs.

    :ivar status: Status of the job. Known values are: "Invalid", "InProgress", "Completed",
     "Failed", "CompletedWithWarnings", "Cancelled", and "Cancelling".
    :vartype status: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobStatus
    :ivar backup_management_type: Type of backup management for the job. Known values are:
     "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar operation: Type of operation. Known values are: "Invalid", "Register", "UnRegister",
     "ConfigureBackup", "Backup", "Restore", "DisableBackup", "DeleteBackupData",
     "CrossRegionRestore", "Undelete", and "UpdateCustomerManagedKey".
    :vartype operation: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobOperationType
    :ivar job_id: JobID represents the job uniquely.
    :vartype job_id: str
    :ivar start_time: Job has started at this time. Value is in UTC.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: Job has ended at this time. Value is in UTC.
    :vartype end_time: ~datetime.datetime
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "job_id": {"key": "jobId", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        status: Optional[Union[str, "_models.JobStatus"]] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[Union[str, "_models.JobOperationType"]] = None,
        job_id: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword status: Status of the job. Known values are: "Invalid", "InProgress", "Completed",
         "Failed", "CompletedWithWarnings", "Cancelled", and "Cancelling".
        :paramtype status: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobStatus
        :keyword backup_management_type: Type of backup management for the job. Known values are:
         "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword operation: Type of operation. Known values are: "Invalid", "Register",
         "UnRegister", "ConfigureBackup", "Backup", "Restore", "DisableBackup", "DeleteBackupData",
         "CrossRegionRestore", "Undelete", and "UpdateCustomerManagedKey".
        :paramtype operation: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobOperationType
        :keyword job_id: JobID represents the job uniquely.
        :paramtype job_id: str
        :keyword start_time: Job has started at this time. Value is in UTC.
        :paramtype start_time: ~datetime.datetime
        :keyword end_time: Job has ended at this time. Value is in UTC.
        :paramtype end_time: ~datetime.datetime
        """
        super().__init__(**kwargs)
        # All filters are optional; unset filters are simply not applied.
        self.end_time = end_time
        self.start_time = start_time
        self.job_id = job_id
        self.operation = operation
        self.backup_management_type = backup_management_type
        self.status = status
class JobResource(Resource):
    """ARM resource wrapper around workload-agnostic job properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: JobResource properties.
    :vartype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.Job
    """

    # id/name/type are server-populated and never serialized on requests.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "Job"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.Job"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: JobResource properties.
        :paramtype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.Job
        """
        # Common ARM envelope fields are handled entirely by the base class.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class ResourceList(_serialization.Model):
    """Common base for every paged list of resources.

    :ivar next_link: The uri to fetch the next page of resources. Call ListNext() fetches next
     page of resources.
    :vartype next_link: str
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword next_link: The uri to fetch the next page of resources. Call ListNext() fetches
         next page of resources.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
class JobResourceList(ResourceList):
    """Paged list of Job resources.

    :ivar next_link: The uri to fetch the next page of resources. Call ListNext() fetches next
     page of resources.
    :vartype next_link: str
    :ivar value: List of resources.
    :vartype value: list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobResource]
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "value": {"key": "value", "type": "[JobResource]"},
    }

    def __init__(self, *, next_link: Optional[str] = None, value: Optional[List["_models.JobResource"]] = None, **kwargs):
        """
        :keyword next_link: The uri to fetch the next page of resources. Call ListNext() fetches
         next page of resources.
        :paramtype next_link: str
        :keyword value: List of resources.
        :paramtype value: list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobResource]
        """
        # The pagination link is owned by the ResourceList base.
        super().__init__(next_link=next_link, **kwargs)
        self.value = value
class KEKDetails(_serialization.Model):
    """Details of a KEK (the encryption key that protects the BEK).

    :ivar key_url: Key is KEK.
    :vartype key_url: str
    :ivar key_vault_id: Key Vault ID where this Key is stored.
    :vartype key_vault_id: str
    :ivar key_backup_data: KEK data.
    :vartype key_backup_data: str
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "key_url": {"key": "keyUrl", "type": "str"},
        "key_vault_id": {"key": "keyVaultId", "type": "str"},
        "key_backup_data": {"key": "keyBackupData", "type": "str"},
    }

    def __init__(self, *, key_url: Optional[str] = None, key_vault_id: Optional[str] = None, key_backup_data: Optional[str] = None, **kwargs):
        """
        :keyword key_url: Key is KEK.
        :paramtype key_url: str
        :keyword key_vault_id: Key Vault ID where this Key is stored.
        :paramtype key_vault_id: str
        :keyword key_backup_data: KEK data.
        :paramtype key_backup_data: str
        """
        super().__init__(**kwargs)
        self.key_backup_data = key_backup_data
        self.key_vault_id = key_vault_id
        self.key_url = key_url
class KeyAndSecretDetails(_serialization.Model):
    """BEK is bitlocker key.
    KEK is encryption key for BEK.
    If the VM was encrypted then we will store the following details:

    #. Secret(BEK) - Url + Backup Data + vaultId.
    #. Key(KEK) - Url + Backup Data + vaultId.
    #. EncryptionMechanism

    BEK and KEK can potentially have different vault ids.

    :ivar kek_details: KEK is encryption key for BEK.
    :vartype kek_details: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KEKDetails
    :ivar bek_details: BEK is bitlocker encryption key.
    :vartype bek_details: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BEKDetails
    :ivar encryption_mechanism: Encryption mechanism: None/ SinglePass/ DoublePass.
    :vartype encryption_mechanism: str
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "kek_details": {"key": "kekDetails", "type": "KEKDetails"},
        "bek_details": {"key": "bekDetails", "type": "BEKDetails"},
        "encryption_mechanism": {"key": "encryptionMechanism", "type": "str"},
    }

    def __init__(self, *, kek_details: Optional["_models.KEKDetails"] = None, bek_details: Optional["_models.BEKDetails"] = None, encryption_mechanism: Optional[str] = None, **kwargs):
        """
        :keyword kek_details: KEK is encryption key for BEK.
        :paramtype kek_details: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.KEKDetails
        :keyword bek_details: BEK is bitlocker encryption key.
        :paramtype bek_details: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BEKDetails
        :keyword encryption_mechanism: Encryption mechanism: None/ SinglePass/ DoublePass.
        :paramtype encryption_mechanism: str
        """
        super().__init__(**kwargs)
        self.encryption_mechanism = encryption_mechanism
        self.bek_details = bek_details
        self.kek_details = kek_details
class KPIResourceHealthDetails(_serialization.Model):
    """KPI Resource Health Details.

    :ivar resource_health_status: Resource Health Status. Known values are: "Healthy",
     "TransientDegraded", "PersistentDegraded", "TransientUnhealthy", "PersistentUnhealthy", and
     "Invalid".
    :vartype resource_health_status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ResourceHealthStatus
    :ivar resource_health_details: Resource Health Status.
    :vartype resource_health_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.ResourceHealthDetails]
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "resource_health_status": {"key": "resourceHealthStatus", "type": "str"},
        "resource_health_details": {"key": "resourceHealthDetails", "type": "[ResourceHealthDetails]"},
    }

    def __init__(self, *, resource_health_status: Optional[Union[str, "_models.ResourceHealthStatus"]] = None, resource_health_details: Optional[List["_models.ResourceHealthDetails"]] = None, **kwargs):
        """
        :keyword resource_health_status: Resource Health Status. Known values are: "Healthy",
         "TransientDegraded", "PersistentDegraded", "TransientUnhealthy", "PersistentUnhealthy", and
         "Invalid".
        :paramtype resource_health_status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ResourceHealthStatus
        :keyword resource_health_details: Resource Health Status.
        :paramtype resource_health_details:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.ResourceHealthDetails]
        """
        super().__init__(**kwargs)
        self.resource_health_details = resource_health_details
        self.resource_health_status = resource_health_status
class MabErrorInfo(_serialization.Model):
    """Error information specific to the MAB workload.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar error_string: Localized error string.
    :vartype error_string: str
    :ivar recommendations: List of localized recommendations.
    :vartype recommendations: list[str]
    """

    # Both fields are server-populated; the client never sends them.
    _validation = {
        "error_string": {"readonly": True},
        "recommendations": {"readonly": True},
    }

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "error_string": {"key": "errorString", "type": "str"},
        "recommendations": {"key": "recommendations", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        # Read-only on the wire; initialized to None until deserialized.
        self.recommendations = None
        self.error_string = None
class MabFileFolderProtectedItem(ProtectedItem):  # pylint: disable=too-many-instance-attributes
    """MAB workload-specific backup item.

    All required parameters must be populated in order to send to Azure.

    :ivar protected_item_type: backup item type. Required.
    :vartype protected_item_type: str
    :ivar backup_management_type: Type of backup management for the backed up item. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar workload_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar container_name: Unique name of container.
    :vartype container_name: str
    :ivar source_resource_id: ARM ID of the resource to be backed up.
    :vartype source_resource_id: str
    :ivar policy_id: ID of the backup policy with which this item is backed up.
    :vartype policy_id: str
    :ivar last_recovery_point: Timestamp when the last (latest) backup copy was created for this
     backup item.
    :vartype last_recovery_point: ~datetime.datetime
    :ivar backup_set_name: Name of the backup set the backup item belongs to.
    :vartype backup_set_name: str
    :ivar create_mode: Create mode to indicate recovery of existing soft deleted data source or
     creation of new data source. Known values are: "Invalid", "Default", and "Recover".
    :vartype create_mode: str or ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
    :ivar deferred_delete_time_in_utc: Time for deferred deletion in UTC.
    :vartype deferred_delete_time_in_utc: ~datetime.datetime
    :ivar is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
     deferred delete.
    :vartype is_scheduled_for_deferred_delete: bool
    :ivar deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete
     is permanently deleted.
    :vartype deferred_delete_time_remaining: str
    :ivar is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is
     to be purged soon.
    :vartype is_deferred_delete_schedule_upcoming: bool
    :ivar is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state.
    :vartype is_rehydrate: bool
    :ivar resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check will
     be performed.
    :vartype resource_guard_operation_requests: list[str]
    :ivar friendly_name: Friendly name of this backup item.
    :vartype friendly_name: str
    :ivar computer_name: Name of the computer associated with this backup item.
    :vartype computer_name: str
    :ivar last_backup_status: Status of last backup operation.
    :vartype last_backup_status: str
    :ivar last_backup_time: Timestamp of the last backup operation on this backup item.
    :vartype last_backup_time: ~datetime.datetime
    :ivar protection_state: Protected, ProtectionStopped, IRPending or ProtectionError.
    :vartype protection_state: str
    :ivar deferred_delete_sync_time_in_utc: Sync time for deferred deletion in UTC.
    :vartype deferred_delete_sync_time_in_utc: int
    :ivar extended_info: Additional information with this backup item.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.MabFileFolderProtectedItemExtendedInfo
    """

    # protected_item_type is the polymorphic discriminator and must always be sent.
    _validation = {
        "protected_item_type": {"required": True},
    }

    # Serialization metadata: python attribute -> wire key / serialized type.
    _attribute_map = {
        "protected_item_type": {"key": "protectedItemType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
        "last_recovery_point": {"key": "lastRecoveryPoint", "type": "iso-8601"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
        "create_mode": {"key": "createMode", "type": "str"},
        "deferred_delete_time_in_utc": {"key": "deferredDeleteTimeInUTC", "type": "iso-8601"},
        "is_scheduled_for_deferred_delete": {"key": "isScheduledForDeferredDelete", "type": "bool"},
        "deferred_delete_time_remaining": {"key": "deferredDeleteTimeRemaining", "type": "str"},
        "is_deferred_delete_schedule_upcoming": {"key": "isDeferredDeleteScheduleUpcoming", "type": "bool"},
        "is_rehydrate": {"key": "isRehydrate", "type": "bool"},
        "resource_guard_operation_requests": {"key": "resourceGuardOperationRequests", "type": "[str]"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "computer_name": {"key": "computerName", "type": "str"},
        "last_backup_status": {"key": "lastBackupStatus", "type": "str"},
        "last_backup_time": {"key": "lastBackupTime", "type": "iso-8601"},
        "protection_state": {"key": "protectionState", "type": "str"},
        "deferred_delete_sync_time_in_utc": {"key": "deferredDeleteSyncTimeInUTC", "type": "int"},
        "extended_info": {"key": "extendedInfo", "type": "MabFileFolderProtectedItemExtendedInfo"},
    }

    def __init__(
        self,
        *,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        workload_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        container_name: Optional[str] = None,
        source_resource_id: Optional[str] = None,
        policy_id: Optional[str] = None,
        last_recovery_point: Optional[datetime.datetime] = None,
        backup_set_name: Optional[str] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        deferred_delete_time_in_utc: Optional[datetime.datetime] = None,
        is_scheduled_for_deferred_delete: Optional[bool] = None,
        deferred_delete_time_remaining: Optional[str] = None,
        is_deferred_delete_schedule_upcoming: Optional[bool] = None,
        is_rehydrate: Optional[bool] = None,
        resource_guard_operation_requests: Optional[List[str]] = None,
        friendly_name: Optional[str] = None,
        computer_name: Optional[str] = None,
        last_backup_status: Optional[str] = None,
        last_backup_time: Optional[datetime.datetime] = None,
        protection_state: Optional[str] = None,
        deferred_delete_sync_time_in_utc: Optional[int] = None,
        extended_info: Optional["_models.MabFileFolderProtectedItemExtendedInfo"] = None,
        **kwargs
    ):
        """
        :keyword backup_management_type: Type of backup management for the backed up item. Known values
         are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
         "AzureWorkload", and "DefaultBackup".
        :paramtype backup_management_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
        :keyword workload_type: Type of workload this item represents. Known values are: "Invalid",
         "VM", "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
         "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
         "SAPAseDatabase".
        :paramtype workload_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
        :keyword container_name: Unique name of container.
        :paramtype container_name: str
        :keyword source_resource_id: ARM ID of the resource to be backed up.
        :paramtype source_resource_id: str
        :keyword policy_id: ID of the backup policy with which this item is backed up.
        :paramtype policy_id: str
        :keyword last_recovery_point: Timestamp when the last (latest) backup copy was created for this
         backup item.
        :paramtype last_recovery_point: ~datetime.datetime
        :keyword backup_set_name: Name of the backup set the backup item belongs to.
        :paramtype backup_set_name: str
        :keyword create_mode: Create mode to indicate recovery of existing soft deleted data source or
         creation of new data source. Known values are: "Invalid", "Default", and "Recover".
        :paramtype create_mode: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.CreateMode
        :keyword deferred_delete_time_in_utc: Time for deferred deletion in UTC.
        :paramtype deferred_delete_time_in_utc: ~datetime.datetime
        :keyword is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for
         deferred delete.
        :paramtype is_scheduled_for_deferred_delete: bool
        :keyword deferred_delete_time_remaining: Time remaining before the DS marked for deferred
         delete is permanently deleted.
        :paramtype deferred_delete_time_remaining: str
        :keyword is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS
         is to be purged soon.
        :paramtype is_deferred_delete_schedule_upcoming: bool
        :keyword is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause
         state.
        :paramtype is_rehydrate: bool
        :keyword resource_guard_operation_requests: ResourceGuardOperationRequests on which LAC check
         will be performed.
        :paramtype resource_guard_operation_requests: list[str]
        :keyword friendly_name: Friendly name of this backup item.
        :paramtype friendly_name: str
        :keyword computer_name: Name of the computer associated with this backup item.
        :paramtype computer_name: str
        :keyword last_backup_status: Status of last backup operation.
        :paramtype last_backup_status: str
        :keyword last_backup_time: Timestamp of the last backup operation on this backup item.
        :paramtype last_backup_time: ~datetime.datetime
        :keyword protection_state: Protected, ProtectionStopped, IRPending or ProtectionError.
        :paramtype protection_state: str
        :keyword deferred_delete_sync_time_in_utc: Sync time for deferred deletion in UTC.
        :paramtype deferred_delete_sync_time_in_utc: int
        :keyword extended_info: Additional information with this backup item.
        :paramtype extended_info:
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.MabFileFolderProtectedItemExtendedInfo
        """
        # Shared ProtectedItem fields are forwarded to the base; only the
        # MAB-specific fields are stored on this class below.
        super().__init__(
            backup_management_type=backup_management_type,
            workload_type=workload_type,
            container_name=container_name,
            source_resource_id=source_resource_id,
            policy_id=policy_id,
            last_recovery_point=last_recovery_point,
            backup_set_name=backup_set_name,
            create_mode=create_mode,
            deferred_delete_time_in_utc=deferred_delete_time_in_utc,
            is_scheduled_for_deferred_delete=is_scheduled_for_deferred_delete,
            deferred_delete_time_remaining=deferred_delete_time_remaining,
            is_deferred_delete_schedule_upcoming=is_deferred_delete_schedule_upcoming,
            is_rehydrate=is_rehydrate,
            resource_guard_operation_requests=resource_guard_operation_requests,
            **kwargs
        )
        # Fixed polymorphic discriminator for this ProtectedItem subtype.
        self.protected_item_type = "MabFileFolderProtectedItem"  # type: str
        self.friendly_name = friendly_name
        self.computer_name = computer_name
        self.last_backup_status = last_backup_status
        self.last_backup_time = last_backup_time
        self.protection_state = protection_state
        self.deferred_delete_sync_time_in_utc = deferred_delete_sync_time_in_utc
        self.extended_info = extended_info
class MabFileFolderProtectedItemExtendedInfo(_serialization.Model):
    """Supplementary details attached to a MAB file/folder backup item.

    :ivar last_refreshed_at: Last time when the agent data synced to service.
    :vartype last_refreshed_at: ~datetime.datetime
    :ivar oldest_recovery_point: The oldest backup copy available.
    :vartype oldest_recovery_point: ~datetime.datetime
    :ivar recovery_point_count: Number of backup copies associated with the backup item.
    :vartype recovery_point_count: int
    """

    # Serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        "last_refreshed_at": {"key": "lastRefreshedAt", "type": "iso-8601"},
        "oldest_recovery_point": {"key": "oldestRecoveryPoint", "type": "iso-8601"},
        "recovery_point_count": {"key": "recoveryPointCount", "type": "int"},
    }

    def __init__(self, *, last_refreshed_at: Optional[datetime.datetime] = None, oldest_recovery_point: Optional[datetime.datetime] = None, recovery_point_count: Optional[int] = None, **kwargs):
        """
        :keyword last_refreshed_at: Last time when the agent data synced to service.
        :paramtype last_refreshed_at: ~datetime.datetime
        :keyword oldest_recovery_point: The oldest backup copy available.
        :paramtype oldest_recovery_point: ~datetime.datetime
        :keyword recovery_point_count: Number of backup copies associated with the backup item.
        :paramtype recovery_point_count: int
        """
        super().__init__(**kwargs)
        self.recovery_point_count = recovery_point_count
        self.oldest_recovery_point = oldest_recovery_point
        self.last_refreshed_at = last_refreshed_at
class MabJob(Job):  # pylint: disable=too-many-instance-attributes
    """MAB workload-specific job.

    All required parameters must be populated in order to send to Azure.

    :ivar entity_friendly_name: Friendly name of the entity on which the current job is executing.
    :vartype entity_friendly_name: str
    :ivar backup_management_type: Backup management type to execute the current job. Known values
     are: "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar operation: The operation name.
    :vartype operation: str
    :ivar status: Job status.
    :vartype status: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar activity_id: ActivityId of job.
    :vartype activity_id: str
    :ivar job_type: This property will be used as the discriminator for deciding the specific types
     in the polymorphic chain of types. Required.
    :vartype job_type: str
    :ivar duration: Time taken by job to run.
    :vartype duration: ~datetime.timedelta
    :ivar actions_info: The state/actions applicable on jobs like cancel/retry.
    :vartype actions_info: list[str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.JobSupportedAction]
    :ivar mab_server_name: Name of server protecting the DS.
    :vartype mab_server_name: str
    :ivar mab_server_type: Server type of MAB container. Known values are: "Invalid", "Unknown",
     "IaasVMContainer", "IaasVMServiceContainer", "DPMContainer", "AzureBackupServerContainer",
     "MABContainer", "Cluster", "AzureSqlContainer", "Windows", "VCenter", "VMAppContainer",
     "SQLAGWorkLoadContainer", "StorageContainer", and "GenericContainer".
    :vartype mab_server_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.MabServerType
    :ivar workload_type: Workload type of backup item. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype workload_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.WorkloadType
    :ivar error_details: The errors.
    :vartype error_details:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.MabErrorInfo]
    :ivar extended_info: Additional information on the job.
    :vartype extended_info:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.MabJobExtendedInfo
    """

    # The discriminator must always be present on the wire.
    _validation = {
        "job_type": {"required": True},
    }

    _attribute_map = {
        "entity_friendly_name": {"key": "entityFriendlyName", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "activity_id": {"key": "activityId", "type": "str"},
        "job_type": {"key": "jobType", "type": "str"},
        "duration": {"key": "duration", "type": "duration"},
        "actions_info": {"key": "actionsInfo", "type": "[str]"},
        "mab_server_name": {"key": "mabServerName", "type": "str"},
        "mab_server_type": {"key": "mabServerType", "type": "str"},
        "workload_type": {"key": "workloadType", "type": "str"},
        "error_details": {"key": "errorDetails", "type": "[MabErrorInfo]"},
        "extended_info": {"key": "extendedInfo", "type": "MabJobExtendedInfo"},
    }

    def __init__(
        self,
        *,
        entity_friendly_name: Optional[str] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        operation: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        activity_id: Optional[str] = None,
        duration: Optional[datetime.timedelta] = None,
        actions_info: Optional[List[Union[str, "_models.JobSupportedAction"]]] = None,
        mab_server_name: Optional[str] = None,
        mab_server_type: Optional[Union[str, "_models.MabServerType"]] = None,
        workload_type: Optional[Union[str, "_models.WorkloadType"]] = None,
        error_details: Optional[List["_models.MabErrorInfo"]] = None,
        extended_info: Optional["_models.MabJobExtendedInfo"] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        # Shared Job fields are handled by the base class; only MAB-specific
        # fields are stored here.
        super().__init__(
            entity_friendly_name=entity_friendly_name,
            backup_management_type=backup_management_type,
            operation=operation,
            status=status,
            start_time=start_time,
            end_time=end_time,
            activity_id=activity_id,
            **kwargs
        )
        # Fixed discriminator value identifying this polymorphic subtype.
        self.job_type: str = "MabJob"
        self.duration = duration
        self.actions_info = actions_info
        self.mab_server_name = mab_server_name
        self.mab_server_type = mab_server_type
        self.workload_type = workload_type
        self.error_details = error_details
        self.extended_info = extended_info
class MabJobExtendedInfo(_serialization.Model):
    """Additional information for the MAB workload-specific job.

    :ivar tasks_list: List of tasks for this job.
    :vartype tasks_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.MabJobTaskDetails]
    :ivar property_bag: The job properties.
    :vartype property_bag: dict[str, str]
    :ivar dynamic_error_message: Non localized error message specific to this job.
    :vartype dynamic_error_message: str
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "tasks_list": {"key": "tasksList", "type": "[MabJobTaskDetails]"},
        "property_bag": {"key": "propertyBag", "type": "{str}"},
        "dynamic_error_message": {"key": "dynamicErrorMessage", "type": "str"},
    }

    def __init__(
        self,
        *,
        tasks_list: Optional[List["_models.MabJobTaskDetails"]] = None,
        property_bag: Optional[Dict[str, str]] = None,
        dynamic_error_message: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.dynamic_error_message = dynamic_error_message
        self.property_bag = property_bag
        self.tasks_list = tasks_list
class MabJobTaskDetails(_serialization.Model):
    """MAB workload-specific job task details.

    :ivar task_id: The task display name.
    :vartype task_id: str
    :ivar start_time: The start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: The end time.
    :vartype end_time: ~datetime.datetime
    :ivar duration: Time elapsed for task.
    :vartype duration: ~datetime.timedelta
    :ivar status: The status.
    :vartype status: str
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "task_id": {"key": "taskId", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "duration": {"key": "duration", "type": "duration"},
        "status": {"key": "status", "type": "str"},
    }

    def __init__(
        self,
        *,
        task_id: Optional[str] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        duration: Optional[datetime.timedelta] = None,
        status: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.status = status
        self.duration = duration
        self.end_time = end_time
        self.start_time = start_time
        self.task_id = task_id
class NameInfo(_serialization.Model):
    """The name of usage.

    :ivar value: Value of usage.
    :vartype value: str
    :ivar localized_value: Localized value of usage.
    :vartype localized_value: str
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "value": {"key": "value", "type": "str"},
        "localized_value": {"key": "localizedValue", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[str] = None,
        localized_value: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.localized_value = localized_value
        self.value = value
class NewErrorResponse(_serialization.Model):
    """The resource management error response.

    :ivar error: The error object.
    :vartype error: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.NewErrorResponseError
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "error": {"key": "error", "type": "NewErrorResponseError"},
    }

    def __init__(
        self,
        *,
        error: Optional["_models.NewErrorResponseError"] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``error`` is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.error = error
class NewErrorResponseError(_serialization.Model):
    """The error object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.NewErrorResponse]
    :ivar additional_info: The error additional info.
    :vartype additional_info:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.ErrorAdditionalInfo]
    """

    # Every field is server-populated; clients never send them.
    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
        "target": {"readonly": True},
        "details": {"readonly": True},
        "additional_info": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[NewErrorResponse]"},
        "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
    }

    def __init__(self, **kwargs):
        """All fields are read-only server data; initialise each to ``None``."""
        super().__init__(**kwargs)
        for _field in ("code", "message", "target", "details", "additional_info"):
            setattr(self, _field, None)
class OperationStatus(_serialization.Model):
    """Operation status.

    :ivar id: ID of the operation.
    :vartype id: str
    :ivar name: Name of the operation.
    :vartype name: str
    :ivar status: Operation status. Known values are: "Invalid", "InProgress", "Succeeded",
     "Failed", and "Canceled".
    :vartype status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.OperationStatusValues
    :ivar start_time: Operation start time. Format: ISO-8601.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: Operation end time. Format: ISO-8601.
    :vartype end_time: ~datetime.datetime
    :ivar error: Error information related to this operation.
    :vartype error: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.OperationStatusError
    :ivar properties: Additional information associated with this operation.
    :vartype properties:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.OperationStatusExtendedInfo
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "error": {"key": "error", "type": "OperationStatusError"},
        "properties": {"key": "properties", "type": "OperationStatusExtendedInfo"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        name: Optional[str] = None,
        status: Optional[Union[str, "_models.OperationStatusValues"]] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        error: Optional["_models.OperationStatusError"] = None,
        properties: Optional["_models.OperationStatusExtendedInfo"] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        # ``id`` intentionally mirrors the REST payload key despite shadowing
        # the builtin inside this scope.
        self.id = id
        self.name = name
        self.status = status
        self.start_time = start_time
        self.end_time = end_time
        self.error = error
        self.properties = properties
class OperationStatusError(_serialization.Model):
    """Error information associated with operation status call.

    :ivar code: Error code of the operation failure.
    :vartype code: str
    :ivar message: Error message displayed if the operation failure.
    :vartype message: str
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.message = message
        self.code = code
class OperationStatusExtendedInfo(_serialization.Model):
    """Base class for additional information of operation status.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    OperationStatusJobExtendedInfo, OperationStatusJobsExtendedInfo,
    OperationStatusProvisionILRExtendedInfo, OperationStatusRecoveryPointExtendedInfo

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
    }

    # Discriminator value -> concrete model class name, used by the
    # deserializer to pick the right subclass.
    _subtype_map = {
        "object_type": {
            "OperationStatusJobExtendedInfo": "OperationStatusJobExtendedInfo",
            "OperationStatusJobsExtendedInfo": "OperationStatusJobsExtendedInfo",
            "OperationStatusProvisionILRExtendedInfo": "OperationStatusProvisionILRExtendedInfo",
            "OperationStatusRecoveryPointExtendedInfo": "OperationStatusRecoveryPointExtendedInfo",
        }
    }

    def __init__(self, **kwargs):
        """Base constructor; subclasses overwrite ``object_type`` with their discriminator."""
        super().__init__(**kwargs)
        self.object_type: Optional[str] = None
class OperationStatusJobExtendedInfo(OperationStatusExtendedInfo):
    """Operation status job extended info.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar job_id: ID of the job created for this protected item.
    :vartype job_id: str
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "job_id": {"key": "jobId", "type": "str"},
    }

    def __init__(
        self,
        *,
        job_id: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``job_id`` is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        # Fixed discriminator value identifying this polymorphic subtype.
        self.object_type: str = "OperationStatusJobExtendedInfo"
        self.job_id = job_id
class OperationStatusJobsExtendedInfo(OperationStatusExtendedInfo):
    """Operation status extended info for list of jobs.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar job_ids: IDs of the jobs created for the protected item.
    :vartype job_ids: list[str]
    :ivar failed_jobs_error: Stores all the failed jobs along with the corresponding error codes.
    :vartype failed_jobs_error: dict[str, str]
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "job_ids": {"key": "jobIds", "type": "[str]"},
        "failed_jobs_error": {"key": "failedJobsError", "type": "{str}"},
    }

    def __init__(
        self,
        *,
        job_ids: Optional[List[str]] = None,
        failed_jobs_error: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        # Fixed discriminator value identifying this polymorphic subtype.
        self.object_type: str = "OperationStatusJobsExtendedInfo"
        self.failed_jobs_error = failed_jobs_error
        self.job_ids = job_ids
class OperationStatusProvisionILRExtendedInfo(OperationStatusExtendedInfo):
    """Operation status extended info for ILR provision action.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar recovery_target: Target details for file / folder restore.
    :vartype recovery_target:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.InstantItemRecoveryTarget
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "recovery_target": {"key": "recoveryTarget", "type": "InstantItemRecoveryTarget"},
    }

    def __init__(
        self,
        *,
        recovery_target: Optional["_models.InstantItemRecoveryTarget"] = None,
        **kwargs
    ):
        """Keyword-only constructor; ``recovery_target`` is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        # Fixed discriminator value identifying this polymorphic subtype.
        self.object_type: str = "OperationStatusProvisionILRExtendedInfo"
        self.recovery_target = recovery_target
class OperationStatusRecoveryPointExtendedInfo(OperationStatusExtendedInfo):
    """Operation status extended info for Updated Recovery Point.

    All required parameters must be populated in order to send to Azure.

    :ivar object_type: This property will be used as the discriminator for deciding the specific
     types in the polymorphic chain of types. Required.
    :vartype object_type: str
    :ivar updated_recovery_point: Recovery Point info with updated source snapshot URI.
    :vartype updated_recovery_point:
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPoint
    :ivar deleted_backup_item_version: In case the share is in soft-deleted state, populate this
     field with deleted backup item.
    :vartype deleted_backup_item_version: str
    """

    _validation = {
        "object_type": {"required": True},
    }

    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "updated_recovery_point": {"key": "updatedRecoveryPoint", "type": "RecoveryPoint"},
        "deleted_backup_item_version": {"key": "deletedBackupItemVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        updated_recovery_point: Optional["_models.RecoveryPoint"] = None,
        deleted_backup_item_version: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        # Fixed discriminator value identifying this polymorphic subtype.
        self.object_type: str = "OperationStatusRecoveryPointExtendedInfo"
        self.deleted_backup_item_version = deleted_backup_item_version
        self.updated_recovery_point = updated_recovery_point
class PointInTimeRange(_serialization.Model):
    """Provides details for log ranges.

    :ivar start_time: Start time of the time range for log recovery.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: End time of the time range for log recovery.
    :vartype end_time: ~datetime.datetime
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.end_time = end_time
        self.start_time = start_time
class ProtectedItemQueryObject(_serialization.Model):
    """Filters to list backup items.

    :ivar health_state: Health State for the backed up item. Known values are: "Passed",
     "ActionRequired", "ActionSuggested", and "Invalid".
    :vartype health_state: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.HealthState
    :ivar backup_management_type: Backup management type for the backed up item. Known values are:
     "Invalid", "AzureIaasVM", "MAB", "DPM", "AzureBackupServer", "AzureSql", "AzureStorage",
     "AzureWorkload", and "DefaultBackup".
    :vartype backup_management_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.BackupManagementType
    :ivar item_type: Type of workload this item represents. Known values are: "Invalid", "VM",
     "FileFolder", "AzureSqlDb", "SQLDB", "Exchange", "Sharepoint", "VMwareVM", "SystemState",
     "Client", "GenericDataSource", "SQLDataBase", "AzureFileShare", "SAPHanaDatabase", and
     "SAPAseDatabase".
    :vartype item_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.DataSourceType
    :ivar policy_name: Backup policy name associated with the backup item.
    :vartype policy_name: str
    :ivar container_name: Name of the container.
    :vartype container_name: str
    :ivar backup_engine_name: Backup Engine name.
    :vartype backup_engine_name: str
    :ivar friendly_name: Friendly name of protected item.
    :vartype friendly_name: str
    :ivar fabric_name: Name of the fabric.
    :vartype fabric_name: str
    :ivar backup_set_name: Name of the backup set.
    :vartype backup_set_name: str
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "health_state": {"key": "healthState", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "item_type": {"key": "itemType", "type": "str"},
        "policy_name": {"key": "policyName", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "backup_engine_name": {"key": "backupEngineName", "type": "str"},
        "friendly_name": {"key": "friendlyName", "type": "str"},
        "fabric_name": {"key": "fabricName", "type": "str"},
        "backup_set_name": {"key": "backupSetName", "type": "str"},
    }

    def __init__(
        self,
        *,
        health_state: Optional[Union[str, "_models.HealthState"]] = None,
        backup_management_type: Optional[Union[str, "_models.BackupManagementType"]] = None,
        item_type: Optional[Union[str, "_models.DataSourceType"]] = None,
        policy_name: Optional[str] = None,
        container_name: Optional[str] = None,
        backup_engine_name: Optional[str] = None,
        friendly_name: Optional[str] = None,
        fabric_name: Optional[str] = None,
        backup_set_name: Optional[str] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        # Every filter is optional; unset filters are simply omitted from the query.
        self.health_state = health_state
        self.backup_management_type = backup_management_type
        self.item_type = item_type
        self.policy_name = policy_name
        self.container_name = container_name
        self.backup_engine_name = backup_engine_name
        self.friendly_name = friendly_name
        self.fabric_name = fabric_name
        self.backup_set_name = backup_set_name
class ProtectedItemResource(Resource):
    """Base class for backup items.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: ProtectedItemResource properties.
    :vartype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItem
    """

    # id/name/type come back from the service and are never sent.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "ProtectedItem"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.ProtectedItem"] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        # ARM envelope fields are owned by the Resource base class; only the
        # payload-specific ``properties`` is stored here.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class ProtectedItemResourceList(ResourceList):
    """List of ProtectedItem resources.

    :ivar next_link: The uri to fetch the next page of resources. Call ListNext() fetches next page
     of resources.
    :vartype next_link: str
    :ivar value: List of resources.
    :vartype value:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.ProtectedItemResource]
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "value": {"key": "value", "type": "[ProtectedItemResource]"},
    }

    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        value: Optional[List["_models.ProtectedItemResource"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        # Pagination link is handled by the ResourceList base class.
        super().__init__(next_link=next_link, **kwargs)
        self.value = value
class RecoveryPointDiskConfiguration(_serialization.Model):
    """Disk configuration.

    :ivar number_of_disks_included_in_backup: Number of disks included in backup.
    :vartype number_of_disks_included_in_backup: int
    :ivar number_of_disks_attached_to_vm: Number of disks attached to the VM.
    :vartype number_of_disks_attached_to_vm: int
    :ivar included_disk_list: Information of disks included in backup.
    :vartype included_disk_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.DiskInformation]
    :ivar excluded_disk_list: Information of disks excluded from backup.
    :vartype excluded_disk_list:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.DiskInformation]
    """

    # Maps python attribute names to wire-format keys and serialization types.
    _attribute_map = {
        "number_of_disks_included_in_backup": {"key": "numberOfDisksIncludedInBackup", "type": "int"},
        "number_of_disks_attached_to_vm": {"key": "numberOfDisksAttachedToVm", "type": "int"},
        "included_disk_list": {"key": "includedDiskList", "type": "[DiskInformation]"},
        "excluded_disk_list": {"key": "excludedDiskList", "type": "[DiskInformation]"},
    }

    def __init__(
        self,
        *,
        number_of_disks_included_in_backup: Optional[int] = None,
        number_of_disks_attached_to_vm: Optional[int] = None,
        included_disk_list: Optional[List["_models.DiskInformation"]] = None,
        excluded_disk_list: Optional[List["_models.DiskInformation"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; each keyword is described in the class-level ``:ivar:`` docs."""
        super().__init__(**kwargs)
        self.excluded_disk_list = excluded_disk_list
        self.included_disk_list = included_disk_list
        self.number_of_disks_attached_to_vm = number_of_disks_attached_to_vm
        self.number_of_disks_included_in_backup = number_of_disks_included_in_backup
class RecoveryPointMoveReadinessInfo(_serialization.Model):
    """Readiness information for moving a recovery point.

    :ivar is_ready_for_move:
    :vartype is_ready_for_move: bool
    :ivar additional_info:
    :vartype additional_info: str
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "is_ready_for_move": {"key": "isReadyForMove", "type": "bool"},
        "additional_info": {"key": "additionalInfo", "type": "str"},
    }

    def __init__(
        self,
        *,
        is_ready_for_move: Optional[bool] = None,
        additional_info: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword is_ready_for_move:
        :paramtype is_ready_for_move: bool
        :keyword additional_info:
        :paramtype additional_info: str
        """
        super().__init__(**kwargs)
        self.is_ready_for_move = is_ready_for_move
        self.additional_info = additional_info
class RecoveryPointResource(Resource):
    """ARM resource wrapper for a backup copy (recovery point).

    Base class for backup copies; workload-specific backup copies derive from this class.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar e_tag: Optional ETag.
    :vartype e_tag: str
    :ivar properties: RecoveryPointResource properties.
    :vartype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPoint
    """

    # These fields are server-populated and therefore read-only on requests.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "properties": {"key": "properties", "type": "RecoveryPoint"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        e_tag: Optional[str] = None,
        properties: Optional["_models.RecoveryPoint"] = None,
        **kwargs
    ):
        """
        :keyword location: Resource location.
        :paramtype location: str
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword e_tag: Optional ETag.
        :paramtype e_tag: str
        :keyword properties: RecoveryPointResource properties.
        :paramtype properties: ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPoint
        """
        # Common ARM resource fields are handled by the Resource base class.
        super().__init__(location=location, tags=tags, e_tag=e_tag, **kwargs)
        self.properties = properties
class RecoveryPointResourceList(ResourceList):
    """Paged list of RecoveryPoint resources.

    :ivar next_link: The uri to fetch the next page of resources. Call ListNext() fetches next page
     of resources.
    :vartype next_link: str
    :ivar value: List of resources.
    :vartype value:
     list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointResource]
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "value": {"key": "value", "type": "[RecoveryPointResource]"},
    }

    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        value: Optional[List["_models.RecoveryPointResource"]] = None,
        **kwargs
    ):
        """
        :keyword next_link: The uri to fetch the next page of resources. Call ListNext() fetches
         next page of resources.
        :paramtype next_link: str
        :keyword value: List of resources.
        :paramtype value:
         list[~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointResource]
        """
        # The paging link is handled by the ResourceList base class.
        super().__init__(next_link=next_link, **kwargs)
        self.value = value
class RecoveryPointTierInformation(_serialization.Model):
    """Tier placement information for a recovery point.

    :ivar type: Recovery point tier type. Known values are: "Invalid", "InstantRP", "HardenedRP",
     and "ArchivedRP".
    :vartype type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierType
    :ivar status: Recovery point tier status. Known values are: "Invalid", "Valid", "Disabled",
     "Deleted", and "Rehydrated".
    :vartype status: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierStatus
    :ivar extended_info: Recovery point tier status.
    :vartype extended_info: dict[str, str]
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "status": {"key": "status", "type": "str"},
        "extended_info": {"key": "extendedInfo", "type": "{str}"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "_models.RecoveryPointTierType"]] = None,
        status: Optional[Union[str, "_models.RecoveryPointTierStatus"]] = None,
        extended_info: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword type: Recovery point tier type. Known values are: "Invalid", "InstantRP",
         "HardenedRP", and "ArchivedRP".
        :paramtype type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierType
        :keyword status: Recovery point tier status. Known values are: "Invalid", "Valid",
         "Disabled", "Deleted", and "Rehydrated".
        :paramtype status: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.RecoveryPointTierStatus
        :keyword extended_info: Recovery point tier status.
        :paramtype extended_info: dict[str, str]
        """
        super().__init__(**kwargs)
        # NOTE: the parameter name "type" mirrors the service contract, so the
        # builtin shadowing is intentional and part of the public interface.
        self.type = type
        self.status = status
        self.extended_info = extended_info
class RestoreFileSpecs(_serialization.Model):
    """Specification of a file or folder to restore: source path, path type and target folder.

    :ivar path: Source File/Folder path.
    :vartype path: str
    :ivar file_spec_type: Indicates what the Path variable stands for.
    :vartype file_spec_type: str
    :ivar target_folder_path: Destination folder path in target FileShare.
    :vartype target_folder_path: str
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "path": {"key": "path", "type": "str"},
        "file_spec_type": {"key": "fileSpecType", "type": "str"},
        "target_folder_path": {"key": "targetFolderPath", "type": "str"},
    }

    def __init__(
        self,
        *,
        path: Optional[str] = None,
        file_spec_type: Optional[str] = None,
        target_folder_path: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword path: Source File/Folder path.
        :paramtype path: str
        :keyword file_spec_type: Indicates what the Path variable stands for.
        :paramtype file_spec_type: str
        :keyword target_folder_path: Destination folder path in target FileShare.
        :paramtype target_folder_path: str
        """
        super().__init__(**kwargs)
        self.path = path
        self.file_spec_type = file_spec_type
        self.target_folder_path = target_folder_path
class SQLDataDirectory(_serialization.Model):
    """Describes a single SQL data directory entry.

    :ivar type: Type of data directory mapping. Known values are: "Invalid", "Data", and "Log".
    :vartype type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryType
    :ivar path: File path.
    :vartype path: str
    :ivar logical_name: Logical name of the file.
    :vartype logical_name: str
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "path": {"key": "path", "type": "str"},
        "logical_name": {"key": "logicalName", "type": "str"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "_models.SQLDataDirectoryType"]] = None,
        path: Optional[str] = None,
        logical_name: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword type: Type of data directory mapping. Known values are: "Invalid", "Data", and
         "Log".
        :paramtype type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryType
        :keyword path: File path.
        :paramtype path: str
        :keyword logical_name: Logical name of the file.
        :paramtype logical_name: str
        """
        super().__init__(**kwargs)
        # "type" mirrors the service contract; the builtin shadowing is intentional.
        self.type = type
        self.path = path
        self.logical_name = logical_name
class SQLDataDirectoryMapping(_serialization.Model):
    """Mapping of a SQL data directory from its restore source to a target path.

    :ivar mapping_type: Type of data directory mapping. Known values are: "Invalid", "Data", and
     "Log".
    :vartype mapping_type: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryType
    :ivar source_logical_name: Restore source logical name path.
    :vartype source_logical_name: str
    :ivar source_path: Restore source path.
    :vartype source_path: str
    :ivar target_path: Target path.
    :vartype target_path: str
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "mapping_type": {"key": "mappingType", "type": "str"},
        "source_logical_name": {"key": "sourceLogicalName", "type": "str"},
        "source_path": {"key": "sourcePath", "type": "str"},
        "target_path": {"key": "targetPath", "type": "str"},
    }

    def __init__(
        self,
        *,
        mapping_type: Optional[Union[str, "_models.SQLDataDirectoryType"]] = None,
        source_logical_name: Optional[str] = None,
        source_path: Optional[str] = None,
        target_path: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword mapping_type: Type of data directory mapping. Known values are: "Invalid", "Data",
         and "Log".
        :paramtype mapping_type: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.SQLDataDirectoryType
        :keyword source_logical_name: Restore source logical name path.
        :paramtype source_logical_name: str
        :keyword source_path: Restore source path.
        :paramtype source_path: str
        :keyword target_path: Target path.
        :paramtype target_path: str
        """
        super().__init__(**kwargs)
        self.mapping_type = mapping_type
        self.source_logical_name = source_logical_name
        self.source_path = source_path
        self.target_path = target_path
class TargetAFSRestoreInfo(_serialization.Model):
    """Information about the target Azure File Share of a restore.

    :ivar name: File share name.
    :vartype name: str
    :ivar target_resource_id: Target file share resource ARM ID.
    :vartype target_resource_id: str
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "target_resource_id": {"key": "targetResourceId", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        target_resource_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: File share name.
        :paramtype name: str
        :keyword target_resource_id: Target file share resource ARM ID.
        :paramtype target_resource_id: str
        """
        super().__init__(**kwargs)
        self.name = name
        self.target_resource_id = target_resource_id
class TargetRestoreInfo(_serialization.Model):
    """Describes the target workload of a restore operation.

    :ivar overwrite_option: Can Overwrite if Target DataBase already exists. Known values are:
     "Invalid", "FailOnConflict", and "Overwrite".
    :vartype overwrite_option: str or
     ~azure.mgmt.recoveryservicesbackup.passivestamp.models.OverwriteOptions
    :ivar container_id: Resource Id name of the container in which Target DataBase resides.
    :vartype container_id: str
    :ivar database_name: Database name InstanceName/DataBaseName for SQL or System/DbName for SAP
     Hana.
    :vartype database_name: str
    :ivar target_directory_for_file_restore: Target directory location for restore as files.
    :vartype target_directory_for_file_restore: str
    """

    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "overwrite_option": {"key": "overwriteOption", "type": "str"},
        "container_id": {"key": "containerId", "type": "str"},
        "database_name": {"key": "databaseName", "type": "str"},
        "target_directory_for_file_restore": {"key": "targetDirectoryForFileRestore", "type": "str"},
    }

    def __init__(
        self,
        *,
        overwrite_option: Optional[Union[str, "_models.OverwriteOptions"]] = None,
        container_id: Optional[str] = None,
        database_name: Optional[str] = None,
        target_directory_for_file_restore: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword overwrite_option: Can Overwrite if Target DataBase already exists. Known values
         are: "Invalid", "FailOnConflict", and "Overwrite".
        :paramtype overwrite_option: str or
         ~azure.mgmt.recoveryservicesbackup.passivestamp.models.OverwriteOptions
        :keyword container_id: Resource Id name of the container in which Target DataBase resides.
        :paramtype container_id: str
        :keyword database_name: Database name InstanceName/DataBaseName for SQL or System/DbName
         for SAP Hana.
        :paramtype database_name: str
        :keyword target_directory_for_file_restore: Target directory location for restore as files.
        :paramtype target_directory_for_file_restore: str
        """
        super().__init__(**kwargs)
        self.overwrite_option = overwrite_option
        self.container_id = container_id
        self.database_name = database_name
        self.target_directory_for_file_restore = target_directory_for_file_restore
class WorkloadCrrAccessToken(CrrAccessToken): # pylint: disable=too-many-instance-attributes
    """WorkloadCrrAccessToken.
    All required parameters must be populated in order to send to Azure.
    :ivar object_type: Type of the specific object - used for deserializing. Required.
    :vartype object_type: str
    :ivar access_token_string: Access token used for authentication.
    :vartype access_token_string: str
    :ivar subscription_id: Subscription Id of the source vault.
    :vartype subscription_id: str
    :ivar resource_group_name: Resource Group name of the source vault.
    :vartype resource_group_name: str
    :ivar resource_name: Resource Name of the source vault.
    :vartype resource_name: str
    :ivar resource_id: Resource Id of the source vault.
    :vartype resource_id: str
    :ivar protection_container_id: Protected item container id.
    :vartype protection_container_id: int
    :ivar recovery_point_id: Recovery Point Id.
    :vartype recovery_point_id: str
    :ivar recovery_point_time: Recovery Point Time.
    :vartype recovery_point_time: str
    :ivar container_name: Container Unique name.
    :vartype container_name: str
    :ivar container_type: Container Type.
    :vartype container_type: str
    :ivar backup_management_type: Backup Management Type.
    :vartype backup_management_type: str
    :ivar datasource_type: Datasource Type.
    :vartype datasource_type: str
    :ivar datasource_name: Datasource Friendly Name.
    :vartype datasource_name: str
    :ivar datasource_id: Datasource Id.
    :vartype datasource_id: str
    :ivar datasource_container_name: Datasource Container Unique Name.
    :vartype datasource_container_name: str
    :ivar coordinator_service_stamp_id: CoordinatorServiceStampId to be used by BCM in restore
    call.
    :vartype coordinator_service_stamp_id: str
    :ivar coordinator_service_stamp_uri: CoordinatorServiceStampUri to be used by BCM in restore
    call.
    :vartype coordinator_service_stamp_uri: str
    :ivar protection_service_stamp_id: ProtectionServiceStampId to be used by BCM in restore call.
    :vartype protection_service_stamp_id: str
    :ivar protection_service_stamp_uri: ProtectionServiceStampUri to be used by BCM in restore
    call.
    :vartype protection_service_stamp_uri: str
    :ivar token_extended_information: Extended Information about the token like FileSpec etc.
    :vartype token_extended_information: str
    :ivar rp_tier_information: Recovery point Tier Information.
    :vartype rp_tier_information: dict[str, str]
    :ivar rp_original_sa_option: Recovery point information: Original SA option.
    :vartype rp_original_sa_option: bool
    :ivar rp_is_managed_virtual_machine: Recovery point information: Managed virtual machine.
    :vartype rp_is_managed_virtual_machine: bool
    :ivar rp_vm_size_description: Recovery point information: VM size description.
    :vartype rp_vm_size_description: str
    :ivar b_ms_active_region: Active region name of BMS Stamp.
    :vartype b_ms_active_region: str
    :ivar protectable_object_unique_name:
    :vartype protectable_object_unique_name: str
    :ivar protectable_object_friendly_name:
    :vartype protectable_object_friendly_name: str
    :ivar protectable_object_workload_type:
    :vartype protectable_object_workload_type: str
    :ivar protectable_object_protection_state:
    :vartype protectable_object_protection_state: str
    :ivar protectable_object_container_host_os_name:
    :vartype protectable_object_container_host_os_name: str
    :ivar protectable_object_parent_logical_container_name:
    :vartype protectable_object_parent_logical_container_name: str
    :ivar container_id: Container Id.
    :vartype container_id: str
    :ivar policy_name: Policy Name.
    :vartype policy_name: str
    :ivar policy_id: Policy Id.
    :vartype policy_id: str
    """
    # object_type is the polymorphic discriminator and must always be present
    # when the token is sent to the service.
    _validation = {
        "object_type": {"required": True},
    }
    # Maps each Python attribute to its wire-format key and serialization type.
    _attribute_map = {
        "object_type": {"key": "objectType", "type": "str"},
        "access_token_string": {"key": "accessTokenString", "type": "str"},
        "subscription_id": {"key": "subscriptionId", "type": "str"},
        "resource_group_name": {"key": "resourceGroupName", "type": "str"},
        "resource_name": {"key": "resourceName", "type": "str"},
        "resource_id": {"key": "resourceId", "type": "str"},
        "protection_container_id": {"key": "protectionContainerId", "type": "int"},
        "recovery_point_id": {"key": "recoveryPointId", "type": "str"},
        "recovery_point_time": {"key": "recoveryPointTime", "type": "str"},
        "container_name": {"key": "containerName", "type": "str"},
        "container_type": {"key": "containerType", "type": "str"},
        "backup_management_type": {"key": "backupManagementType", "type": "str"},
        "datasource_type": {"key": "datasourceType", "type": "str"},
        "datasource_name": {"key": "datasourceName", "type": "str"},
        "datasource_id": {"key": "datasourceId", "type": "str"},
        "datasource_container_name": {"key": "datasourceContainerName", "type": "str"},
        "coordinator_service_stamp_id": {"key": "coordinatorServiceStampId", "type": "str"},
        "coordinator_service_stamp_uri": {"key": "coordinatorServiceStampUri", "type": "str"},
        "protection_service_stamp_id": {"key": "protectionServiceStampId", "type": "str"},
        "protection_service_stamp_uri": {"key": "protectionServiceStampUri", "type": "str"},
        "token_extended_information": {"key": "tokenExtendedInformation", "type": "str"},
        "rp_tier_information": {"key": "rpTierInformation", "type": "{str}"},
        "rp_original_sa_option": {"key": "rpOriginalSAOption", "type": "bool"},
        "rp_is_managed_virtual_machine": {"key": "rpIsManagedVirtualMachine", "type": "bool"},
        "rp_vm_size_description": {"key": "rpVMSizeDescription", "type": "str"},
        "b_ms_active_region": {"key": "bMSActiveRegion", "type": "str"},
        "protectable_object_unique_name": {"key": "protectableObjectUniqueName", "type": "str"},
        "protectable_object_friendly_name": {"key": "protectableObjectFriendlyName", "type": "str"},
        "protectable_object_workload_type": {"key": "protectableObjectWorkloadType", "type": "str"},
        "protectable_object_protection_state": {"key": "protectableObjectProtectionState", "type": "str"},
        "protectable_object_container_host_os_name": {"key": "protectableObjectContainerHostOsName", "type": "str"},
        "protectable_object_parent_logical_container_name": {
            "key": "protectableObjectParentLogicalContainerName",
            "type": "str",
        },
        "container_id": {"key": "containerId", "type": "str"},
        "policy_name": {"key": "policyName", "type": "str"},
        "policy_id": {"key": "policyId", "type": "str"},
    }
    def __init__( # pylint: disable=too-many-locals
        self,
        *,
        access_token_string: Optional[str] = None,
        subscription_id: Optional[str] = None,
        resource_group_name: Optional[str] = None,
        resource_name: Optional[str] = None,
        resource_id: Optional[str] = None,
        protection_container_id: Optional[int] = None,
        recovery_point_id: Optional[str] = None,
        recovery_point_time: Optional[str] = None,
        container_name: Optional[str] = None,
        container_type: Optional[str] = None,
        backup_management_type: Optional[str] = None,
        datasource_type: Optional[str] = None,
        datasource_name: Optional[str] = None,
        datasource_id: Optional[str] = None,
        datasource_container_name: Optional[str] = None,
        coordinator_service_stamp_id: Optional[str] = None,
        coordinator_service_stamp_uri: Optional[str] = None,
        protection_service_stamp_id: Optional[str] = None,
        protection_service_stamp_uri: Optional[str] = None,
        token_extended_information: Optional[str] = None,
        rp_tier_information: Optional[Dict[str, str]] = None,
        rp_original_sa_option: Optional[bool] = None,
        rp_is_managed_virtual_machine: Optional[bool] = None,
        rp_vm_size_description: Optional[str] = None,
        b_ms_active_region: Optional[str] = None,
        protectable_object_unique_name: Optional[str] = None,
        protectable_object_friendly_name: Optional[str] = None,
        protectable_object_workload_type: Optional[str] = None,
        protectable_object_protection_state: Optional[str] = None,
        protectable_object_container_host_os_name: Optional[str] = None,
        protectable_object_parent_logical_container_name: Optional[str] = None,
        container_id: Optional[str] = None,
        policy_name: Optional[str] = None,
        policy_id: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword access_token_string: Access token used for authentication.
        :paramtype access_token_string: str
        :keyword subscription_id: Subscription Id of the source vault.
        :paramtype subscription_id: str
        :keyword resource_group_name: Resource Group name of the source vault.
        :paramtype resource_group_name: str
        :keyword resource_name: Resource Name of the source vault.
        :paramtype resource_name: str
        :keyword resource_id: Resource Id of the source vault.
        :paramtype resource_id: str
        :keyword protection_container_id: Protected item container id.
        :paramtype protection_container_id: int
        :keyword recovery_point_id: Recovery Point Id.
        :paramtype recovery_point_id: str
        :keyword recovery_point_time: Recovery Point Time.
        :paramtype recovery_point_time: str
        :keyword container_name: Container Unique name.
        :paramtype container_name: str
        :keyword container_type: Container Type.
        :paramtype container_type: str
        :keyword backup_management_type: Backup Management Type.
        :paramtype backup_management_type: str
        :keyword datasource_type: Datasource Type.
        :paramtype datasource_type: str
        :keyword datasource_name: Datasource Friendly Name.
        :paramtype datasource_name: str
        :keyword datasource_id: Datasource Id.
        :paramtype datasource_id: str
        :keyword datasource_container_name: Datasource Container Unique Name.
        :paramtype datasource_container_name: str
        :keyword coordinator_service_stamp_id: CoordinatorServiceStampId to be used by BCM in restore
        call.
        :paramtype coordinator_service_stamp_id: str
        :keyword coordinator_service_stamp_uri: CoordinatorServiceStampUri to be used by BCM in restore
        call.
        :paramtype coordinator_service_stamp_uri: str
        :keyword protection_service_stamp_id: ProtectionServiceStampId to be used by BCM in restore
        call.
        :paramtype protection_service_stamp_id: str
        :keyword protection_service_stamp_uri: ProtectionServiceStampUri to be used by BCM in restore
        call.
        :paramtype protection_service_stamp_uri: str
        :keyword token_extended_information: Extended Information about the token like FileSpec etc.
        :paramtype token_extended_information: str
        :keyword rp_tier_information: Recovery point Tier Information.
        :paramtype rp_tier_information: dict[str, str]
        :keyword rp_original_sa_option: Recovery point information: Original SA option.
        :paramtype rp_original_sa_option: bool
        :keyword rp_is_managed_virtual_machine: Recovery point information: Managed virtual machine.
        :paramtype rp_is_managed_virtual_machine: bool
        :keyword rp_vm_size_description: Recovery point information: VM size description.
        :paramtype rp_vm_size_description: str
        :keyword b_ms_active_region: Active region name of BMS Stamp.
        :paramtype b_ms_active_region: str
        :keyword protectable_object_unique_name:
        :paramtype protectable_object_unique_name: str
        :keyword protectable_object_friendly_name:
        :paramtype protectable_object_friendly_name: str
        :keyword protectable_object_workload_type:
        :paramtype protectable_object_workload_type: str
        :keyword protectable_object_protection_state:
        :paramtype protectable_object_protection_state: str
        :keyword protectable_object_container_host_os_name:
        :paramtype protectable_object_container_host_os_name: str
        :keyword protectable_object_parent_logical_container_name:
        :paramtype protectable_object_parent_logical_container_name: str
        :keyword container_id: Container Id.
        :paramtype container_id: str
        :keyword policy_name: Policy Name.
        :paramtype policy_name: str
        :keyword policy_id: Policy Id.
        :paramtype policy_id: str
        """
        # All fields shared with the generic token are forwarded to the
        # CrrAccessToken base; only workload-specific fields are set below.
        super().__init__(
            access_token_string=access_token_string,
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            resource_id=resource_id,
            protection_container_id=protection_container_id,
            recovery_point_id=recovery_point_id,
            recovery_point_time=recovery_point_time,
            container_name=container_name,
            container_type=container_type,
            backup_management_type=backup_management_type,
            datasource_type=datasource_type,
            datasource_name=datasource_name,
            datasource_id=datasource_id,
            datasource_container_name=datasource_container_name,
            coordinator_service_stamp_id=coordinator_service_stamp_id,
            coordinator_service_stamp_uri=coordinator_service_stamp_uri,
            protection_service_stamp_id=protection_service_stamp_id,
            protection_service_stamp_uri=protection_service_stamp_uri,
            token_extended_information=token_extended_information,
            rp_tier_information=rp_tier_information,
            rp_original_sa_option=rp_original_sa_option,
            rp_is_managed_virtual_machine=rp_is_managed_virtual_machine,
            rp_vm_size_description=rp_vm_size_description,
            b_ms_active_region=b_ms_active_region,
            **kwargs
        )
        # Fixed discriminator value identifying this CrrAccessToken subtype.
        self.object_type = "WorkloadCrrAccessToken"  # type: str
        self.protectable_object_unique_name = protectable_object_unique_name
        self.protectable_object_friendly_name = protectable_object_friendly_name
        self.protectable_object_workload_type = protectable_object_workload_type
        self.protectable_object_protection_state = protectable_object_protection_state
        self.protectable_object_container_host_os_name = protectable_object_container_host_os_name
        self.protectable_object_parent_logical_container_name = protectable_object_parent_logical_container_name
        self.container_id = container_id
        self.policy_name = policy_name
        self.policy_id = policy_id
| {
"content_hash": "459bb9f2c2c0e2dc95f1ae6d071311e4",
"timestamp": "",
"source": "github",
"line_count": 9618,
"max_line_length": 120,
"avg_line_length": 49.31648991474319,
"alnum_prop": 0.6691009980477561,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e36298d0b708c8f078fa275a7ee4614b44501c5c",
"size": "474827",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/passivestamp/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Django settings for CityBudgetVisualization project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&=apqk0c_$etoap8*rdp@+l)(*4k7iv&4gyibzx$c+g8g)a2o$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'CityBudgetVisualization.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CityBudgetVisualization.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "74214a3b3dc46f06b8bf0cc89b8bb8c8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 71,
"avg_line_length": 26.362745098039216,
"alnum_prop": 0.6965414652287095,
"repo_name": "jmavis/GovernmentBudgetVisualization",
"id": "ec35c670d3bcfe745434c68341bab0042e39adec",
"size": "2689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CityBudgetVisualization/CityBudgetVisualization/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4151"
}
],
"symlink_target": ""
} |
import unittest
from django.test import TestCase
from operating_system.public import OperatingSystem
from remote_host.public import RemoteHost
from ..operating_system_support import OperatingSystemRelations, AbstractedRemoteHostOperator
# Mocked operating-system relation tree: each key is an OS identifier whose
# value is either a nested dict of descendant OSes or a single leaf child.
# E.g. "1" is an ancestor of "2".."11", and "12" is an ancestor of "13".."16".
RELATION_MOCK = {
    "1": {
        "2": {
            "3": {"4": "5", "6": "7"},
        },
        "8": "9",
        "10": "11",
    },
    "12": {
        "13": "14",
        "15": "16",
    },
}
class TestOperationA:
    """Dummy operator class used as a mapping target in these tests."""
class TestOperationB:
    """Second dummy operator class, distinct from TestOperationA."""
class AbstractedRemoteHostOperatorTestImplementation(AbstractedRemoteHostOperator):
    """Concrete AbstractedRemoteHostOperator wired to the dummy operator classes."""

    def _get_operating_systems_to_supported_operation_mapping(self):
        """Map tuples of supported OS identifiers to their operator classes."""
        return {
            ("2", "8"): TestOperationA,
            ("13",): TestOperationB,
        }

    def _init_operator_class(self, operator_class):
        """Instantiate the selected operator class with no arguments."""
        return operator_class()
class TestOperatingSystemRelations(unittest.TestCase):
    """Tests for OperatingSystemRelations against the mocked relation tree."""

    def setUp(self):
        # Point the relations lookup at the test fixture for every test.
        OperatingSystemRelations._RELATIONS = RELATION_MOCK

    def test_get_subsystems(self):
        self.assertEqual(
            set(OperatingSystemRelations('2').get_subsystems()),
            {'3', '4', '5', '6', '7'}
        )

    def test_get_subsystems__no_subsystems(self):
        # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(OperatingSystemRelations('11').get_subsystems(), [])

    def test_get_subsystems__no_relations_known(self):
        self.assertEqual(OperatingSystemRelations('999').get_subsystems(), [])

    def test_is_parent_of(self):
        self.assertTrue(OperatingSystemRelations('3').is_parent_of('5'))

    def test_is_child_of(self):
        self.assertTrue(OperatingSystemRelations('3').is_child_of('1'))
class TestAbstractedRemoteHostOperator(TestCase):
    """Tests that the right operator class is chosen for a host's OS,
    including OSes that are only related (descendants) of a supported one."""

    def setUp(self):
        # Point the relations lookup at the test fixture for every test.
        OperatingSystemRelations._RELATIONS = RELATION_MOCK

    def _operator_for_os(self, os_id):
        """Build the operator selected for a freshly created host running os_id."""
        return AbstractedRemoteHostOperatorTestImplementation(
            RemoteHost.objects.create(os=os_id)
        ).operator

    def test_initialization(self):
        # assertIsInstance (instead of assertTrue(isinstance(...))) reports
        # the actual type on failure.
        self.assertIsInstance(self._operator_for_os('8'), TestOperationA)
        self.assertIsInstance(self._operator_for_os('13'), TestOperationB)

    def test_initialization__related(self):
        # '4' descends from '2' and '14' from '13' in RELATION_MOCK, so the
        # related parent's operator must be selected.
        self.assertIsInstance(self._operator_for_os('4'), TestOperationA)
        self.assertIsInstance(self._operator_for_os('14'), TestOperationB)

    def test_initialization__not_supported(self):
        with self.assertRaises(OperatingSystem.NotSupportedException):
            AbstractedRemoteHostOperatorTestImplementation(RemoteHost.objects.create(os='16'))
| {
"content_hash": "9884b18e4706c4b1ed274702ca93e63c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 108,
"avg_line_length": 28.066037735849058,
"alnum_prop": 0.6157983193277311,
"repo_name": "jdepoix/goto_cloud",
"id": "cee3690b528f36199b79280b65ec39c3859dab18",
"size": "2975",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "goto_cloud/operating_system_support/tests/test_operating_system_support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "354421"
},
{
"name": "Shell",
"bytes": "619"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from .models import Card
from .models import Deck
from django.contrib.auth.models import User
from .forms import CreateUserForm
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
from django.views.decorators.csrf import csrf_exempt
from .forms import DeckForm
from .forms import CardForm
import pandas as pd
import numpy
def redirect_view(request):
    """Send authenticated users to their deck list; everyone else home."""
    if not request.user.is_authenticated():
        return render(request, "cards/home_template.html")
    query_set = Deck.objects.filter(user=request.user.id)
    return render(request, "cards/decks_template.html", {"query_set": query_set})
def home_view(request):
    """Render the static landing page."""
    return render(request, "cards/home_template.html")
def about_view(request):
    """Render the static "about" page."""
    return render(request, "cards/about_template.html")
def register_view(request):
    """Handle user signup.

    POST + valid form: create the user, log them in, and show their deck
    list.  POST + invalid form: re-render the signup page with the bound
    form so its validation errors are displayed.  GET: blank signup form.
    """
    if request.method == "POST":
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            # The just-saved credentials are valid by construction, so
            # authenticate() returns the new user here.
            user = authenticate(username=request.POST['username'], password=request.POST['password1'])
            login(request, user)
            query_set = Deck.objects.filter(user__exact=request.user.id)
            return render(request, "cards/decks_template.html", {"query_set": query_set})
        # Fix: reuse the already-bound form instead of rebuilding an
        # identical one from request.POST (the rebuild did nothing but
        # discard the validation work already performed).
        return render(request, "cards/register_template.html", {"form": form})
    form = CreateUserForm()
    return render(request, "cards/register_template.html", {"form": form})
# def card_view(request, id):
# id = 1
# query_set = Card.objects.get(pk=1)
# return render(request, "cards/card_template.html", {"query_set":query_set})
@csrf_exempt
def decks_view(request):
    """List, create, delete, and bulk-import a user's decks.

    POST routes on the "function" field / uploaded file:
      * function == "deleteDeck": delete the deck by id.
      * a "bulkupload" CSV file: create any missing decks and their cards.
      * otherwise: create a single deck named by "deckname".
    All paths (and GET) end by rendering the user's deck list.
    """
    if request.method == "POST" and request.POST.get("function") == "deleteDeck":
        Deck.objects.get(pk=request.POST.get("id")).delete()
        query_set = Deck.objects.filter(user__exact=request.user.id)
        return render(request, "cards/decks_template.html", {"query_set": query_set})
    elif request.method == "POST" and request.FILES.get("bulkupload"):
        try:
            df = pd.read_csv(request.FILES.get("bulkupload"))
            df.columns = [x.lower() for x in df.columns]
            # Fix: compute the set of existing deck names once instead of
            # re-querying the database on every loop iteration.  Safe because
            # the iterated deck names are distinct (a set).
            existing_names = set(
                deck.name for deck in Deck.objects.filter(user=request.user.id)
            )
            for deck_name in set(df["deck"]):
                if deck_name not in existing_names:
                    Deck.objects.create(name=deck_name, user=request.user)
            # Fix: fetch the user's decks once (after any creations above)
            # rather than issuing one query per CSV row.
            user_decks = list(Deck.objects.filter(user=request.user))
            for index, values in df[["question", "answer", "deck"]].iterrows():
                Card.objects.create(
                    question=values[0],
                    answer=values[1],
                    deck=next(x for x in user_decks if x.name == values[2]),
                )
        # Fix: a bare "except:" also swallowed SystemExit/KeyboardInterrupt;
        # Exception still covers every CSV/parsing failure this guards.
        except Exception:
            return HttpResponse("Error in CSV. Please check columns and file format are correct.")
    elif request.method == "POST":
        form = DeckForm({"user": request.user.id, "name": request.POST.get("deckname")})
        if form.is_valid():
            form.save()
    query_set = Deck.objects.filter(user__exact=request.user.id)
    return render(request, "cards/decks_template.html", {"query_set": query_set})
@csrf_exempt
def edit_cards_view(request, id):
    """Edit the cards of deck `id`: delete, update, or create a card,
    then render the deck's card list."""
    if request.method == "POST" and request.POST.get("function") == "deleteCard":
        Card.objects.get(pk=request.POST.get("card_id")).delete()
    elif request.method == "POST" and request.POST.get("function") == "updateCard":
        card_instance = Card.objects.get(pk=request.POST.get("id"))
        card_instance.question = request.POST.get("question")
        card_instance.answer = request.POST.get("answer")
        card_instance.save()
    elif request.method == "POST":
        form = CardForm({"question": request.POST.get("question"), "answer": request.POST.get("answer"), "deck": id})
        if form.is_valid():
            form.save()
    query_set = Card.objects.filter(deck__exact=id)
    # Fix: fetch the deck once instead of issuing two identical
    # Deck.objects.get(pk=id) queries for its name and id.
    deck = Deck.objects.get(pk=id)
    return render(request, "cards/edit_cards_template.html",
                  {"query_set": query_set, "deck_name": deck.name, "deck_id": deck.id})
# add user validation
def delete_card_view(request, id1, id2):
    """Delete card `id1` and re-render the card list of deck `id2`.

    NOTE(review): no check that the card/deck belong to request.user —
    matches the "# add user validation" TODO above.
    """
    Card.objects.get(pk=id1).delete()
    query_set = Card.objects.filter(deck__exact=id2)
    # Fix: fetch the deck once instead of two identical get(pk=id2) queries.
    deck = Deck.objects.get(pk=id2)
    return render(request, "cards/edit_cards_template.html",
                  {"query_set": query_set, "deck_name": deck.name, "deck_id": deck.id})
def card_review_view(request, id):
    """Show the lowest-ranked card of deck `id` for review; if the deck is
    empty, fall back to the deck's (empty) edit page."""
    cards = Card.objects.filter(deck__exact=id)
    if cards.exists():
        # Fix: min() finds the lowest-ranked card in one pass instead of
        # sorting the whole deck just to take element [0] (same tie-breaking:
        # both return the first minimal element).
        card = min(cards, key=lambda c: c.current_rank)
        return render(request, "cards/card_review_template.html", {"card": card})
    # Fix: fetch the deck once instead of two identical get(pk=id) queries.
    deck = Deck.objects.get(pk=id)
    return render(request, "cards/edit_cards_template.html",
                  {"query_set": cards, "deck_name": deck.name, "deck_id": deck.id})
# it is currently moving from lowest to highest rank
def rank_update_view(request, deck_id, card_id, rank):
    """Store the reviewed card's new rank, then serve the next card
    (the deck's current lowest rank) for review."""
    card_instance = Card.objects.get(id__exact=card_id)
    card_instance.current_rank = rank
    card_instance.save()
    # Fix: min() instead of sorting the whole deck for element [0].
    card = min(Card.objects.filter(deck__exact=deck_id), key=lambda c: c.current_rank)
    return render(request, "cards/card_review_template.html", {"card": card})
| {
"content_hash": "5d0cfa717a3888a32be850dfc28a5079",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 156,
"avg_line_length": 43.38095238095238,
"alnum_prop": 0.6655689718258324,
"repo_name": "introbig/srs",
"id": "ad35fbd929da6f3d4711d2a42fb3816120ddcf51",
"size": "5466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cards/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3642"
},
{
"name": "HTML",
"bytes": "23655"
},
{
"name": "JavaScript",
"bytes": "440"
},
{
"name": "Python",
"bytes": "12068"
}
],
"symlink_target": ""
} |
import discord_logging
from classes.notification import Notification
from classes.subscription import Subscription

# Module-level logger shared by the database mixin below.
log = discord_logging.get_logger()
class _DatabaseNotification:
    """Database mixin managing the queue of pending notifications.

    The composing class must provide ``self.session`` (a SQLAlchemy-style
    session) and ``self.log_debug`` (bool toggling verbose query logging).
    """

    def __init__(self):
        # These self-assignments exist purely so IDEs/linters recognize the
        # attributes; the real values come from the composing class.
        self.session = self.session  # for pycharm linting
        self.log_debug = self.log_debug

    def add_notification(self, notification):
        """Queue *notification* to be sent."""
        if self.log_debug:
            log.debug("Saving new notification")
        self.session.add(notification)

    def get_count_pending_notifications(self):
        """Return how many notifications are waiting in the queue."""
        if self.log_debug:
            # Fix: plain string — the original f-string had no placeholders.
            log.debug("Fetching count of pending notifications")
        count = self.session.query(Notification)\
            .order_by(Notification.id)\
            .count()
        return count

    def get_pending_notifications(self, count=9999):
        """Return up to *count* pending notifications, oldest first."""
        if self.log_debug:
            # Fix: plain string — the original f-string had no placeholders.
            log.debug("Fetching pending notifications")
        notifications = self.session.query(Notification)\
            .order_by(Notification.id)\
            .limit(count)\
            .all()
        return notifications

    def get_count_notifications_for_submission(self, submission):
        """Return how many pending notifications reference *submission*."""
        if self.log_debug:
            log.debug(f"Fetching count of pending notifications for submission: {submission}")
        return self.session.query(Notification)\
            .filter(Notification.submission == submission)\
            .count()

    def delete_notifications_for_submission(self, submission):
        """Delete all queued notifications for *submission*; return the count deleted."""
        if self.log_debug:
            log.debug(f"Deleting notifications for {submission}")
        return self.session.query(Notification)\
            .filter(Notification.submission == submission)\
            .delete(synchronize_session='fetch')

    def clear_all_notifications(self):
        """Empty the notification queue entirely."""
        if self.log_debug:
            # Fix: plain string — the original f-string had no placeholders.
            log.debug("Clearing all notifications in queue")
        self.session.query(Notification)\
            .delete(synchronize_session='fetch')

    def delete_notifications_for_subscription(self, subscription):
        """Delete all queued notifications for *subscription*; return the count deleted."""
        if self.log_debug:
            log.debug(f"Deleting notifications for {subscription}")
        return self.session.query(Notification)\
            .filter(Notification.subscription == subscription)\
            .delete(synchronize_session='fetch')

    def delete_notification(self, notification):
        """Remove a single *notification* from the queue."""
        if self.log_debug:
            log.debug(f"Deleting notification by id: {notification.id}")
        self.session.delete(notification)
| {
"content_hash": "f418144190af0bf46ba59a42f317c801",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 85,
"avg_line_length": 28.43243243243243,
"alnum_prop": 0.7490494296577946,
"repo_name": "Watchful1/RedditSubsBot",
"id": "17ef588c2cfd72b65119099a7ebdd71b9f1a095c",
"size": "2104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/database/_notification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75583"
}
],
"symlink_target": ""
} |
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Exercise the pyramid view callables with a dummy request."""

    def setUp(self):
        # Push a minimal pyramid configuration for the duration of each test.
        self.config = testing.setUp()

    def tearDown(self):
        # Pop the configuration so tests remain isolated.
        testing.tearDown()

    def test_my_view(self):
        from .views import my_view
        response = my_view(testing.DummyRequest())
        self.assertEqual(response['project'], 'MetadataCollector')
| {
"content_hash": "79a5fd263f309a3fa8b83196e1f8d321",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 23.176470588235293,
"alnum_prop": 0.649746192893401,
"repo_name": "jfunez/metadata_collector",
"id": "8139132190540996b7e5c18b480564caf42a40c8",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MetadataCollector/metadatacollector/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "JavaScript",
"bytes": "33926"
},
{
"name": "Python",
"bytes": "3756"
}
],
"symlink_target": ""
} |
from django.models.admin import log
from django.core import template
class AdminLogNode(template.Node):
    """Template node that loads admin log entries into a context variable.

    NOTE: legacy Django 0.90 / Python 2 code — left byte-identical.
    """
    def __init__(self, limit, varname, user):
        # limit/varname are raw token strings; user is a user-id string,
        # a context-variable name, or None (all users).
        self.limit, self.varname, self.user = limit, varname, user
    def __repr__(self):
        return "<GetAdminLog Node>"
    def render(self, context):
        # A non-numeric user token names a context variable holding a user
        # object; resolve it to that user's id before querying.
        if self.user is not None and not self.user.isdigit():
            self.user = context[self.user].id
        # Stash the log entries in the context under the requested name;
        # the node itself renders nothing.
        context[self.varname] = log.get_list(user__id__exact=self.user, limit=self.limit, select_related=True)
        return ''
class DoGetAdminLog:
    """
    Populates a template variable with the admin log for the given criteria.

    Usage::

        {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}

    Examples::

        {% get_admin_log 10 as admin_log for_user 23 %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% get_admin_log 10 as admin_log %}

    Note that ``context_var_containing_user_obj`` can be a hard-coded integer
    (user ID) or the name of a template context variable containing the user
    object whose ID you want.

    NOTE: legacy Django 0.90 / Python 2 code (old ``raise X, "msg"`` syntax)
    — left byte-identical.
    """
    def __init__(self, tag_name):
        # Stored only for use in error messages below.
        self.tag_name = tag_name
    def __call__(self, parser, token):
        # Expected token shapes:
        #   get_admin_log <limit> as <varname>
        #   get_admin_log <limit> as <varname> for_user <user>
        tokens = token.contents.split()
        if len(tokens) < 4:
            raise template.TemplateSyntaxError, "'%s' statements require two arguments" % self.tag_name
        if not tokens[1].isdigit():
            raise template.TemplateSyntaxError, "First argument in '%s' must be an integer" % self.tag_name
        if tokens[2] != 'as':
            raise template.TemplateSyntaxError, "Second argument in '%s' must be 'as'" % self.tag_name
        if len(tokens) > 4:
            if tokens[4] != 'for_user':
                raise template.TemplateSyntaxError, "Fourth argument in '%s' must be 'for_user'" % self.tag_name
        # tokens[5] (the user) is optional; default to None = all users.
        return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(len(tokens) > 5 and tokens[5] or None))
# Register the tag with the (Django 0.90-era) global template tag registry.
template.register_tag('get_admin_log', DoGetAdminLog('get_admin_log'))
| {
"content_hash": "a5a6ce5e4723eddfeb591a62c3497f96",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 112,
"avg_line_length": 40.1764705882353,
"alnum_prop": 0.6349438750610054,
"repo_name": "tungvx/deploy",
"id": "b24f7c1dad9bbf0111f1fddb0ebc5c714b2bf0a5",
"size": "2049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Django-0.90/django/contrib/admin/templatetags/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400492"
},
{
"name": "JavaScript",
"bytes": "477245"
},
{
"name": "Python",
"bytes": "16861113"
},
{
"name": "Shell",
"bytes": "8221"
}
],
"symlink_target": ""
} |
"""defines constants for qjobs"""
from collections import OrderedDict, namedtuple
Itmtp = namedtuple('Itmtp', ['dscr', 'xml_tag'])
ITEMS = OrderedDict((
('i', Itmtp('job id', ['JB_job_number'])),
('p', Itmtp('job priority', ['JAT_prio'])),
('n', Itmtp('job name', ['JB_name'])),
('o', Itmtp('job owner', ['JB_owner'])),
('s', Itmtp('job state', ['state'])),
('t', Itmtp('job start/submission time', ['JAT_start_time',
'JB_submission_time'])),
('e', Itmtp('elapsed time since start/submission', [])),
('q', Itmtp('queue name without domain', [])),
('d', Itmtp('queue domain', [])),
('k', Itmtp('queue name with domain', ['queue_name'])),
('r', Itmtp('requested queue(s)', ['hard_req_queue'])),
('l', Itmtp('number of slots used', ['slots']))
))
| {
"content_hash": "80364bcaedda031dcf21062afae3c6cd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 40.04761904761905,
"alnum_prop": 0.535077288941736,
"repo_name": "amorison/qjobs",
"id": "45dcf7caf394f639e5178be7d72588ee35e4c921",
"size": "841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qjobs/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15664"
}
],
"symlink_target": ""
} |
class Display:
    """A 2-D character grid modelling a pixel display.

    Pixels are '#' (lit) or '.' (dark).  Rows and columns can be rotated
    (shifted cyclically toward higher indices), as in the Advent of Code
    2016 day 8 puzzle.
    """

    def __init__(self, width=1, height=1):
        """Create a width x height grid with every pixel dark ('.')."""
        if width < 1 or height < 1:
            raise Exception('width and height must both be > 0')
        self.width = width
        self.height = height
        self.grid = [['.' for _ in range(width)] for _ in range(height)]

    def set_bit(self, x, y, value):
        """Set the pixel at (x, y) to `value`."""
        # NOTE(review): negative coordinates silently index from the end
        # (Python list semantics) — presumably unintended; kept as-is for
        # backward compatibility.
        try:
            self.grid[y][x] = value
        except IndexError:
            raise Exception("coordinate doesn't exist")

    def get_bit(self, x, y):
        """Return the pixel at (x, y)."""
        try:
            return self.grid[y][x]
        except IndexError:
            raise Exception("coordinate doesn't exist")

    def rect(self, width, height):
        """Light every pixel in the top-left width x height rectangle."""
        for x in range(width):
            for y in range(height):
                self.set_bit(x, y, '#')

    def rotate_row(self, row, distance):
        """Cyclically shift `row` right by `distance` pixels."""
        # Fix: the old check used '>' so row == height slipped through and
        # raised a raw IndexError instead of the documented Exception.
        if row >= self.height:
            raise Exception("row doesn't exist")
        # Fix: reduce modulo width so distances >= width still rotate
        # (previously the slice made them a silent no-op).
        shift = -(distance % self.width)
        self.grid[row] = self.grid[row][shift:] + self.grid[row][:shift]

    def rotate_column(self, column, distance):
        """Cyclically shift `column` down by `distance` pixels."""
        # Fix: '>=' instead of '>' (see rotate_row).
        if column >= self.width:
            raise Exception("column doesn't exist")
        # Fix: reduce modulo height (see rotate_row).
        shift = -(distance % self.height)
        data = [self.get_bit(column, y) for y in range(self.height)]
        data = data[shift:] + data[:shift]
        for y, bit in enumerate(data):
            self.set_bit(column, y, bit)

    @property
    def count(self):
        """Number of lit ('#') pixels."""
        return sum(row.count('#') for row in self.grid)

    def __unicode__(self):
        return '\n'.join(''.join(row) for row in self.grid)

    def __str__(self):
        return self.__unicode__()
| {
"content_hash": "38b2c4ea8ae5871bf5cf8fab850d6963",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 28.82089552238806,
"alnum_prop": 0.5075090626618333,
"repo_name": "Apreche/advent2016",
"id": "7d4e321540c5796fbba1e75b6fcc7a1b747a61df",
"size": "1931",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "2016/08/display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16719"
}
],
"symlink_target": ""
} |
import os
import subprocess
import sys

from ez_setup import use_setuptools
# Bootstrap setuptools before importing from it, in case it isn't installed.
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.egg_info import egg_info
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import Command

from reviewboard import get_package_version, is_release, VERSION
# Make sure we're actually in the directory containing setup.py.
root_dir = os.path.dirname(__file__)

if root_dir != "":
    os.chdir(root_dir)

# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
# (i.e. install data alongside the pure-Python modules, not in share/.)
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']
class osx_install_data(install_data):
    """install_data variant that installs into install_lib on macOS."""
    # On MacOS, the platform-specific lib dir is
    # /System/Library/Framework/Python/.../
    # which is wrong. Python 2.5 supplied with MacOS 10.5 has an
    # Apple-specific fix for this in distutils.command.install_data#306. It
    # fixes install_lib but not install_data, which is why we roll our own
    # install_data class.

    def finalize_options(self):
        # By the time finalize_options is called, install.install_lib is
        # set to the fixed directory, so we set the installdir to install_lib.
        # The # install_data class uses ('install_data', 'install_dir') instead.
        self.set_undefined_options('install', ('install_lib', 'install_dir'))
        install_data.finalize_options(self)
class BuildEggInfo(egg_info):
    """egg_info variant that builds media files before packaging/installing."""

    def run(self):
        # Media must be generated before sdist/bdist_egg/install bundle it.
        if {'sdist', 'bdist_egg', 'install'} & set(sys.argv):
            self.run_command('build_media')

        egg_info.run(self)
class BuildMedia(Command):
    """Custom distutils command that compiles the web media files."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Delegate to the in-tree build script; any non-zero exit is fatal.
        if subprocess.call(['./contrib/internal/build-media.py']) != 0:
            raise RuntimeError('Failed to build media files')
# Wire the custom commands into distutils/setuptools.
cmdclasses = {
    'install_data': install_data,
    'egg_info': BuildEggInfo,
    'build_media': BuildMedia,
}

if sys.platform == "darwin":
    # See osx_install_data above for why macOS needs a different install_data.
    cmdclasses['install_data'] = osx_install_data

PACKAGE_NAME = 'ReviewBoard'

# Releases get a versioned download directory; everything else points at
# the nightly builds.
if is_release():
    download_url = 'http://downloads.reviewboard.org/releases/%s/%s.%s/' % \
                   (PACKAGE_NAME, VERSION[0], VERSION[1])
else:
    download_url = 'http://downloads.reviewboard.org/nightlies/'
# Build the reviewboard package.
setup(name=PACKAGE_NAME,
      version=get_package_version(),
      license="MIT",
      description="Review Board, a web-based code review tool",
      url="http://www.reviewboard.org/",
      download_url=download_url,
      author="The Review Board Project",
      author_email="reviewboard@googlegroups.com",
      maintainer="Christian Hammond",
      maintainer_email="chipx86@chipx86.com",
      packages=find_packages(),
      # Console scripts plus plugin entry points (SCM tools and auth
      # backends are discovered through these groups at runtime).
      entry_points = {
          'console_scripts': [
              'rb-site = reviewboard.cmdline.rbsite:main',
              'rbssh = reviewboard.cmdline.rbssh:main',
          ],
          'reviewboard.scmtools': [
              'bzr = reviewboard.scmtools.bzr:BZRTool',
              'clearcase = reviewboard.scmtools.clearcase:ClearCaseTool',
              'cvs = reviewboard.scmtools.cvs:CVSTool',
              'git = reviewboard.scmtools.git:GitTool',
              'hg = reviewboard.scmtools.hg:HgTool',
              'perforce = reviewboard.scmtools.perforce:PerforceTool',
              'plastic = reviewboard.scmtools.plastic:PlasticTool',
              'svn = reviewboard.scmtools.svn:SVNTool',
          ],
          'reviewboard.auth_backends': [
              'ad = reviewboard.accounts.backends:ActiveDirectoryBackend',
              'ldap = reviewboard.accounts.backends:LDAPBackend',
              'nis = reviewboard.accounts.backends:NISBackend',
              'x509 = reviewboard.accounts.backends:X509Backend',
          ],
      },
      cmdclass=cmdclasses,
      install_requires=[
          'Django>=1.4',
          'django_evolution>=0.6.7',
          'Djblets>=0.7alpha0.dev',
          'django-pipeline>=1.2.1',
          'Pygments>=1.4',
          'flup',
          'paramiko>=1.7.6',
          'python-dateutil==1.5',
          'python-memcached',
          'pytz',
          'recaptcha-client',
      ],
      # Allow pip/easy_install to find dependencies on the project mirror.
      dependency_links = [
          "http://downloads.reviewboard.org/mirror/",
          download_url,
      ],
      include_package_data=True,
      zip_safe=False,
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Environment :: Web Environment",
          "Framework :: Django",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Natural Language :: English",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Software Development",
          "Topic :: Software Development :: Quality Assurance",
      ]
)
| {
"content_hash": "ab85fb8af12851a07610221f240bfe75",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 104,
"avg_line_length": 32.62893081761006,
"alnum_prop": 0.6266383962991519,
"repo_name": "Khan/reviewboard",
"id": "27d95a979778eb5a18956ad7bd5b403d75f2c856",
"size": "5359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "311685"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "225"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "1656986"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "829"
}
],
"symlink_target": ""
} |
"""Django command-line entry point (standard manage.py)."""
import os
import sys

if __name__ == "__main__":
    # Default settings module; a pre-set environment variable wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                          "django_rv_apps.settings.local")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django is importable, so re-raise the original ImportError.
        raise
    execute_from_command_line(sys.argv)
| {
"content_hash": "5c0ab6b422355bc90798cf87ed7a5306",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 37.36363636363637,
"alnum_prop": 0.6034063260340633,
"repo_name": "davrv93/creed-en-sus-profetas-backend",
"id": "921ae6aa34947dce6653ddb190c44e04fbda5a22",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "160568"
}
],
"symlink_target": ""
} |
from pymba import Vimba, VimbaException
if __name__ == '__main__':
    # Example: dump every feature of the first Vimba interface, with its
    # current value and allowed range. Requires attached Vimba hardware.
    with Vimba() as vimba:
        interface = vimba.interface(0)
        interface.open()

        # get feature value via feature object
        for feature_name in interface.feature_names():
            feature = interface.feature(feature_name)

            try:
                value = feature.value
                range_ = feature.range

                # alternatively the feature value can be read as an object attribute
                # value = getattr(interface, feature_name)
                # or
                # value = interface.someFeatureName

            except VimbaException as e:
                # Unreadable features: show the exception in place of a value.
                value = e
                range_ = None

            print('\n\t'.join(
                str(x) for x in (
                    feature_name,
                    'value: {}'.format(value),
                    'range: {}'.format(range_))
                if x is not None))

        interface.close()
| {
"content_hash": "b8b1775c3371407cb525a4da3100b838",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 29.818181818181817,
"alnum_prop": 0.49085365853658536,
"repo_name": "morefigs/pymba",
"id": "c07bec3ddd7199586283de8a95ad708eeff9dccc",
"size": "984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/interface/list_feature_values_and_ranges.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77300"
}
],
"symlink_target": ""
} |
"""This module contains Google BigQuery to BigQuery operator."""
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToBigQueryOperator(BaseOperator):
    """
    Copies data from one BigQuery table to another.

    .. seealso::
        For more details about these parameters:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

    :param source_project_dataset_tables: One or more
        dotted ``(project:|project.)<dataset>.<table>`` BigQuery tables to use as the
        source data. If ``<project>`` is not included, project will be the
        project defined in the connection json. Use a list if there are multiple
        source tables. (templated)
    :param destination_project_dataset_table: The destination BigQuery
        table. Format is: ``(project:|project.)<dataset>.<table>`` (templated)
    :param write_disposition: The write disposition if the table already exists.
    :param create_disposition: The create disposition if the table doesn't exist.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
        **Example**: ::

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :param location: The location used for the operation.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    # Attributes named here are rendered with Jinja templating by Airflow;
    # they must match the instance attribute names assigned in __init__.
    template_fields: Sequence[str] = (
        'source_project_dataset_tables',
        'destination_project_dataset_table',
        'labels',
        'impersonation_chain',
    )
    template_ext: Sequence[str] = ('.sql',)
    ui_color = '#e6f0e4'

    def __init__(
        self,
        *,
        source_project_dataset_tables: Union[List[str], str],
        destination_project_dataset_table: str,
        write_disposition: str = 'WRITE_EMPTY',
        create_disposition: str = 'CREATE_IF_NEEDED',
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        labels: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        # Backward compatibility: honor the deprecated bigquery_conn_id,
        # but steer callers toward gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id

        self.source_project_dataset_tables = source_project_dataset_tables
        self.destination_project_dataset_table = destination_project_dataset_table
        self.write_disposition = write_disposition
        self.create_disposition = create_disposition
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context') -> None:
        """Run the table copy through a BigQuery copy job."""
        self.log.info(
            'Executing copy of %s into: %s',
            self.source_project_dataset_tables,
            self.destination_project_dataset_table,
        )
        hook = BigQueryHook(
            bigquery_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )

        # run_copy is itself deprecated upstream; suppress its warning here
        # since this operator intentionally still delegates to it.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            hook.run_copy(
                source_project_dataset_tables=self.source_project_dataset_tables,
                destination_project_dataset_table=self.destination_project_dataset_table,
                write_disposition=self.write_disposition,
                create_disposition=self.create_disposition,
                labels=self.labels,
                encryption_configuration=self.encryption_configuration,
            )
| {
"content_hash": "31fd2c5117727fce1e779562608eb53f",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 103,
"avg_line_length": 45.21951219512195,
"alnum_prop": 0.6614527148507731,
"repo_name": "lyft/incubator-airflow",
"id": "527033ac433d526217e17f7504e35bded7017f32",
"size": "6349",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/transfers/bigquery_to_bigquery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
} |
from .catalog_item import CatalogItem
class USqlExternalDataSource(CatalogItem):
    """A Data Lake Analytics catalog U-SQL external datasource item.

    :param compute_account_name: the name of the Data Lake Analytics account.
    :type compute_account_name: str
    :param version: the version of the catalog item.
    :type version: str
    :param database_name: the name of the database.
    :type database_name: str
    :param name: the name of the external data source.
    :type name: str
    :param provider: the name of the provider for the external data source.
    :type provider: str
    :param provider_string: the name of the provider string for the external
     data source.
    :type provider_string: str
    :param pushdown_types: the list of types to push down from the external
     data source.
    :type pushdown_types: list[str]
    """

    # msrest serialization metadata: maps each attribute to its REST wire
    # name and type. Must stay in sync with the attributes set in __init__.
    _attribute_map = {
        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'database_name': {'key': 'databaseName', 'type': 'str'},
        'name': {'key': 'externalDataSourceName', 'type': 'str'},
        'provider': {'key': 'provider', 'type': 'str'},
        'provider_string': {'key': 'providerString', 'type': 'str'},
        'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'},
    }

    def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, provider=None, provider_string=None, pushdown_types=None):
        # compute_account_name and version are handled by the base item.
        super(USqlExternalDataSource, self).__init__(compute_account_name=compute_account_name, version=version)
        self.database_name = database_name
        self.name = name
        self.provider = provider
        self.provider_string = provider_string
        self.pushdown_types = pushdown_types
| {
"content_hash": "716fa0362e0f312495b7990dfce634b8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 153,
"avg_line_length": 44.170731707317074,
"alnum_prop": 0.6570955273329652,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "524e08b359842ada3e7f6f95532ad331bc033b95",
"size": "2285",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import functools
import inspect
from functools import partial
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.db import connection, router
from django.db.backends import utils
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import FieldCacheMixin
from .related_descriptors import (
ForwardManyToOneDescriptor, ForwardOneToOneDescriptor,
ManyToManyDescriptor, ReverseManyToOneDescriptor,
ReverseOneToOneDescriptor,
)
from .related_lookups import (
RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn,
RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual,
)
from .reverse_related import (
ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel,
)
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'


def resolve_relation(scope_model, relation):
    """
    Transform relation into a model or fully-qualified model string of the form
    "app_label.ModelName", relative to scope_model.

    The relation argument can be:
      * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case
        the model argument will be returned.
      * A bare model name without an app_label, in which case scope_model's
        app_label will be prepended.
      * An "app_label.ModelName" string.
      * A model class, which will be returned unchanged.
    """
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        # A self-reference resolves to the model that defines the field.
        return scope_model
    if isinstance(relation, str) and "." not in relation:
        # Bare model name: qualify it with scope_model's app label.
        return "%s.%s" % (scope_model._meta.app_label, relation)
    # Already an "app.Model" string or an actual model class.
    return relation
def lazy_related_operation(function, model, *related_models, **kwargs):
    """
    Schedule `function` to be called once `model` and all `related_models`
    have been imported and registered with the app registry. `function` will
    be called with the newly-loaded model classes as its positional arguments,
    plus any optional keyword arguments.

    The `model` argument must be a model class. Each subsequent positional
    argument is another model, or a reference to another model - see
    `resolve_relation()` for the various forms these may take. Any relative
    references will be resolved relative to `model`.

    This is a convenience wrapper for `Apps.lazy_model_operation` - the app
    registry used is the one found in `model._meta.apps`.
    """
    # Normalize every reference relative to `model`, then reduce each entry
    # to the (app_label, model_name) key the app registry understands.
    resolved = [model]
    resolved.extend(resolve_relation(model, related) for related in related_models)
    registry = model._meta.apps
    return registry.lazy_model_operation(
        partial(function, **kwargs),
        *(make_model_tuple(entry) for entry in resolved),
    )
class RelatedField(FieldCacheMixin, Field):
    """Base class that all relational fields inherit from."""

    # Field flags
    one_to_many = False
    one_to_one = False
    many_to_many = False
    many_to_one = False

    @cached_property
    def related_model(self):
        # Can't cache this property until all the models are loaded.
        apps.check_models_ready()
        return self.remote_field.model

    def check(self, **kwargs):
        """Run Field.check() plus the relation-specific system checks."""
        errors = super().check(**kwargs)
        errors.extend(self._check_related_name_is_valid())
        errors.extend(self._check_related_query_name_is_valid())
        errors.extend(self._check_relation_model_exists())
        errors.extend(self._check_referencing_to_swapped_model())
        errors.extend(self._check_clashes())
        return errors

    def _check_related_name_is_valid(self):
        """
        Check that related_name is a valid Python identifier or ends with
        '+' (fields.E306). An unset related_name (None) is always valid.
        """
        import keyword
        related_name = self.remote_field.related_name
        if related_name is None:
            return []
        is_valid_id = True
        if keyword.iskeyword(related_name):
            is_valid_id = False
        if not related_name.isidentifier():
            is_valid_id = False
        # A trailing '+' means "no reverse relation", so any name is fine.
        if not (is_valid_id or related_name.endswith('+')):
            return [
                checks.Error(
                    "The name '%s' is invalid related_name for field %s.%s" %
                    (self.remote_field.related_name, self.model._meta.object_name,
                     self.name),
                    hint="Related name must be a valid Python identifier or end with a '+'",
                    obj=self,
                    id='fields.E306',
                )
            ]
        return []

    def _check_related_query_name_is_valid(self):
        """
        Check that the reverse query name neither ends with an underscore
        (fields.E308) nor contains the lookup separator (fields.E309).
        """
        # Hidden relations ('+' names) have no reverse query name to check.
        if self.remote_field.is_hidden():
            return []
        rel_query_name = self.related_query_name()
        errors = []
        if rel_query_name.endswith('_'):
            errors.append(
                checks.Error(
                    "Reverse query name '%s' must not end with an underscore."
                    % (rel_query_name,),
                    hint=("Add or change a related_name or related_query_name "
                          "argument for this field."),
                    obj=self,
                    id='fields.E308',
                )
            )
        if LOOKUP_SEP in rel_query_name:
            errors.append(
                checks.Error(
                    "Reverse query name '%s' must not contain '%s'."
                    % (rel_query_name, LOOKUP_SEP),
                    hint=("Add or change a related_name or related_query_name "
                          "argument for this field."),
                    obj=self,
                    id='fields.E309',
                )
            )
        return errors

    def _check_relation_model_exists(self):
        """
        Check that the target model is installed in the app registry
        (fields.E300). Swapped-out models are reported by E301 instead.
        """
        rel_is_missing = self.remote_field.model not in self.opts.apps.get_models()
        rel_is_string = isinstance(self.remote_field.model, str)
        model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name
        if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped):
            return [
                checks.Error(
                    "Field defines a relation with model '%s', which is either "
                    "not installed, or is abstract." % model_name,
                    obj=self,
                    id='fields.E300',
                )
            ]
        return []

    def _check_referencing_to_swapped_model(self):
        """
        Check that the relation doesn't point at a model that has been
        swapped out via a settings reference (fields.E301).
        """
        if (self.remote_field.model not in self.opts.apps.get_models() and
                not isinstance(self.remote_field.model, str) and
                self.remote_field.model._meta.swapped):
            model = "%s.%s" % (
                self.remote_field.model._meta.app_label,
                self.remote_field.model._meta.object_name
            )
            return [
                checks.Error(
                    "Field defines a relation with the model '%s', which has "
                    "been swapped out." % model,
                    hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable,
                    obj=self,
                    id='fields.E301',
                )
            ]
        return []

    def _check_clashes(self):
        """Check accessor and reverse query name clashes."""
        from django.db.models.base import ModelBase
        errors = []
        opts = self.model._meta
        # `f.remote_field.model` may be a string instead of a model. Skip if model name is
        # not resolved.
        if not isinstance(self.remote_field.model, ModelBase):
            return []
        # Consider that we are checking field `Model.foreign` and the models
        # are:
        #
        #     class Target(models.Model):
        #         model = models.IntegerField()
        #         model_set = models.IntegerField()
        #
        #     class Model(models.Model):
        #         foreign = models.ForeignKey(Target)
        #         m2m = models.ManyToManyField(Target)

        # rel_opts.object_name == "Target"
        rel_opts = self.remote_field.model._meta
        # If the field doesn't install a backward relation on the target model
        # (so `is_hidden` returns True), then there are no clashes to check
        # and we can skip these fields.
        rel_is_hidden = self.remote_field.is_hidden()
        rel_name = self.remote_field.get_accessor_name()  # i. e. "model_set"
        rel_query_name = self.related_query_name()  # i. e. "model"
        field_name = "%s.%s" % (opts.object_name, self.name)  # i. e. "Model.field"

        # Check clashes between accessor or reverse query name of `field`
        # and any other field name -- i.e. accessor for Model.foreign is
        # model_set and it clashes with Target.model_set.
        potential_clashes = rel_opts.fields + rel_opts.many_to_many
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (rel_opts.object_name, clash_field.name)  # i.e. "Target.model_set"
            if not rel_is_hidden and clash_field.name == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E302',
                    )
                )
            if clash_field.name == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E303',
                    )
                )

        # Check clashes between accessors/reverse query names of `field` and
        # any other field accessor -- i. e. Model.foreign accessor clashes with
        # Model.m2m accessor.
        potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (  # i. e. "Model.m2m"
                clash_field.related_model._meta.object_name,
                clash_field.field.name)
            if not rel_is_hidden and clash_field.get_accessor_name() == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E304',
                    )
                )
            if clash_field.get_accessor_name() == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with reverse query name for '%s'."
                        % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E305',
                    )
                )
        return errors

    def db_type(self, connection):
        # By default related field will not have a column as it relates to
        # columns from another table.
        return None

    def contribute_to_class(self, cls, name, private_only=False, **kwargs):
        """
        Register the field on `cls` and resolve `related_name` /
        `related_query_name` placeholders ('%(class)s' etc.). The target
        model may not be loaded yet, so final wiring is deferred via
        lazy_related_operation().
        """
        super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
        self.opts = cls._meta
        if not cls._meta.abstract:
            if self.remote_field.related_name:
                related_name = self.remote_field.related_name
            else:
                related_name = self.opts.default_related_name
            if related_name:
                # Interpolate class/model_name/app_label placeholders now
                # that the concrete class is known.
                related_name = related_name % {
                    'class': cls.__name__.lower(),
                    'model_name': cls._meta.model_name.lower(),
                    'app_label': cls._meta.app_label.lower()
                }
                self.remote_field.related_name = related_name
            if self.remote_field.related_query_name:
                related_query_name = self.remote_field.related_query_name % {
                    'class': cls.__name__.lower(),
                    'app_label': cls._meta.app_label.lower(),
                }
                self.remote_field.related_query_name = related_query_name

            def resolve_related_class(model, related, field):
                field.remote_field.model = related
                field.do_related_class(related, model)
            # Run resolve_related_class once both models are registered.
            lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self)

    def deconstruct(self):
        """Add relation-specific kwargs to the Field deconstruction."""
        name, path, args, kwargs = super().deconstruct()
        if self.remote_field.limit_choices_to:
            kwargs['limit_choices_to'] = self.remote_field.limit_choices_to
        if self.remote_field.related_name is not None:
            kwargs['related_name'] = self.remote_field.related_name
        if self.remote_field.related_query_name is not None:
            kwargs['related_query_name'] = self.remote_field.related_query_name
        return name, path, args, kwargs

    def get_forward_related_filter(self, obj):
        """
        Return the keyword arguments that when supplied to
        self.model.object.filter(), would select all instances related through
        this field to the remote obj. This is used to build the querysets
        returned by related descriptors. obj is an instance of
        self.related_field.model.
        """
        return {
            '%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname)
            for _, rh_field in self.related_fields
        }

    def get_reverse_related_filter(self, obj):
        """
        Complement to get_forward_related_filter(). Return the keyword
        arguments that when passed to self.related_field.model.object.filter()
        select all instances of self.related_field.model related through
        this field to obj. obj is an instance of self.model.
        """
        base_filter = {
            rh_field.attname: getattr(obj, lh_field.attname)
            for lh_field, rh_field in self.related_fields
        }
        descriptor_filter = self.get_extra_descriptor_filter(obj)
        base_q = Q(**base_filter)
        if isinstance(descriptor_filter, dict):
            return base_q & Q(**descriptor_filter)
        elif descriptor_filter:
            return base_q & descriptor_filter
        return base_q

    @property
    def swappable_setting(self):
        """
        Get the setting that this is powered from for swapping, or None
        if it's not swapped in / marked with swappable=False.
        """
        if self.swappable:
            # Work out string form of "to"
            if isinstance(self.remote_field.model, str):
                to_string = self.remote_field.model
            else:
                to_string = self.remote_field.model._meta.label
            return apps.get_swappable_settings_name(to_string)
        return None

    def set_attributes_from_rel(self):
        """Derive name/verbose_name from the target model when unset."""
        self.name = (
            self.name or
            (self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name)
        )
        if self.verbose_name is None:
            self.verbose_name = self.remote_field.model._meta.verbose_name
        self.remote_field.set_field_name()

    def do_related_class(self, other, cls):
        """Finish field setup once the related model class is available."""
        self.set_attributes_from_rel()
        self.contribute_to_related_class(other, self.remote_field)

    def get_limit_choices_to(self):
        """
        Return ``limit_choices_to`` for this model field.

        If it is a callable, it will be invoked and the result will be
        returned.
        """
        if callable(self.remote_field.limit_choices_to):
            return self.remote_field.limit_choices_to()
        return self.remote_field.limit_choices_to

    def formfield(self, **kwargs):
        """
        Pass ``limit_choices_to`` to the field being constructed.

        Only passes it if there is a type that supports related fields.
        This is a similar strategy used to pass the ``queryset`` to the field
        being constructed.
        """
        defaults = {}
        if hasattr(self.remote_field, 'get_related_field'):
            # If this is a callable, do not invoke it here. Just pass
            # it in the defaults for when the form class will later be
            # instantiated.
            limit_choices_to = self.remote_field.limit_choices_to
            defaults.update({
                'limit_choices_to': limit_choices_to,
            })
        defaults.update(kwargs)
        return super().formfield(**defaults)

    def related_query_name(self):
        """
        Define the name that can be used to identify this related object in a
        table-spanning query.
        """
        # Precedence: related_query_name, then related_name, then model name.
        return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name

    @property
    def target_field(self):
        """
        When filtering against this relation, return the field on the remote
        model against which the filtering should happen.
        """
        target_fields = self.get_path_info()[-1].target_fields
        if len(target_fields) > 1:
            raise exceptions.FieldError(
                "The relation has multiple target fields, but only single target field was asked for")
        return target_fields[0]

    def get_cache_name(self):
        # Key under which FieldCacheMixin stores the fetched related object.
        return self.name
class ForeignObject(RelatedField):
    """
    Abstraction of the ForeignKey relation to support multi-column relations.
    """
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False

    requires_unique_target = True
    related_accessor_class = ReverseManyToOneDescriptor
    forward_related_accessor_class = ForwardManyToOneDescriptor
    rel_class = ForeignObjectRel

    def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None,
                 related_query_name=None, limit_choices_to=None, parent_link=False,
                 swappable=True, **kwargs):
        # `from_fields`/`to_fields` are parallel lists naming the local and
        # remote columns making up the (possibly multi-column) relation.
        if rel is None:
            rel = self.rel_class(
                self, to,
                related_name=related_name,
                related_query_name=related_query_name,
                limit_choices_to=limit_choices_to,
                parent_link=parent_link,
                on_delete=on_delete,
            )
        super().__init__(rel=rel, **kwargs)
        self.from_fields = from_fields
        self.to_fields = to_fields
        self.swappable = swappable

    def check(self, **kwargs):
        """Add to_fields/uniqueness checks to the RelatedField checks."""
        errors = super().check(**kwargs)
        errors.extend(self._check_to_fields_exist())
        errors.extend(self._check_unique_target())
        return errors

    def _check_to_fields_exist(self):
        """Check every named to_field exists on the target (fields.E312)."""
        # Skip nonexistent models.
        if isinstance(self.remote_field.model, str):
            return []
        errors = []
        for to_field in self.to_fields:
            if to_field:
                try:
                    self.remote_field.model._meta.get_field(to_field)
                except exceptions.FieldDoesNotExist:
                    errors.append(
                        checks.Error(
                            "The to_field '%s' doesn't exist on the related "
                            "model '%s'."
                            % (to_field, self.remote_field.model._meta.label),
                            obj=self,
                            id='fields.E312',
                        )
                    )
        return errors

    def _check_unique_target(self):
        """
        Check that some unique constraint (unique=True or unique_together)
        covers a subset of the referenced fields (fields.E310/E311), so each
        target row is unambiguously addressable.
        """
        rel_is_string = isinstance(self.remote_field.model, str)
        if rel_is_string or not self.requires_unique_target:
            return []
        try:
            self.foreign_related_fields
        except exceptions.FieldDoesNotExist:
            return []
        if not self.foreign_related_fields:
            return []
        # Collect every unique field / unique_together tuple on the target
        # as frozensets so subset comparison below is uniform.
        unique_foreign_fields = {
            frozenset([f.name])
            for f in self.remote_field.model._meta.get_fields()
            if getattr(f, 'unique', False)
        }
        unique_foreign_fields.update({
            frozenset(ut)
            for ut in self.remote_field.model._meta.unique_together
        })
        foreign_fields = {f.name for f in self.foreign_related_fields}
        has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields)
        if not has_unique_constraint and len(self.foreign_related_fields) > 1:
            field_combination = ', '.join(
                "'%s'" % rel_field.name for rel_field in self.foreign_related_fields
            )
            model_name = self.remote_field.model.__name__
            return [
                checks.Error(
                    "No subset of the fields %s on model '%s' is unique."
                    % (field_combination, model_name),
                    hint=(
                        "Add unique=True on any of those fields or add at "
                        "least a subset of them to a unique_together constraint."
                    ),
                    obj=self,
                    id='fields.E310',
                )
            ]
        elif not has_unique_constraint:
            field_name = self.foreign_related_fields[0].name
            model_name = self.remote_field.model.__name__
            return [
                checks.Error(
                    "'%s.%s' must set unique=True because it is referenced by "
                    "a foreign key." % (model_name, field_name),
                    obj=self,
                    id='fields.E311',
                )
            ]
        else:
            return []

    def deconstruct(self):
        """
        Serialize the relation for migrations; "to" is reduced to an
        "app_label.ModelName" string or a SettingsReference when swappable.
        """
        name, path, args, kwargs = super().deconstruct()
        kwargs['on_delete'] = self.remote_field.on_delete
        kwargs['from_fields'] = self.from_fields
        kwargs['to_fields'] = self.to_fields
        if self.remote_field.parent_link:
            kwargs['parent_link'] = self.remote_field.parent_link
        # Work out string form of "to"
        if isinstance(self.remote_field.model, str):
            kwargs['to'] = self.remote_field.model
        else:
            kwargs['to'] = "%s.%s" % (
                self.remote_field.model._meta.app_label,
                self.remote_field.model._meta.object_name,
            )
        # If swappable is True, then see if we're actually pointing to the target
        # of a swap.
        swappable_setting = self.swappable_setting
        if swappable_setting is not None:
            # If it's already a settings reference, error
            if hasattr(kwargs['to'], "setting_name"):
                if kwargs['to'].setting_name != swappable_setting:
                    raise ValueError(
                        "Cannot deconstruct a ForeignKey pointing to a model "
                        "that is swapped in place of more than one model (%s and %s)"
                        % (kwargs['to'].setting_name, swappable_setting)
                    )
            # Set it
            from django.db.migrations.writer import SettingsReference
            kwargs['to'] = SettingsReference(
                kwargs['to'],
                swappable_setting,
            )
        return name, path, args, kwargs

    def resolve_related_fields(self):
        """
        Pair up from_fields with to_fields as (local_field, remote_field)
        tuples. 'self' refers to this field; a to_field of None means the
        target's primary key. Requires the target model to be resolved.
        """
        if not self.from_fields or len(self.from_fields) != len(self.to_fields):
            raise ValueError('Foreign Object from and to fields must be the same non-zero length')
        if isinstance(self.remote_field.model, str):
            raise ValueError('Related model %r cannot be resolved' % self.remote_field.model)
        related_fields = []
        for index in range(len(self.from_fields)):
            from_field_name = self.from_fields[index]
            to_field_name = self.to_fields[index]
            from_field = (self if from_field_name == 'self'
                          else self.opts.get_field(from_field_name))
            to_field = (self.remote_field.model._meta.pk if to_field_name is None
                        else self.remote_field.model._meta.get_field(to_field_name))
            related_fields.append((from_field, to_field))
        return related_fields

    @property
    def related_fields(self):
        # Lazily resolved and memoized; resolution needs the target model.
        if not hasattr(self, '_related_fields'):
            self._related_fields = self.resolve_related_fields()
        return self._related_fields

    @property
    def reverse_related_fields(self):
        # Same pairs as related_fields, with local/remote sides swapped.
        return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]

    @property
    def local_related_fields(self):
        # Only the fields on this (declaring) model.
        return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)

    @property
    def foreign_related_fields(self):
        # Only the fields on the target model.
        return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field)

    def get_local_related_value(self, instance):
        """Return the tuple of local column values for this relation."""
        return self.get_instance_value_for_fields(instance, self.local_related_fields)

    def get_foreign_related_value(self, instance):
        """Return the tuple of target column values for this relation."""
        return self.get_instance_value_for_fields(instance, self.foreign_related_fields)

    @staticmethod
    def get_instance_value_for_fields(instance, fields):
        """Read the given fields' values off `instance` as a tuple."""
        ret = []
        opts = instance._meta
        for field in fields:
            # Gotcha: in some cases (like fixture loading) a model can have
            # different values in parent_ptr_id and parent's id. So, use
            # instance.pk (that is, parent_ptr_id) when asked for instance.id.
            if field.primary_key:
                possible_parent_link = opts.get_ancestor_link(field.model)
                if (not possible_parent_link or
                        possible_parent_link.primary_key or
                        possible_parent_link.model._meta.abstract):
                    ret.append(instance.pk)
                    continue
            ret.append(getattr(instance, field.attname))
        return tuple(ret)

    def get_attname_column(self):
        # A ForeignObject spans existing columns, so it adds none itself.
        attname, column = super().get_attname_column()
        return attname, None

    def get_joining_columns(self, reverse_join=False):
        """Return ((lhs_column, rhs_column), ...) pairs used to build JOINs."""
        source = self.reverse_related_fields if reverse_join else self.related_fields
        return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)

    def get_reverse_joining_columns(self):
        """Joining columns for the reverse direction of the relation."""
        return self.get_joining_columns(reverse_join=True)

    def get_extra_descriptor_filter(self, instance):
        """
        Return an extra filter condition for related object fetching when
        user does 'instance.fieldname', that is the extra filter is used in
        the descriptor of the field.

        The filter should be either a dict usable in .filter(**kwargs) call or
        a Q-object. The condition will be ANDed together with the relation's
        joining columns.

        A parallel method is get_extra_restriction() which is used in
        JOIN and subquery conditions.
        """
        return {}

    def get_extra_restriction(self, where_class, alias, related_alias):
        """
        Return a pair condition used for joining and subquery pushdown. The
        condition is something that responds to as_sql(compiler, connection)
        method.

        Note that currently referring both the 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.

        A parallel method is get_extra_descriptor_filter() which is used in
        instance.fieldname related object fetching.
        """
        return None

    def get_path_info(self, filtered_relation=None):
        """Get path from this field to the related model."""
        opts = self.remote_field.model._meta
        from_opts = self.model._meta
        return [PathInfo(
            from_opts=from_opts,
            to_opts=opts,
            target_fields=self.foreign_related_fields,
            join_field=self,
            m2m=False,
            direct=True,
            filtered_relation=filtered_relation,
        )]

    def get_reverse_path_info(self, filtered_relation=None):
        """Get path from the related model to this field's model."""
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        return [PathInfo(
            from_opts=from_opts,
            to_opts=opts,
            target_fields=(opts.pk,),
            join_field=self.remote_field,
            m2m=not self.unique,
            direct=False,
            filtered_relation=filtered_relation,
        )]

    @classmethod
    @functools.lru_cache(maxsize=None)
    def get_lookups(cls):
        """
        Collect class_lookups from this class up to (and including)
        ForeignObject in the MRO; cached per class.
        """
        bases = inspect.getmro(cls)
        bases = bases[:bases.index(ForeignObject) + 1]
        class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in bases]
        return cls.merge_dicts(class_lookups)

    def contribute_to_class(self, cls, name, private_only=False, **kwargs):
        """Install the forward descriptor on the declaring class."""
        super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
        setattr(cls, self.name, self.forward_related_accessor_class(self))

    def contribute_to_related_class(self, cls, related):
        """Install the reverse descriptor on the target class."""
        # Internal FK's - i.e., those with a related name ending with '+' -
        # and swapped models don't get a related descriptor.
        if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related))
            # While 'limit_choices_to' might be a callable, simply pass
            # it along for later - this is too early because it's still
            # model load time.
            if self.remote_field.limit_choices_to:
                cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to)
# Register the related-field variants of the standard lookups so filters
# such as __in, __exact, __gt and __isnull work across relations.
ForeignObject.register_lookup(RelatedIn)
ForeignObject.register_lookup(RelatedExact)
ForeignObject.register_lookup(RelatedLessThan)
ForeignObject.register_lookup(RelatedGreaterThan)
ForeignObject.register_lookup(RelatedGreaterThanOrEqual)
ForeignObject.register_lookup(RelatedLessThanOrEqual)
ForeignObject.register_lookup(RelatedIsNull)
class ForeignKey(ForeignObject):
    """
    Provide a many-to-one relation by adding a column to the local model
    to hold the remote value.

    By default ForeignKey will target the pk of the remote model but this
    behavior can be changed by using the ``to_field`` argument.
    """
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False

    rel_class = ManyToOneRel

    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")

    def __init__(self, to, on_delete, related_name=None, related_query_name=None,
                 limit_choices_to=None, parent_link=False, to_field=None,
                 db_constraint=True, **kwargs):
        # `to` may be a model class or a (possibly lazy) string reference.
        try:
            to._meta.model_name
        except AttributeError:
            assert isinstance(to, str), (
                "%s(%r) is invalid. First parameter to ForeignKey must be "
                "either a model, a model name, or the string %r" % (
                    self.__class__.__name__, to,
                    RECURSIVE_RELATIONSHIP_CONSTANT,
                )
            )
        else:
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        kwargs['rel'] = self.rel_class(
            self, to, to_field,
            related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link,
            on_delete=on_delete,
        )
        kwargs.setdefault('db_index', True)
        # A ForeignKey is a single-column ForeignObject from itself ('self')
        # to the resolved to_field on the target.
        super().__init__(to, on_delete, from_fields=['self'], to_fields=[to_field], **kwargs)
        self.db_constraint = db_constraint

    def check(self, **kwargs):
        """Add on_delete and unique checks to the ForeignObject checks."""
        errors = super().check(**kwargs)
        errors.extend(self._check_on_delete())
        errors.extend(self._check_unique())
        return errors

    def _check_on_delete(self):
        """
        Check on_delete consistency: SET_NULL needs null=True (fields.E320),
        SET_DEFAULT needs a default (fields.E321).
        """
        on_delete = getattr(self.remote_field, 'on_delete', None)
        if on_delete == SET_NULL and not self.null:
            return [
                checks.Error(
                    'Field specifies on_delete=SET_NULL, but cannot be null.',
                    hint='Set null=True argument on the field, or change the on_delete rule.',
                    obj=self,
                    id='fields.E320',
                )
            ]
        elif on_delete == SET_DEFAULT and not self.has_default():
            return [
                checks.Error(
                    'Field specifies on_delete=SET_DEFAULT, but has no default value.',
                    hint='Set a default value, or change the on_delete rule.',
                    obj=self,
                    id='fields.E321',
                )
            ]
        else:
            return []

    def _check_unique(self, **kwargs):
        # unique=True on a FK is legal but a OneToOneField expresses it better.
        return [
            checks.Warning(
                'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
                hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
                obj=self,
                id='fields.W342',
            )
        ] if self.unique else []

    def deconstruct(self):
        """Trim ForeignObject kwargs down to the ForeignKey signature."""
        name, path, args, kwargs = super().deconstruct()
        # from_fields/to_fields are fixed by __init__, not user arguments.
        del kwargs['to_fields']
        del kwargs['from_fields']
        # Handle the simpler arguments
        if self.db_index:
            del kwargs['db_index']
        else:
            kwargs['db_index'] = False
        if self.db_constraint is not True:
            kwargs['db_constraint'] = self.db_constraint
        # Rel needs more work.
        to_meta = getattr(self.remote_field.model, "_meta", None)
        if self.remote_field.field_name and (
                not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)):
            kwargs['to_field'] = self.remote_field.field_name
        return name, path, args, kwargs

    def to_python(self, value):
        """Coerce `value` using the targeted remote field's to_python()."""
        return self.target_field.to_python(value)

    @property
    def target_field(self):
        # Single-column relation: the one foreign related field.
        return self.foreign_related_fields[0]

    def get_reverse_path_info(self, filtered_relation=None):
        """Get path from the related model to this field's model."""
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        return [PathInfo(
            from_opts=from_opts,
            to_opts=opts,
            target_fields=(opts.pk,),
            join_field=self.remote_field,
            m2m=not self.unique,
            direct=False,
            filtered_relation=filtered_relation,
        )]

    def validate(self, value, model_instance):
        """
        Check that `value` identifies an existing row of the target model
        (restricted by limit_choices_to); raise ValidationError('invalid')
        otherwise. None values and parent links are not validated.
        """
        if self.remote_field.parent_link:
            return
        super().validate(value, model_instance)
        if value is None:
            return
        # Route the existence query through the database router.
        using = router.db_for_read(self.remote_field.model, instance=model_instance)
        qs = self.remote_field.model._default_manager.using(using).filter(
            **{self.remote_field.field_name: value}
        )
        qs = qs.complex_filter(self.get_limit_choices_to())
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={
                    'model': self.remote_field.model._meta.verbose_name, 'pk': value,
                    'field': self.remote_field.field_name, 'value': value,
                },  # 'pk' is included for backwards compatibility
            )

    def get_attname(self):
        # The raw column attribute is "<name>_id" on the instance.
        return '%s_id' % self.name

    def get_attname_column(self):
        # Unlike ForeignObject, a ForeignKey owns a real database column.
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column

    def get_default(self):
        """Return the to_field if the default value is an object."""
        field_default = super().get_default()
        if isinstance(field_default, self.remote_field.model):
            return getattr(field_default, self.target_field.attname)
        return field_default

    def get_db_prep_save(self, value, connection):
        """Prepare `value` for saving, delegating to the target field."""
        # Empty strings become NULL unless the target accepts them and the
        # backend doesn't already treat empty strings as NULL.
        if value is None or (value == '' and
                             (not self.target_field.empty_strings_allowed or
                              connection.features.interprets_empty_strings_as_nulls)):
            return None
        else:
            return self.target_field.get_db_prep_save(value, connection=connection)

    def get_db_prep_value(self, value, connection, prepared=False):
        """Delegate value preparation to the targeted remote field."""
        return self.target_field.get_db_prep_value(value, connection, prepared)

    def contribute_to_related_class(self, cls, related):
        super().contribute_to_related_class(cls, related)
        # If no to_field was given, target the related model's primary key.
        if self.remote_field.field_name is None:
            self.remote_field.field_name = cls._meta.pk.name

    def formfield(self, *, using=None, **kwargs):
        """Return a ModelChoiceField over the target model's default manager."""
        if isinstance(self.remote_field.model, str):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.remote_field.model))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.remote_field.model._default_manager.using(using),
            'to_field_name': self.remote_field.field_name,
        }
        defaults.update(kwargs)
        return super().formfield(**defaults)

    def db_check(self, connection):
        # The FK column itself carries no CHECK constraint.
        return []

    def db_type(self, connection):
        # Column type mirrors the referenced field (e.g. integer for an
        # AutoField pk), as reported by rel_db_type().
        return self.target_field.rel_db_type(connection=connection)

    def db_parameters(self, connection):
        return {"type": self.db_type(connection), "check": self.db_check(connection)}

    def convert_empty_strings(self, value, expression, connection):
        # Backend-level converter: map empty strings back to None.
        if (not value) and isinstance(value, str):
            return None
        return value

    def get_db_converters(self, connection):
        converters = super().get_db_converters(connection)
        if connection.features.interprets_empty_strings_as_nulls:
            converters += [self.convert_empty_strings]
        return converters

    def get_col(self, alias, output_field=None):
        # Default the column's output_field to the referenced remote field.
        return super().get_col(alias, output_field or self.target_field)
class OneToOneField(ForeignKey):
    """
    A ForeignKey variant that forces ``unique=True`` on the column, so at
    most one instance can point at any given related object. Because of
    that, the reverse side of the relation resolves to a single object
    rather than a manager returning a list.
    """
    # Field flags
    many_to_many = False
    many_to_one = False
    one_to_many = False
    one_to_one = True

    related_accessor_class = ReverseOneToOneDescriptor
    forward_related_accessor_class = ForwardOneToOneDescriptor
    rel_class = OneToOneRel

    description = _("One-to-one relationship")

    def __init__(self, to, on_delete, to_field=None, **kwargs):
        # Enforce the unique constraint that defines this field type.
        kwargs['unique'] = True
        super().__init__(to, on_delete, to_field=to_field, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # unique=True is implied by the field class; keep it out of
        # migrations to avoid redundant noise.
        kwargs.pop('unique', None)
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # Parent links are managed by multi-table inheritance, never edited
        # through forms.
        return None if self.remote_field.parent_link else super().formfield(**kwargs)

    def save_form_data(self, instance, data):
        # Model instances go through the descriptor attribute (self.name);
        # raw values are written straight to the column attribute.
        attr = self.name if isinstance(data, self.remote_field.model) else self.attname
        setattr(instance, attr, data)

    def _check_unique(self, **kwargs):
        # Override ForeignKey's W342 warning: unique=True is the whole point
        # of a OneToOneField, so there is nothing to flag.
        return []
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the auto-created "through" model for a ManyToManyField that did
    not declare one: a model named "<Model>_<field>" holding two CASCADE
    ForeignKeys, one back to `klass` (the declaring model) and one to the
    target model, with a unique_together constraint over the pair.
    """
    from django.db import models

    def set_managed(model, related, through):
        # The through table is managed iff either end of the relation is.
        through._meta.managed = model._meta.managed or related._meta.managed

    to_model = resolve_relation(klass, field.remote_field.model)
    name = '%s_%s' % (klass._meta.object_name, field.name)
    # Defer the managed-flag decision until both models are registered.
    lazy_related_operation(set_managed, klass, to_model, name)

    to = make_model_tuple(to_model)[1]
    from_ = klass._meta.model_name
    if to == from_:
        # Self-referential M2M: disambiguate the two FK attribute names.
        to = 'to_%s' % to
        from_ = 'from_%s' % from_

    meta = type('Meta', (), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': _('%(from)s-%(to)s relationship') % {'from': from_, 'to': to},
        'verbose_name_plural': _('%(from)s-%(to)s relationships') % {'from': from_, 'to': to},
        'apps': field.model._meta.apps,
    })
    # Construct and return the new class.
    return type(name, (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(
            klass,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.remote_field.db_constraint,
            on_delete=CASCADE,
        ),
        to: models.ForeignKey(
            to_model,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.remote_field.db_constraint,
            on_delete=CASCADE,
        )
    })
class ManyToManyField(RelatedField):
    """
    Provide a many-to-many relation by using an intermediary model that
    holds two ForeignKey fields pointed at the two sides of the relation.

    Unless a ``through`` model was provided, ManyToManyField will use the
    create_many_to_many_intermediary_model factory to automatically generate
    the intermediary model.
    """

    # Field flags
    many_to_many = True
    many_to_one = False
    one_to_many = False
    one_to_one = False

    rel_class = ManyToManyRel

    description = _("Many-to-many relationship")

    def __init__(self, to, related_name=None, related_query_name=None,
                 limit_choices_to=None, symmetrical=None, through=None,
                 through_fields=None, db_constraint=True, db_table=None,
                 swappable=True, **kwargs):
        # ``to`` may be a model class, a "app.Model" string, or 'self'.
        try:
            to._meta
        except AttributeError:
            # NOTE(review): assert is stripped under ``python -O``; this
            # validation assumes non-optimized execution.
            assert isinstance(to, str), (
                "%s(%r) is invalid. First parameter to ManyToManyField must be "
                "either a model, a model name, or the string %r" %
                (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            )

        # Self-referential M2Ms default to symmetrical.
        if symmetrical is None:
            symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT)

        if through is not None:
            assert db_table is None, (
                "Cannot specify a db_table if an intermediary model is used."
            )

        kwargs['rel'] = self.rel_class(
            self, to,
            related_name=related_name,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            symmetrical=symmetrical,
            through=through,
            through_fields=through_fields,
            db_constraint=db_constraint,
        )
        # Remember whether ``null`` was passed so check() can warn about it
        # (null has no effect on a M2M field).
        self.has_null_arg = 'null' in kwargs

        super().__init__(**kwargs)

        self.db_table = db_table
        self.swappable = swappable

    def check(self, **kwargs):
        """Run all field checks, adding the M2M-specific validations."""
        errors = super().check(**kwargs)
        errors.extend(self._check_unique(**kwargs))
        errors.extend(self._check_relationship_model(**kwargs))
        errors.extend(self._check_ignored_options(**kwargs))
        errors.extend(self._check_table_uniqueness(**kwargs))
        return errors

    def _check_unique(self, **kwargs):
        # unique=True is meaningless on a M2M field (fields.E330).
        if self.unique:
            return [
                checks.Error(
                    'ManyToManyFields cannot be unique.',
                    obj=self,
                    id='fields.E330',
                )
            ]
        return []

    def _check_ignored_options(self, **kwargs):
        """Warn about options that are accepted but have no effect on M2M."""
        warnings = []

        if self.has_null_arg:
            warnings.append(
                checks.Warning(
                    'null has no effect on ManyToManyField.',
                    obj=self,
                    id='fields.W340',
                )
            )

        if self._validators:
            warnings.append(
                checks.Warning(
                    'ManyToManyField does not support validators.',
                    obj=self,
                    id='fields.W341',
                )
            )
        if (self.remote_field.limit_choices_to and self.remote_field.through and
                not self.remote_field.through._meta.auto_created):
            warnings.append(
                checks.Warning(
                    'limit_choices_to has no effect on ManyToManyField '
                    'with a through model.',
                    obj=self,
                    id='fields.W343',
                )
            )

        return warnings

    def _check_relationship_model(self, from_model=None, **kwargs):
        """Validate the intermediate ("through") model and through_fields."""
        if hasattr(self.remote_field.through, '_meta'):
            qualified_model_name = "%s.%s" % (
                self.remote_field.through._meta.app_label, self.remote_field.through.__name__)
        else:
            # Unresolved string reference to the through model.
            qualified_model_name = self.remote_field.through

        errors = []

        if self.remote_field.through not in self.opts.apps.get_models(include_auto_created=True):
            # The relationship model is not installed.
            errors.append(
                checks.Error(
                    "Field specifies a many-to-many relation through model "
                    "'%s', which has not been installed." % qualified_model_name,
                    obj=self,
                    id='fields.E331',
                )
            )

        else:
            assert from_model is not None, (
                "ManyToManyField with intermediate "
                "tables cannot be checked if you don't pass the model "
                "where the field is attached to."
            )
            # Set some useful local variables
            to_model = resolve_relation(from_model, self.remote_field.model)
            from_model_name = from_model._meta.object_name
            if isinstance(to_model, str):
                to_model_name = to_model
            else:
                to_model_name = to_model._meta.object_name
            relationship_model_name = self.remote_field.through._meta.object_name
            self_referential = from_model == to_model

            # Check symmetrical attribute.
            if (self_referential and self.remote_field.symmetrical and
                    not self.remote_field.through._meta.auto_created):
                errors.append(
                    checks.Error(
                        'Many-to-many fields with intermediate tables must not be symmetrical.',
                        obj=self,
                        id='fields.E332',
                    )
                )

            # Count foreign keys in intermediate model
            if self_referential:
                seen_self = sum(
                    from_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields
                )

                if seen_self > 2 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            "The model is used as an intermediate model by "
                            "'%s', but it has more than two foreign keys "
                            "to '%s', which is ambiguous. You must specify "
                            "which two foreign keys Django should use via the "
                            "through_fields keyword argument." % (self, from_model_name),
                            hint="Use through_fields to specify which two foreign keys Django should use.",
                            obj=self.remote_field.through,
                            id='fields.E333',
                        )
                    )

            else:
                # Count foreign keys in relationship model
                seen_from = sum(
                    from_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields
                )
                seen_to = sum(
                    to_model == getattr(field.remote_field, 'model', None)
                    for field in self.remote_field.through._meta.fields
                )

                if seen_from > 1 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than one foreign key "
                             "from '%s', which is ambiguous. You must specify "
                             "which foreign key Django should use via the "
                             "through_fields keyword argument.") % (self, from_model_name),
                            hint=(
                                'If you want to create a recursive relationship, '
                                'use ForeignKey("self", symmetrical=False, through="%s").'
                            ) % relationship_model_name,
                            obj=self,
                            id='fields.E334',
                        )
                    )

                if seen_to > 1 and not self.remote_field.through_fields:
                    errors.append(
                        checks.Error(
                            "The model is used as an intermediate model by "
                            "'%s', but it has more than one foreign key "
                            "to '%s', which is ambiguous. You must specify "
                            "which foreign key Django should use via the "
                            "through_fields keyword argument." % (self, to_model_name),
                            hint=(
                                'If you want to create a recursive relationship, '
                                'use ForeignKey("self", symmetrical=False, through="%s").'
                            ) % relationship_model_name,
                            obj=self,
                            id='fields.E335',
                        )
                    )

                if seen_from == 0 or seen_to == 0:
                    errors.append(
                        checks.Error(
                            "The model is used as an intermediate model by "
                            "'%s', but it does not have a foreign key to '%s' or '%s'." % (
                                self, from_model_name, to_model_name
                            ),
                            obj=self.remote_field.through,
                            id='fields.E336',
                        )
                    )

        # Validate `through_fields`.
        if self.remote_field.through_fields is not None:
            # Validate that we're given an iterable of at least two items
            # and that none of them is "falsy".
            if not (len(self.remote_field.through_fields) >= 2 and
                    self.remote_field.through_fields[0] and self.remote_field.through_fields[1]):
                errors.append(
                    checks.Error(
                        "Field specifies 'through_fields' but does not provide "
                        "the names of the two link fields that should be used "
                        "for the relation through model '%s'." % qualified_model_name,
                        hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')",
                        obj=self,
                        id='fields.E337',
                    )
                )

            # Validate the given through fields -- they should be actual
            # fields on the through model, and also be foreign keys to the
            # expected models.
            else:
                assert from_model is not None, (
                    "ManyToManyField with intermediate "
                    "tables cannot be checked if you don't pass the model "
                    "where the field is attached to."
                )

                source, through, target = from_model, self.remote_field.through, self.remote_field.model
                source_field_name, target_field_name = self.remote_field.through_fields[:2]

                for field_name, related_model in ((source_field_name, source),
                                                  (target_field_name, target)):

                    # Collect candidate FK names for a helpful hint message.
                    possible_field_names = []
                    for f in through._meta.fields:
                        if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model:
                            possible_field_names.append(f.name)
                    if possible_field_names:
                        hint = "Did you mean one of the following foreign keys to '%s': %s?" % (
                            related_model._meta.object_name,
                            ', '.join(possible_field_names),
                        )
                    else:
                        hint = None

                    try:
                        field = through._meta.get_field(field_name)
                    except exceptions.FieldDoesNotExist:
                        errors.append(
                            checks.Error(
                                "The intermediary model '%s' has no field '%s'."
                                % (qualified_model_name, field_name),
                                hint=hint,
                                obj=self,
                                id='fields.E338',
                            )
                        )
                    else:
                        if not (hasattr(field, 'remote_field') and
                                getattr(field.remote_field, 'model', None) == related_model):
                            errors.append(
                                checks.Error(
                                    "'%s.%s' is not a foreign key to '%s'." % (
                                        through._meta.object_name, field_name,
                                        related_model._meta.object_name,
                                    ),
                                    hint=hint,
                                    obj=self,
                                    id='fields.E339',
                                )
                            )

        return errors

    def _check_table_uniqueness(self, **kwargs):
        """Error if the m2m table name clashes with another managed model's table."""
        if isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed:
            return []
        registered_tables = {
            model._meta.db_table: model
            for model in self.opts.apps.get_models(include_auto_created=True)
            if model != self.remote_field.through and model._meta.managed
        }
        m2m_db_table = self.m2m_db_table()
        model = registered_tables.get(m2m_db_table)
        # The second condition allows multiple m2m relations on a model if
        # some point to a through model that proxies another through model.
        if model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model:
            if model._meta.auto_created:
                def _get_field_name(model):
                    # Find which m2m field on the owning model created this
                    # auto-generated through model.
                    for field in model._meta.auto_created._meta.many_to_many:
                        if field.remote_field.through is model:
                            return field.name
                opts = model._meta.auto_created._meta
                clashing_obj = '%s.%s' % (opts.label, _get_field_name(model))
            else:
                clashing_obj = '%s' % model._meta.label
            return [
                checks.Error(
                    "The field's intermediary table '%s' clashes with the "
                    "table name of '%s'." % (m2m_db_table, clashing_obj),
                    obj=self,
                    id='fields.E340',
                )
            ]
        return []

    def deconstruct(self):
        """Return (name, path, args, kwargs) for migration serialization."""
        name, path, args, kwargs = super().deconstruct()
        # Handle the simpler arguments.
        if self.db_table is not None:
            kwargs['db_table'] = self.db_table
        if self.remote_field.db_constraint is not True:
            kwargs['db_constraint'] = self.remote_field.db_constraint
        # Rel needs more work.
        if isinstance(self.remote_field.model, str):
            kwargs['to'] = self.remote_field.model
        else:
            kwargs['to'] = "%s.%s" % (
                self.remote_field.model._meta.app_label,
                self.remote_field.model._meta.object_name,
            )
        if getattr(self.remote_field, 'through', None) is not None:
            if isinstance(self.remote_field.through, str):
                kwargs['through'] = self.remote_field.through
            elif not self.remote_field.through._meta.auto_created:
                kwargs['through'] = "%s.%s" % (
                    self.remote_field.through._meta.app_label,
                    self.remote_field.through._meta.object_name,
                )
        # If swappable is True, then see if we're actually pointing to the target
        # of a swap.
        swappable_setting = self.swappable_setting
        if swappable_setting is not None:
            # If it's already a settings reference, error.
            if hasattr(kwargs['to'], "setting_name"):
                if kwargs['to'].setting_name != swappable_setting:
                    raise ValueError(
                        "Cannot deconstruct a ManyToManyField pointing to a "
                        "model that is swapped in place of more than one model "
                        "(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
                    )

            from django.db.migrations.writer import SettingsReference
            kwargs['to'] = SettingsReference(
                kwargs['to'],
                swappable_setting,
            )
        return name, path, args, kwargs

    def _get_path_info(self, direct=False, filtered_relation=None):
        """Called by both direct and indirect m2m traversal."""
        pathinfos = []
        int_model = self.remote_field.through
        linkfield1 = int_model._meta.get_field(self.m2m_field_name())
        linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
        if direct:
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info(filtered_relation)
        else:
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info(filtered_relation)

        # Get join infos between the last model of join 1 and the first model
        # of join 2. Assume the only reason these may differ is due to model
        # inheritance.
        join1_final = join1infos[-1].to_opts
        join2_initial = join2infos[0].from_opts
        if join1_final is join2_initial:
            intermediate_infos = []
        elif issubclass(join1_final.model, join2_initial.model):
            intermediate_infos = join1_final.get_path_to_parent(join2_initial.model)
        else:
            intermediate_infos = join2_initial.get_path_from_parent(join1_final.model)

        pathinfos.extend(join1infos)
        pathinfos.extend(intermediate_infos)
        pathinfos.extend(join2infos)
        return pathinfos

    def get_path_info(self, filtered_relation=None):
        # Forward traversal: owning model -> through -> target model.
        return self._get_path_info(direct=True, filtered_relation=filtered_relation)

    def get_reverse_path_info(self, filtered_relation=None):
        # Reverse traversal: target model -> through -> owning model.
        return self._get_path_info(direct=False, filtered_relation=filtered_relation)

    def _get_m2m_db_table(self, opts):
        """
        Function that can be curried to provide the m2m table name for this
        relation.
        """
        if self.remote_field.through is not None:
            return self.remote_field.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Derive the default name and truncate to the backend's limit.
            m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name)
            return utils.truncate_name(m2m_table_name, connection.ops.max_name_length())

    def _get_m2m_attr(self, related, attr):
        """
        Function that can be curried to provide the source accessor or DB
        column name for the m2m table.
        """
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        if self.remote_field.through_fields is not None:
            link_field_name = self.remote_field.through_fields[0]
        else:
            link_field_name = None
        for f in self.remote_field.through._meta.fields:
            if (f.is_relation and f.remote_field.model == related.related_model and
                    (link_field_name is None or link_field_name == f.name)):
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)

    def _get_m2m_reverse_attr(self, related, attr):
        """
        Function that can be curried to provide the related accessor or DB
        column name for the m2m table.
        """
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        if self.remote_field.through_fields is not None:
            link_field_name = self.remote_field.through_fields[1]
        else:
            link_field_name = None
        for f in self.remote_field.through._meta.fields:
            if f.is_relation and f.remote_field.model == related.model:
                if link_field_name is None and related.related_model == related.model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                elif link_field_name is None or link_field_name == f.name:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)

    def contribute_to_class(self, cls, name, **kwargs):
        """Attach this field to model class ``cls`` under attribute ``name``."""
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.remote_field.symmetrical and (
                self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
            self.remote_field.related_name = "%s_rel_+" % name
        elif self.remote_field.is_hidden():
            # If the backwards relation is disabled, replace the original
            # related_name with one generated from the m2m field name. Django
            # still uses backwards relations internally and we need to avoid
            # clashes between multiple m2m fields with related_name == '+'.
            self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)

        super().contribute_to_class(cls, name, **kwargs)

        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        #  3) The class owning the m2m field has been swapped out.
        if not cls._meta.abstract:
            if self.remote_field.through:
                def resolve_through_model(_, model, field):
                    field.remote_field.through = model
                lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self)
            elif not cls._meta.swapped:
                self.remote_field.through = create_many_to_many_intermediary_model(self, cls)

        # Add the descriptor for the m2m relation.
        setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False))

        # Set up the accessor for the m2m table name for the relation.
        self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta)

    def contribute_to_related_class(self, cls, related):
        """Set up the reverse descriptor and the m2m accessor callables."""
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True))

        # Set up the accessors for the column names on the m2m table.
        self.m2m_column_name = partial(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, 'column')

        self.m2m_field_name = partial(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = partial(self._get_m2m_reverse_attr, related, 'name')

        get_m2m_rel = partial(self._get_m2m_attr, related, 'remote_field')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = partial(self._get_m2m_reverse_attr, related, 'remote_field')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name

    def set_attributes_from_rel(self):
        # Unlike ForeignKey, an M2M doesn't copy attributes from its target.
        pass

    def value_from_object(self, obj):
        """Return the list of related objects (empty for unsaved instances)."""
        return [] if obj.pk is None else list(getattr(obj, self.attname).all())

    def save_form_data(self, instance, data):
        # Replace the related set with the objects chosen in the form.
        getattr(instance, self.attname).set(data)

    def formfield(self, *, using=None, **kwargs):
        """Return a ModelMultipleChoiceField querying the related model."""
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.remote_field.model._default_manager.using(using),
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i.pk for i in initial]
        return super().formfield(**defaults)

    def db_check(self, connection):
        # No column, hence no CHECK constraint.
        return None

    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None

    def db_parameters(self, connection):
        # No column: neither a type nor a check expression applies.
        return {"type": None, "check": None}
| {
"content_hash": "8ea3f3b21520b662cda88247683dd0e8",
"timestamp": "",
"source": "github",
"line_count": 1647,
"max_line_length": 119,
"avg_line_length": 41.523375834851244,
"alnum_prop": 0.5612890961996813,
"repo_name": "uranusjr/django",
"id": "cfb5ea1ef601349632b4d440436fdd9ec0a91462",
"size": "68389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/fields/related.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84168"
},
{
"name": "HTML",
"bytes": "219466"
},
{
"name": "JavaScript",
"bytes": "255420"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12238521"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from oscar.util.selection import *
def train_unit(obs, building_id, action_train_id):
    """Return the action sequence that selects a production building and
    queues the requested training action on it."""
    building_point = find_position(obs, building_id)
    select_building = actions.FunctionCall(SELECT_POINT, [NEW_SELECTION, building_point])
    start_training = actions.FunctionCall(action_train_id, [NOT_QUEUED])
    return [select_building, start_training]
| {
"content_hash": "0c800463195911420a423521bcda450c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 80,
"avg_line_length": 41,
"alnum_prop": 0.7177700348432056,
"repo_name": "Xaxetrov/OSCAR",
"id": "880a24ad68b58bd27a9549043c9dadfd7e8a10d4",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/meta_action/train_unit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "250337"
},
{
"name": "Shell",
"bytes": "3498"
}
],
"symlink_target": ""
} |
import pygame, sys, random
from pygame.locals import *
#Global Variables and Constants
WINDOW_WIDTH = 700
WINDOW_HEIGHT = 700
PADDLE_WIDTH = 10
PADDLE_LENGTH = 60
PADDLE_VELOCITY = 15  # pixels a paddle moves per frame while in motion
FONTSIZE = 30
BLACK = (0,0,0)
WHITE = (255,255,255)
SCORE_LIMIT=10  # first side to reach this many points wins the game
#create pygame window
pygame.init()
DS = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Pong")
FPS = 30  # frame rate cap enforced by FPSCLOCK.tick(FPS) in the game loop
FPSCLOCK = pygame.time.Clock()
#create sounds (the .wav files are expected alongside this script)
pygame.mixer.init()
BOOP_X = pygame.mixer.Sound('CollisionX.wav')
BOOP_Y = pygame.mixer.Sound('CollisionY.wav')
BOOP_SCORE = pygame.mixer.Sound('Score.wav')
class Ball():
    """The pong ball: position, radius and velocity, plus bounce handling."""

    def __init__(self):
        # A fresh ball always spawns at the center of the window.
        self.x = WINDOW_WIDTH/2
        self.y = WINDOW_HEIGHT/2
        self.r = 10
        # Horizontal velocity is +/-7; vertical velocity has a random
        # magnitude in [2, 5] and a random sign.
        self.directionX = [-1, 1]
        self.vx = 7 * self.directionX[random.randint(0, 1)]
        self.vy = random.randint(2, 5) * self.directionX[random.randint(0, 1)]

    def draw(self, DS):
        """Render the ball as a white circle at its current position."""
        pygame.draw.circle(DS, WHITE, (self.x, self.y), self.r)

    def collideX(self):
        """Paddle hit: speed up horizontally, play a sound, reverse direction."""
        self.vx += 2 if self.vx > 0 else -2
        BOOP_X.play()
        self.vx = -self.vx

    def collideY(self):
        """Wall hit: speed up vertically, play a sound, reverse direction."""
        self.vy += 2 if self.vy > 0 else -2
        BOOP_Y.play()
        self.vy = -self.vy

    def checkCollision(self, pPLAYER, pCPU):
        """Detect wall and paddle contact and trigger the matching bounce."""
        if self.y - self.r <= 0 or self.y + self.r >= WINDOW_HEIGHT:
            self.collideY()
        touching_player = (self.x - self.r <= PADDLE_WIDTH and
                           self.y + self.r >= pPLAYER.y and
                           self.y - self.r <= pPLAYER.y + PADDLE_LENGTH)
        touching_cpu = (self.x + self.r >= WINDOW_WIDTH - PADDLE_WIDTH and
                        self.y + self.r >= pCPU.y and
                        self.y - self.r <= pCPU.y + PADDLE_LENGTH)
        if touching_player or touching_cpu:
            self.collideX()

    def update(self):
        """Advance the ball one frame along its velocity vector."""
        self.x += self.vx
        self.y += self.vy
class Paddle():
    """A pong paddle (player or CPU) with a fixed movement speed."""

    def __init__(self, x, y, width, length):
        self.x = x
        self.y = y
        # Movement speed is constant for the whole game.
        self.vy = PADDLE_VELOCITY
        self.WIDTH = width
        self.LENGTH = length
        # Movement intent flags, toggled by keyboard input or the AI.
        self.up = False
        self.down = False

    def draw(self, DS):
        """Render the paddle as a white rectangle."""
        body = pygame.Rect(self.x, self.y, self.WIDTH, self.LENGTH)
        pygame.draw.rect(DS, WHITE, body)
class ScoreBoard():
    """Tracks both players' scores and renders them each frame."""

    def __init__(self):
        # Both sides start from zero.
        self.scorePlayer = 0
        self.scoreCPU = 0
        # Font used to render the score digits.
        self.myFont = pygame.font.SysFont("Verdana", FONTSIZE)

    def update(self, DS):
        """Render both scores onto the display surface *DS*."""
        player_img = self.myFont.render(str(self.scorePlayer), 0, WHITE)
        cpu_img = self.myFont.render(str(self.scoreCPU), 0, WHITE)
        self.scorePlayerIMG = player_img
        self.scoreCPUIMG = cpu_img
        # Player score sits over the left half, CPU score over the right.
        DS.blit(player_img, (WINDOW_WIDTH/4 - FONTSIZE/2, 10))
        DS.blit(cpu_img, (3 * WINDOW_WIDTH/4 - FONTSIZE/2, 10))
#shows the welcome screen until the player presses spacebar
def StartScreen(DS):
    """Display the welcome screen; return when spacebar is pressed."""
    FONTSIZE = 20
    DS.fill(BLACK)
    startFont = pygame.font.SysFont("Verdana", FONTSIZE)
    startText = startFont.render("Welcome to PONG! Press spacebar key to begin!", 0 , WHITE)
    DS.blit(startText, (WINDOW_WIDTH/6, WINDOW_HEIGHT/2-10))
    pygame.display.update()
    waiting = True
    while waiting:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                waiting = False
            elif event.type == QUIT:
                pygame.quit()
                sys.exit()
#shows the final result and waits for the window to be closed
def WinnerScreen(DS, s):
    """Display the result message *s* until the window is closed."""
    DS.fill(BLACK)
    winnerFont = pygame.font.SysFont("Verdana", FONTSIZE)
    winner = winnerFont.render(s, 0, WHITE)
    DS.blit(winner, (WINDOW_WIDTH/3, WINDOW_HEIGHT/2-10))
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
def _new_round():
    """Return a fresh ball plus player/CPU paddles at their start positions.

    Extracted because the same three-line reset was duplicated in both
    scoring branches of main() and in its setup code.
    """
    ball = Ball()
    player = Paddle(0, WINDOW_HEIGHT/2-PADDLE_LENGTH/2, PADDLE_WIDTH, PADDLE_LENGTH)
    cpu = Paddle(WINDOW_WIDTH-PADDLE_WIDTH, WINDOW_HEIGHT/2-PADDLE_LENGTH/2,
                 PADDLE_WIDTH, PADDLE_LENGTH)
    return ball, player, cpu


def main():
    """Run the game loop until a side reaches SCORE_LIMIT or the window closes."""
    fps_count = 0        # frame counter that paces the AI's decisions
    has_scored = False   # set after a point so the next frame pauses briefly
    StartScreen(DS)
    b, pPLAYER, pCPU = _new_round()
    sb = ScoreBoard()
    # Game Loop
    while True:
        # --- draw the frame ---
        DS.fill(BLACK)
        # median line splitting the two halves of the court
        pygame.draw.line(DS, WHITE, (WINDOW_WIDTH/2, 0), (WINDOW_WIDTH/2, WINDOW_HEIGHT))
        sb.update(DS)
        pPLAYER.draw(DS)
        pCPU.draw(DS)
        # --- physics: collisions, then movement ---
        b.checkCollision(pPLAYER, pCPU)
        b.update()
        b.draw(DS)
        pygame.display.update()
        FPSCLOCK.tick(FPS)
        # short breather after a point so players can reset
        if has_scored:
            has_scored = False
            pygame.time.delay(700)
        # --- scoring: ball left the court on either side ---
        if b.x > WINDOW_WIDTH:
            # past the CPU paddle: player scores
            b, pPLAYER, pCPU = _new_round()
            sb.scorePlayer += 1
            has_scored = True
            BOOP_SCORE.play()
            if sb.scorePlayer >= SCORE_LIMIT:
                WinnerScreen(DS, "You Win :)")
        elif b.x < 0:
            # past the player paddle: CPU scores
            b, pPLAYER, pCPU = _new_round()
            sb.scoreCPU += 1
            has_scored = True
            BOOP_SCORE.play()
            if sb.scoreCPU >= SCORE_LIMIT:
                WinnerScreen(DS, "You Lose :(")
        # --- player input ---
        for event in pygame.event.get():
            # key pressed
            if event.type == pygame.KEYDOWN and event.key == pygame.K_w:
                pPLAYER.up = True
            if event.type == pygame.KEYDOWN and event.key == pygame.K_s:
                pPLAYER.down = True
            # key released
            if event.type == pygame.KEYUP and event.key == pygame.K_w:
                pPLAYER.up = False
            if event.type == pygame.KEYUP and event.key == pygame.K_s:
                pPLAYER.down = False
            # window closed
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        # --- AI paddle planning ---
        # Every 3rd frame the AI re-aims at the ball; every 12th frame it
        # may pause instead, which keeps it beatable.
        if fps_count % 3 == 0 and b.y < pCPU.y - 10:
            pCPU.up = True
            pCPU.down = False
        elif fps_count % 3 == 0 and b.y > pCPU.y + PADDLE_LENGTH + 10:
            pCPU.up = False
            pCPU.down = True
        elif fps_count % 12 == 0:
            pCPU.up = False
            pCPU.down = False
        # --- paddle movement, clamped to the window ---
        if pPLAYER.up and pPLAYER.y > 0:
            pPLAYER.y = pPLAYER.y - pPLAYER.vy
        elif pPLAYER.down and pPLAYER.y + PADDLE_LENGTH < WINDOW_HEIGHT:
            pPLAYER.y = pPLAYER.y + pPLAYER.vy
        if pCPU.up and pCPU.y > 0:
            pCPU.y = pCPU.y - pCPU.vy
        elif pCPU.down and pCPU.y + PADDLE_LENGTH < WINDOW_HEIGHT:
            pCPU.y = pCPU.y + pCPU.vy
        # advance the frame counter driving the AI cadence
        fps_count += 1


# Guard the entry point so importing this module doesn't start the game.
if __name__ == '__main__':
    main()
| {
"content_hash": "3d7b64441202c7a75786123973537611",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 242,
"avg_line_length": 31.406779661016948,
"alnum_prop": 0.5882352941176471,
"repo_name": "scottnm/Pong",
"id": "e496d0db2f73a2c13ffa77e19bc688af81c9e2bd",
"size": "9265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pong_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "3002"
},
{
"name": "Python",
"bytes": "33707"
}
],
"symlink_target": ""
} |
# Cached experimental-shutter position (0 = closed, 1 = open); xshutter()
# below keeps this in sync with the hardware.
xshutter_state=0 ## TODO: read the shutter state and set this accordingly
## Pulse the shutter motor controller (used for both open and close)
def xshutter_trigger():
    """Send the M112/M111 pulse sequence to the MC:06 motor controller."""
    pause = 0.005
    # Raise M112, raise M111, drop M112 (with a short settle after each),
    # then raise M111 once more.
    for command in ('M112=1', 'M111=1', 'M112=0'):
        caput('XF:11BMB-CT{MC:06}Asyn.AOUT', command)
        sleep(pause)
    caput('XF:11BMB-CT{MC:06}Asyn.AOUT', 'M111=1')
def xshutter(inout, q=0):
    """Open or close the experimental shutter.

    inout -- 'o'/'open'/1 to open, 'c'/'close'/0 to close.
    q     -- quiet flag; 0 (default) prints a confirmation on a state change.

    Returns the new ``xshutter_state`` when the shutter was actually moved;
    otherwise returns None (unchanged from the original behaviour).
    """
    global xshutter_state
    # Tuple membership uses ==, matching the original chained comparisons.
    if inout in ('o', 'open', 1):
        if xshutter_state == 0:
            xshutter_trigger()
            xshutter_state = 1
            if q == 0:
                print('Experimental shutter opened')
            return xshutter_state
        elif xshutter_state == 1:
            print('Experimental shutter is already open; no changes made')
        else:
            print('xshutter_state is neither 0 nor 1; no changes made')
    elif inout in ('c', 'close', 0):
        if xshutter_state == 1:
            xshutter_trigger()
            xshutter_state = 0
            if q == 0:
                print('Experimental shutter closed')
            return xshutter_state
        elif xshutter_state == 0:
            print('Experimental shutter is already closed; no changes made')
        else:
            print('xshutter_state is neither 0 nor 1; no changes made')
    else:
        # Previously unrecognized commands were ignored silently.
        print('Unrecognized shutter command %r; no changes made' % (inout,))
| {
"content_hash": "7422da46d77b88105f3971a715f5dfcd",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 32.476190476190474,
"alnum_prop": 0.5791788856304986,
"repo_name": "yugangzhang/GitTest",
"id": "b8fb4fc4e072b4a0e68cad9daeb9f3e77c7bb3ea",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CMS_Profile/19-exp_shutter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "367631"
}
],
"symlink_target": ""
} |
def rate():
    """Prompt for a 1-10 service rating and print a canned response.

    Note: raw_input() returns a string, so the value is converted to int
    before any range comparison (the bug the original comments describe).
    """
    try:
        x = int(raw_input('Rate service from 1 to 10: '))
    except ValueError:
        raise SystemExit('Enter a number between 1 and 10')
    if 7 <= x <= 10:
        print('Thanks!')
    elif 4 <= x < 7:
        print('Thank you.')
    elif 1 <= x < 4:
        print('Crap.')
    else:
        print('Enter a number between 1 and 10')
| {
"content_hash": "189e5ad63dc0e013cf8f9c4a24c9c74a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 78,
"avg_line_length": 42.333333333333336,
"alnum_prop": 0.6062992125984252,
"repo_name": "bandarji/lekhan",
"id": "91f8d323c724a5cf3c73206e29086150078cb976",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/reddit/input_num.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66166"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
from contextlib import contextmanager
import mock
import cloudping
class AWSContext(object):
    """Stand-in for the AWS Lambda execution context passed to handlers."""

    # Identification of the fake invocation.
    function_name = 'CloudPing'
    function_version = '1.0'
    invoked_function_arn = ''
    aws_request_id = '1234567890'
    # Runtime configuration.
    memory_limit_in_mb = 128
    log_group_name = 'CloudPing'
    log_stream_name = 'CloudPingStream'
    # No caller identity or mobile client context in tests.
    identity = None
    client_context = None

    def get_remaining_time_in_millis(self):
        # Pretend the function always has ten seconds left.
        return 10000
class PingHandlerTestCase(unittest.TestCase):
    """Lambda function to ping requested webpage."""

    @contextmanager
    def assert_remote_call(self, *args, **kwargs):
        """Custom assert for remote requests calls."""
        with mock.patch('cloudping.requests') as patched:
            yield
            patched.request.assert_called_with(*args, **kwargs)
            patched.request.return_value.raise_for_status.assert_called_with()

    def test_ping_default(self):
        """Default settings to ping a site."""
        expected = ('GET', 'http://example.com/')
        with self.assert_remote_call(*expected, allow_redirects=False, timeout=5):
            cloudping.ping({}, AWSContext())

    def test_domain_option(self):
        """Configure the domain to check."""
        expected = ('GET', 'http://test.example.com/')
        with self.assert_remote_call(*expected, allow_redirects=False, timeout=5):
            cloudping.ping({'domain': 'test.example.com'}, AWSContext())

    def test_path_option(self):
        """Configure the path to check."""
        expected = ('GET', 'http://example.com/test/')
        with self.assert_remote_call(*expected, allow_redirects=False, timeout=5):
            cloudping.ping({'path': '/test/'}, AWSContext())

    def test_protocol_option(self):
        """Configure the protocol to check."""
        expected = ('GET', 'https://example.com/')
        with self.assert_remote_call(*expected, allow_redirects=False, timeout=5):
            cloudping.ping({'protocol': 'https'}, AWSContext())

    def test_method_option(self):
        """Configure the HTTP method to check."""
        expected = ('POST', 'http://example.com/')
        with self.assert_remote_call(*expected, allow_redirects=False, timeout=5):
            cloudping.ping({'method': 'POST'}, AWSContext())

    def test_redirect_option(self):
        """Configure if redirects are followed for the check."""
        expected = ('GET', 'http://example.com/')
        with self.assert_remote_call(*expected, allow_redirects=True, timeout=5):
            cloudping.ping({'allow_redirects': True}, AWSContext())

    def test_timeout_option(self):
        """Configure timeout for the check."""
        expected = ('GET', 'http://example.com/')
        with self.assert_remote_call(*expected, allow_redirects=False, timeout=10):
            cloudping.ping({'timeout': 10}, AWSContext())
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "25ce4b8bee72201f819860e5b917de05",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 85,
"avg_line_length": 35.13253012048193,
"alnum_prop": 0.6128257887517147,
"repo_name": "mlavin/cloudping",
"id": "3c1a3c1a752f939223bb6ee493dfa4e7340037c9",
"size": "2916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "3489"
}
],
"symlink_target": ""
} |
from nova.openstack.common import log as logging
from nova.compute import vm_states
from nova import rpc
from nova import context
from nova import conductor
from paxes_nova import _
#from nova.openstack.common.rpc.amqp import RpcContext
LOG = logging.getLogger(__name__)
EVENT_TYPE_ERROR = 'ERROR'
EVENT_TYPE_WARN = 'WARN'
def set_instance_error_state_and_notify(instance):
    """
    Set an instance to ERROR state and send out a notification.

    :param instance: nova instance object or plain dict (the delete flow
        passes a dictionary) identifying the virtual machine to flag.
    """
    # Mutate the in-memory copy too: during delete `instance` may be a
    # dict rather than a tracked model object.
    instance['vm_state'] = vm_states.ERROR
    conductor.API().instance_update(
        context.get_admin_context(), instance['uuid'],
        vm_state=vm_states.ERROR,
        task_state=None)
    instance_name = instance['name']
    host_name = instance['host']
    LOG.warn(_('Unable to find virtual machine %(inst_name)s '
               'on host %(host)s. Set state to ERROR')
             % {'inst_name': instance_name,
                'host': host_name})
    # Send event notification.  (Fixed: added missing space between
    # "failed." and "The" in the concatenated message.)
    note = {'event_type': 'compute.instance.log',
            'msg': _('Unable to find virtual machine {instance_name} on '
                     'host {host_name}. An operation might have been '
                     'performed on the virtual machine outside of PowerVC or'
                     ' the deploy of the virtual machine failed. '
                     'The virtual machine is now set to Error state in the '
                     'database.'),
            'instance_name': instance_name,
            'host_name': host_name}
    notifier = rpc.get_notifier(service='compute', host=host_name)
    notifier.warn(context.get_admin_context(), 'compute.instance.log',
                  note)
| {
"content_hash": "84d7f77e37d059e9f61d23e66047a156",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 39.68888888888889,
"alnum_prop": 0.6259798432250839,
"repo_name": "windskyer/k_nova",
"id": "acd8d44de8be813ba0f51da8d6ad6da27388d528",
"size": "1967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paxes_nova/virt/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "371"
},
{
"name": "HTML",
"bytes": "2364"
},
{
"name": "JavaScript",
"bytes": "116320"
},
{
"name": "Python",
"bytes": "3193811"
},
{
"name": "Shell",
"bytes": "7129"
}
],
"symlink_target": ""
} |
from point import Point
import unittest
class TestPoint(unittest.TestCase):
    """Exercise the arithmetic and comparison protocol of Point."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        p = Point(2, 3)
        self.assertEqual(p.x, 2)
        self.assertEqual(p.y, 3)

    def test_str(self):
        rendered = str(Point(14, 88))
        self.assertEqual(rendered, '(14,88)')

    def test_eq(self):
        self.assertEqual(Point(21, 37), Point(21, 37))

    def test_neq(self):
        self.assertNotEqual(Point(14, 88), Point(21, 37))

    def test_add(self):
        total = Point(14, 88) + Point(21, 37)
        self.assertEqual(total, Point(35, 125))

    def test_sub(self):
        difference = Point(3, 5) - Point(2, 3)
        self.assertEqual(Point(1, 2), difference)

    def test_mul(self):
        self.assertEqual(Point(2, 3) * Point(3, 8), 30)

    def test_cross(self):
        self.assertEqual(Point(2, 3).cross(Point(3, 8)), 7)

    def test_length(self):
        self.assertEqual(Point(3, 4).length(), 5)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e75bb6d9016b9c39454e37bd4eb9b781",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 66,
"avg_line_length": 24,
"alnum_prop": 0.603125,
"repo_name": "vyzyv/university",
"id": "57263564f39959f093ef16b423255375d876c176",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/Zestaw6/6.2/point_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40687"
},
{
"name": "C++",
"bytes": "24187"
},
{
"name": "CSS",
"bytes": "1490"
},
{
"name": "Java",
"bytes": "55933"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Python",
"bytes": "49234"
}
],
"symlink_target": ""
} |
from django import forms
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtailstreamforms.conf import get_setting
from wagtailstreamforms.fields import BaseField, register
class SingleLineTextField(BaseField):
    """Single-line text input backed by ``forms.CharField``."""

    field_class = forms.CharField
    label = _("Text field (single line)")
class MultiLineTextField(BaseField):
    """Multi-line free text rendered with a ``Textarea`` widget."""

    field_class = forms.CharField
    widget = forms.widgets.Textarea
    label = _("Text field (multi line)")
class DateField(BaseField):
    """Date input backed by ``forms.DateField``."""

    field_class = forms.DateField
    icon = "date"
    label = _("Date field")
class DateTimeField(BaseField):
    """Date/time input backed by ``forms.DateTimeField``."""

    field_class = forms.DateTimeField
    icon = "time"
    # NOTE(review): label reads "Time field" although this is a
    # DateTimeField -- confirm the wording is intended.
    label = _("Time field")
class EmailField(BaseField):
    """Email input backed by ``forms.EmailField``."""

    field_class = forms.EmailField
    icon = "mail"
    label = _("Email field")
class URLField(BaseField):
    """URL input backed by ``forms.URLField``."""

    field_class = forms.URLField
    icon = "link"
    label = _("URL field")
class NumberField(BaseField):
    """Numeric input backed by ``forms.DecimalField``."""

    field_class = forms.DecimalField
    label = _("Number field")
class DropdownField(BaseField):
    """Single-choice select with an optional empty (placeholder) option."""

    field_class = forms.ChoiceField
    icon = "arrow-down-big"
    label = _("Dropdown field")

    def get_options(self, block_value):
        """Extend the base options with the editor-defined choices."""
        options = super().get_options(block_value)
        pairs = []
        empty_label = block_value.get("empty_label")
        if empty_label:
            # Placeholder entry goes first, with an empty value.
            pairs.append(("", empty_label))
        for raw in block_value.get("choices"):
            value = raw.strip()
            pairs.append((value, value))
        options["choices"] = pairs
        return options

    def get_form_block(self):
        """Describe the editable settings for this field in the CMS."""
        children = [
            ("label", blocks.CharBlock()),
            ("help_text", blocks.CharBlock(required=False)),
            ("required", blocks.BooleanBlock(required=False)),
            ("empty_label", blocks.CharBlock(required=False)),
            ("choices", blocks.ListBlock(blocks.CharBlock(label="Option"))),
        ]
        return blocks.StructBlock(children, icon=self.icon, label=self.label)
class MultiSelectField(BaseField):
    """Multiple-choice select list."""

    field_class = forms.MultipleChoiceField
    icon = "list-ul"
    label = _("Multiselect field")

    def get_options(self, block_value):
        """Extend the base options with the editor-defined choices."""
        options = super().get_options(block_value)
        pairs = []
        for raw in block_value.get("choices"):
            value = raw.strip()
            pairs.append((value, value))
        options["choices"] = pairs
        return options

    def get_form_block(self):
        """Describe the editable settings for this field in the CMS."""
        children = [
            ("label", blocks.CharBlock()),
            ("help_text", blocks.CharBlock(required=False)),
            ("required", blocks.BooleanBlock(required=False)),
            ("choices", blocks.ListBlock(blocks.CharBlock(label="Option"))),
        ]
        return blocks.StructBlock(children, icon=self.icon, label=self.label)
class RadioField(BaseField):
    """Single choice rendered as a group of radio buttons."""

    field_class = forms.ChoiceField
    widget = forms.widgets.RadioSelect
    icon = "radio-empty"
    label = _("Radio buttons")

    def get_options(self, block_value):
        """Extend base options with (value, value) pairs from the block's choices."""
        options = super().get_options(block_value)
        choices = [(c.strip(), c.strip()) for c in block_value.get("choices")]
        options.update({"choices": choices})
        return options

    def get_form_block(self):
        """StructBlock exposing label/help/required plus an editable choice list."""
        return blocks.StructBlock(
            [
                ("label", blocks.CharBlock()),
                ("help_text", blocks.CharBlock(required=False)),
                ("required", blocks.BooleanBlock(required=False)),
                ("choices", blocks.ListBlock(blocks.CharBlock(label="Option"))),
            ],
            icon=self.icon,
            label=self.label,
        )
class CheckboxesField(BaseField):
    """Multiple choice rendered as a group of checkboxes."""

    field_class = forms.MultipleChoiceField
    widget = forms.widgets.CheckboxSelectMultiple
    icon = "tick-inverse"
    label = _("Checkboxes")

    def get_options(self, block_value):
        """Extend base options with (value, value) pairs from the block's choices."""
        options = super().get_options(block_value)
        choices = [(c.strip(), c.strip()) for c in block_value.get("choices")]
        options.update({"choices": choices})
        return options

    def get_form_block(self):
        """StructBlock exposing label/help/required plus an editable choice list."""
        return blocks.StructBlock(
            [
                ("label", blocks.CharBlock()),
                ("help_text", blocks.CharBlock(required=False)),
                ("required", blocks.BooleanBlock(required=False)),
                ("choices", blocks.ListBlock(blocks.CharBlock(label="Option"))),
            ],
            icon=self.icon,
            label=self.label,
        )
class CheckboxField(BaseField):
    """Single boolean checkbox."""

    field_class = forms.BooleanField
    icon = "tick-inverse"
    label = _("Checkbox field")

    def get_form_block(self):
        """StructBlock exposing only label/help/required (no choices)."""
        return blocks.StructBlock(
            [
                ("label", blocks.CharBlock()),
                ("help_text", blocks.CharBlock(required=False)),
                ("required", blocks.BooleanBlock(required=False)),
            ],
            icon=self.icon,
            label=self.label,
        )
class HiddenField(BaseField):
    """Hidden text input (not shown to the form's end user)."""

    field_class = forms.CharField
    widget = forms.widgets.HiddenInput
    icon = "no-view"
    label = _("Hidden field")
class SingleFileField(BaseField):
    """Single file upload."""

    field_class = forms.FileField
    widget = forms.widgets.FileInput
    icon = "doc-full-inverse"
    label = _("File field")

    def get_form_block(self):
        """StructBlock exposing only label/help/required."""
        return blocks.StructBlock(
            [
                ("label", blocks.CharBlock()),
                ("help_text", blocks.CharBlock(required=False)),
                ("required", blocks.BooleanBlock(required=False)),
            ],
            icon=self.icon,
            label=self.label,
        )
class MultiFileField(BaseField):
    """Multiple file upload (file input with the ``multiple`` attribute)."""

    field_class = forms.FileField
    widget = forms.widgets.FileInput(attrs={"multiple": True})
    icon = "doc-full-inverse"
    label = _("Files field")

    def get_form_block(self):
        """StructBlock exposing only label/help/required."""
        return blocks.StructBlock(
            [
                ("label", blocks.CharBlock()),
                ("help_text", blocks.CharBlock(required=False)),
                ("required", blocks.BooleanBlock(required=False)),
            ],
            icon=self.icon,
            label=self.label,
        )
# Registry key -> field class.  Keys are the names usable in the
# ENABLED_FIELDS setting consumed below.
FIELD_MAPPING = {
    "singleline": SingleLineTextField,
    "multiline": MultiLineTextField,
    "date": DateField,
    "datetime": DateTimeField,
    "email": EmailField,
    "url": URLField,
    "number": NumberField,
    "dropdown": DropdownField,
    "radio": RadioField,
    "checkboxes": CheckboxesField,
    "checkbox": CheckboxField,
    "hidden": HiddenField,
    "singlefile": SingleFileField,
    "multifile": MultiFileField,
}
enabled_fields = get_setting("ENABLED_FIELDS")
for field_name in enabled_fields:
cls = FIELD_MAPPING.get(field_name, None)
if not cls:
raise KeyError("Field with name '%s' does not exist" % field_name)
register(field_name, cls)
| {
"content_hash": "4d60ca5160f54119d2fbc27ad92cc579",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 80,
"avg_line_length": 29.317596566523605,
"alnum_prop": 0.593324549846289,
"repo_name": "AccentDesign/wagtailstreamforms",
"id": "8ce340906c3c549999dc4b623e86bf9904b7f7bf",
"size": "6831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtailstreamforms/wagtailstreamforms_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "690"
},
{
"name": "HTML",
"bytes": "14735"
},
{
"name": "JavaScript",
"bytes": "213"
},
{
"name": "Makefile",
"bytes": "438"
},
{
"name": "Python",
"bytes": "189375"
},
{
"name": "SCSS",
"bytes": "2257"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
} |
"""
Flask Extension Tests
~~~~~~~~~~~~~~~~~~~~~
Tests the Flask extensions.
:copyright: (c) 2010 by Ali Afshar.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import sys
import shutil
import urllib2
import tempfile
import subprocess
import argparse
from flask import json
from setuptools.package_index import PackageIndex
from setuptools.archive_util import unpack_archive
flask_svc_url = 'http://flask.pocoo.org/extensions/'
# OS X has awful paths when using mkstemp or gettempdir(). I don't
# care about security or clashes here, so pick something that is
# actually rememberable.
if sys.platform == 'darwin':
_tempdir = '/private/tmp'
else:
_tempdir = tempfile.gettempdir()
tdir = _tempdir + '/flaskext-test'
flaskdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# virtualenv hack *cough*
os.environ['PYTHONDONTWRITEBYTECODE'] = ''
RESULT_TEMPATE = u'''\
<!doctype html>
<title>Flask-Extension Test Results</title>
<style type=text/css>
body { font-family: 'Georgia', serif; font-size: 17px; color: #000; }
a { color: #004B6B; }
a:hover { color: #6D4100; }
h1, h2, h3 { font-family: 'Garamond', 'Georgia', serif; font-weight: normal; }
h1 { font-size: 30px; margin: 15px 0 5px 0; }
h2 { font-size: 24px; margin: 15px 0 5px 0; }
h3 { font-size: 19px; margin: 15px 0 5px 0; }
textarea, code,
pre { font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono',
'Bitstream Vera Sans Mono', monospace!important; font-size: 15px;
background: #eee; }
pre { padding: 7px 15px; line-height: 1.3; }
p { line-height: 1.4; }
table { border: 1px solid black; border-collapse: collapse;
margin: 15px 0; }
td, th { border: 1px solid black; padding: 4px 10px;
text-align: left; }
th { background: #eee; font-weight: normal; }
tr.success { background: #D3F5CC; }
tr.failed { background: #F5D2CB; }
</style>
<h1>Flask-Extension Test Results</h1>
<p>
This page contains the detailed test results for the test run of
all {{ 'approved' if approved }} Flask extensions.
<h2>Summary</h2>
<table class=results>
<thead>
<tr>
<th>Extension
<th>Version
<th>Author
<th>License
<th>Outcome
{%- for iptr, _ in results[0].logs|dictsort %}
<th>{{ iptr }}
{%- endfor %}
</tr>
</thead>
<tbody>
{%- for result in results %}
{% set outcome = 'success' if result.success else 'failed' %}
<tr class={{ outcome }}>
<th>{{ result.name }}
<td>{{ result.version }}
<td>{{ result.author }}
<td>{{ result.license }}
<td>{{ outcome }}
{%- for iptr, _ in result.logs|dictsort %}
<td><a href="#{{ result.name }}-{{ iptr }}">see log</a>
{%- endfor %}
</tr>
{%- endfor %}
</tbody>
</table>
<h2>Test Logs</h2>
<p>Detailed test logs for all tests on all platforms:
{%- for result in results %}
{%- for iptr, log in result.logs|dictsort %}
<h3 id="{{ result.name }}-{{ iptr }}">
{{ result.name }} - {{ result.version }} [{{ iptr }}]</h3>
<pre>{{ log }}</pre>
{%- endfor %}
{%- endfor %}
'''
def log(msg, *args):
    # Lightweight stdout logger; ``msg`` is a %-format string applied to args.
    print '[EXTTEST]', msg % args
class TestResult(object):
    """Collects metadata and per-interpreter test logs for one extension run."""

    def __init__(self, name, folder, statuscode, interpreters):
        # Query setup.py through the first tox virtualenv's interpreter so
        # metadata reflects the environment the tests actually ran in.
        intrptr = os.path.join(folder, '.tox/%s/bin/python'
                               % interpreters[0])
        self.statuscode = statuscode
        self.folder = folder
        self.success = statuscode == 0

        def fetch(field):
            # Ask setup.py for one metadata field; '?' when the interpreter
            # is missing (e.g. the tox env failed to build).
            try:
                c = subprocess.Popen([intrptr, 'setup.py',
                                      '--' + field], cwd=folder,
                                     stdout=subprocess.PIPE)
                return c.communicate()[0].strip()
            except OSError:
                return '?'

        self.name = name
        self.license = fetch('license')
        self.author = fetch('author')
        self.version = fetch('version')
        # Map interpreter name -> captured test log ('' when absent).
        self.logs = {}
        for interpreter in interpreters:
            logfile = os.path.join(folder, '.tox/%s/log/test.log'
                                   % interpreter)
            if os.path.isfile(logfile):
                self.logs[interpreter] = open(logfile).read()
            else:
                self.logs[interpreter] = ''
def create_tdir():
    """Create a fresh, empty working directory at ``tdir``."""
    # ignore_errors replaces the former blind try/except: it covers the
    # first-run case where ``tdir`` does not exist yet without swallowing
    # unrelated exceptions from other statements.
    shutil.rmtree(tdir, ignore_errors=True)
    os.mkdir(tdir)
def package_flask():
    """Build a Flask sdist tarball and return the path to the archive."""
    distfolder = tdir + '/.flask-dist'
    c = subprocess.Popen(['python', 'setup.py', 'sdist', '--formats=gztar',
                          '--dist', distfolder], cwd=flaskdir)
    c.wait()
    # The dist folder is freshly created, so exactly one archive is expected.
    return os.path.join(distfolder, os.listdir(distfolder)[0])
def get_test_command(checkout_dir):
    """Return the shell command that runs the checkout's test suite.

    A checkout shipping a Makefile is assumed to expose a ``test`` target;
    anything else falls back to setuptools.
    """
    makefile = os.path.join(checkout_dir, 'Makefile')
    if os.path.isfile(makefile):
        return 'make test'
    return 'python setup.py test'
def fetch_extensions_list():
    """Yield extension records from the flask.pocoo.org JSON listing."""
    req = urllib2.Request(flask_svc_url, headers={'accept':'application/json'})
    d = urllib2.urlopen(req).read()
    data = json.loads(d)
    for ext in data['extensions']:
        yield ext
def checkout_extension(name):
    """Download and unpack the named extension; return its source directory."""
    log('Downloading extension %s to temporary folder', name)
    root = os.path.join(tdir, name)
    os.mkdir(root)
    checkout_path = PackageIndex().download(name, root)
    unpack_archive(checkout_path, root)
    path = None
    # The archive is expected to unpack into a single top-level folder.
    # NOTE(review): if no directory is present, `path` ends up as the last
    # listed *file* (or None for an empty root) -- confirm that is acceptable.
    for fn in os.listdir(root):
        path = os.path.join(root, fn)
        if os.path.isdir(path):
            break
    log('Downloaded to %s', path)
    return path
tox_template = """[tox]
envlist=%(env)s
[testenv]
deps=
%(deps)s
distribute
py
commands=bash flaskext-runtest.sh {envlogdir}/test.log
downloadcache=%(cache)s
"""
def create_tox_ini(checkout_path, interpreters, flask_dep):
    """Write a tox config for the checkout unless one already exists."""
    tox_path = os.path.join(checkout_path, 'tox-flask-test.ini')
    if os.path.exists(tox_path):
        return tox_path
    contents = tox_template % {
        'env': ','.join(interpreters),
        'cache': tdir,
        'deps': flask_dep
    }
    with open(tox_path, 'w') as f:
        f.write(contents)
    return tox_path
def iter_extensions(only_approved=True):
    """Yield the name of each listed extension, optionally only approved ones."""
    for record in fetch_extensions_list():
        if record['approved'] or not only_approved:
            yield record['name']
def test_extension(name, interpreters, flask_dep):
    """Check out *name*, run its test suite via tox and return a TestResult."""
    checkout_path = checkout_extension(name)
    log('Running tests with tox in %s', checkout_path)

    # figure out the test command and write a wrapper script.  We
    # can't write that directly into the tox ini because tox does
    # not invoke the command from the shell so we have no chance
    # to pipe the output into a logfile.  The /dev/null hack is
    # to trick py.test (if used) into not guessing widths from the
    # invoking terminal.
    test_command = get_test_command(checkout_path)
    log('Test command: %s', test_command)
    # `with` guarantees the script is flushed and closed before tox runs it
    # (the previous open().write() left the handle to the garbage collector).
    with open(checkout_path + '/flaskext-runtest.sh', 'w') as f:
        f.write(test_command + ' &> "$1" < /dev/null\n')

    # Write our own tox config (skipped if one was already generated);
    # we run tox ourselves rather than relying on the project's tox.ini.
    create_tox_ini(checkout_path, interpreters, flask_dep)
    rv = subprocess.call(['tox', '-c', 'tox-flask-test.ini'], cwd=checkout_path)
    return TestResult(name, checkout_path, rv, interpreters)
def run_tests(extensions, interpreters):
    """Test every named extension; return a mapping name -> TestResult."""
    results = {}
    create_tdir()
    log('Packaging Flask')
    flask_dep = package_flask()
    log('Running extension tests')
    log('Temporary Environment: %s', tdir)
    for name in extensions:
        log('Testing %s', name)
        outcome = test_extension(name, interpreters, flask_dep)
        log('Extension test %s', 'succeeded' if outcome.success else 'failed')
        results[name] = outcome
    return results
def render_results(results, approved):
    """Render the HTML summary report to a temp file and return its path."""
    from jinja2 import Template
    items = results.values()
    items.sort(key=lambda x: x.name.lower())
    rv = Template(RESULT_TEMPATE, autoescape=True).render(results=items,
                                                          approved=approved)
    fd, filename = tempfile.mkstemp(suffix='.html')
    # Close deterministically so the report is fully flushed to disk before
    # the caller opens it (the previous fdopen().write() never closed).
    with os.fdopen(fd, 'w') as f:
        f.write(rv.encode('utf-8') + '\n')
    return filename
def main():
    """Command-line entry point: run extension tests and render the report."""
    parser = argparse.ArgumentParser(description='Runs Flask extension tests')
    parser.add_argument('--all', dest='all', action='store_true',
                        help='run against all extensions, not just approved')
    parser.add_argument('--browse', dest='browse', action='store_true',
                        help='show browser with the result summary')
    parser.add_argument('--env', dest='env', default='py25,py26,py27',
                        help='the tox environments to run against')
    # NOTE(review): the option string '--extension=' carries a trailing '=';
    # '--extension' is presumably intended -- verify argparse handles this.
    parser.add_argument('--extension=', dest='extension', default=None,
                        help='tests a single extension')
    args = parser.parse_args()
    if args.extension is not None:
        # A single named extension is always tested, approved or not.
        only_approved = False
        extensions = [args.extension]
    else:
        only_approved = not args.all
        extensions = iter_extensions(only_approved)
    results = run_tests(extensions, [x.strip() for x in args.env.split(',')])
    filename = render_results(results, only_approved)
    if args.browse:
        import webbrowser
        webbrowser.open('file:///' + filename.lstrip('/'))
    print 'Results written to', filename
| {
"content_hash": "5bf7f9af51af54a07181e19f9febe3e8",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 82,
"avg_line_length": 31.1,
"alnum_prop": 0.590602634581475,
"repo_name": "HackingHabits/PersonalPasswordManager",
"id": "d1d5d991be77579a70cfd4763068548bb7a550b9",
"size": "9665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/Flask/scripts/flaskext_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2860"
},
{
"name": "Python",
"bytes": "2051"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from ...tests.helper import pickle_protocol, check_pickling_recovery
from ...extern.six.moves import zip
from ... import cosmology as cosm
# Parallel lists: classes to round-trip through pickle, and whether the
# round-trip is expected to fail for the class at the same index.
originals = [cosm.FLRW]
xfails = [False]


@pytest.mark.parametrize(("original","xfail"),
                         zip(originals, xfails))
def test_flrw(pickle_protocol, original, xfail):
    """Check that `original` survives a pickle round-trip at `pickle_protocol`."""
    if xfail:
        pytest.xfail()
    check_pickling_recovery(original, pickle_protocol)
| {
"content_hash": "68f3a770e96a9269dd01d56635a8fcb7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 82,
"avg_line_length": 30.470588235294116,
"alnum_prop": 0.7046332046332047,
"repo_name": "kelle/astropy",
"id": "d9e199f62d7a8ebd70402c8d8d3292b0ab12cdeb",
"size": "582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astropy/cosmology/tests/test_pickle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366877"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "8072264"
},
{
"name": "Shell",
"bytes": "446"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
"""Utilities for testing endpoints for permissions
To use, import and create class:
class Test<YourEndpointName>Endpoints(EndpointTestBase, test.TestCase):
def setUp(self):
self.init()
# call self.get_in_program_id() to get program_id that people _should_ have access to
# call self.get_out_program_id() to get program_id that people _should not_ have access to
# call self.run_passing_urls(urls) with a list of urls that everyone should view
# call self.run_failing_urls(urls) with a list of urls that everyone should not view
"""
import datetime
from django import test
from django.urls import reverse
from factories import (
workflow_models as w_factories,
django_models as d_factories,
indicators_models as i_factories
)
from indicators.models import Indicator
from workflow.models import Organization, PROGRAM_ROLE_CHOICES, COUNTRY_ROLE_CHOICES
class EndpointTestContext(object):
    """Shared fixture set for endpoint permission tests.

    Builds two countries with one program each (plus a "home" country for
    the users), indicators and results on both programs, and users at every
    access level.  Call ``init()`` from a test's ``setUp``.
    """

    def init(self):
        """Create all countries, programs, indicators, results and users."""
        self.home_country = w_factories.CountryFactory(
            country='HOME country',
            code='HM'
        )
        self.in_country = w_factories.CountryFactory(
            country='IN country',
            code='IN'
        )
        self.out_country = w_factories.CountryFactory(
            country='OUT country',
            code='OUT'
        )
        self.program_in_country = w_factories.ProgramFactory(
            name='program in IN country',
            gaitid='inpr',
            country=self.in_country,
            reporting_period_start=datetime.date(2016, 1, 1),
            reporting_period_end=datetime.date(2016, 12, 31)
        )
        self.program_in_country.country.add(self.in_country)
        self.program_out_of_country = w_factories.ProgramFactory(
            name='program in OUT country',
            gaitid='outpr',
            country=self.out_country,
            reporting_period_start=datetime.date(2016, 1, 1),
            reporting_period_end=datetime.date(2016, 12, 31)
        )
        # Ensure the OUT program is associated with *only* the OUT country.
        self.program_out_of_country.country.clear()
        self.program_out_of_country.country.add(self.out_country)
        self.add_indicators()
        self.add_results()
        # Organization pk=1 is Mercy Corps by convention; reuse if present.
        if Organization.objects.filter(pk=1).count() == 1:
            self.mercy_corps_organization = Organization.objects.get(pk=1)
        else:
            self.mercy_corps_organization = w_factories.OrganizationFactory(
                pk=1
            )
        self.partner_organization = w_factories.OrganizationFactory(
            name="Partner Org"
        )
        # --- Mercy Corps users at each access level -----------------------
        user = d_factories.UserFactory(
            first_name='mercy corps',
            last_name='country low'
        )
        self.mercy_corps_country_low = w_factories.TolaUserFactory(
            name="mercy corps country low",
            organization=self.mercy_corps_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_country_access(self.mercy_corps_country_low, self.in_country,
                                         COUNTRY_ROLE_CHOICES[0][0])
        user = d_factories.UserFactory(
            first_name='mercy corps',
            last_name='country admin'
        )
        self.mercy_corps_country_admin = w_factories.TolaUserFactory(
            name="mercy corps country admin",
            organization=self.mercy_corps_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_country_access(self.mercy_corps_country_admin, self.in_country,
                                         COUNTRY_ROLE_CHOICES[1][0])
        user = d_factories.UserFactory(
            first_name='mercy corps',
            last_name='program low'
        )
        self.mercy_corps_low = w_factories.TolaUserFactory(
            name="mercy corps low",
            organization=self.mercy_corps_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_program_access(self.mercy_corps_low, self.program_in_country,
                                         self.in_country, PROGRAM_ROLE_CHOICES[0][0])
        user = d_factories.UserFactory(
            first_name='mercy corps',
            last_name='program medium'
        )
        self.mercy_corps_medium = w_factories.TolaUserFactory(
            name="mercy corps medium",
            organization=self.mercy_corps_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_program_access(self.mercy_corps_medium, self.program_in_country,
                                         self.in_country, PROGRAM_ROLE_CHOICES[1][0])
        user = d_factories.UserFactory(
            first_name='mercy corps',
            last_name='program high'
        )
        self.mercy_corps_high = w_factories.TolaUserFactory(
            name="mercy corps high",
            organization=self.mercy_corps_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_program_access(self.mercy_corps_high, self.program_in_country,
                                         self.in_country, PROGRAM_ROLE_CHOICES[2][0])
        user = d_factories.UserFactory(
            first_name='mercy corps',
            last_name='super admin'
        )
        self.mercy_corps_super_admin = w_factories.TolaUserFactory(
            name="mercy corps super admin",
            organization=self.mercy_corps_organization,
            country=self.home_country,
            user=user
        )
        self.mercy_corps_super_admin.user.is_superuser = True
        self.mercy_corps_super_admin.user.save()
        # --- Partner (non-Mercy-Corps) users ------------------------------
        user = d_factories.UserFactory(
            first_name='non mercy corps',
            last_name='program low'
        )
        self.non_mercy_corps_low = w_factories.TolaUserFactory(
            name="non-MC low",
            organization=self.partner_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_program_access(self.non_mercy_corps_low, self.program_in_country,
                                         self.in_country, PROGRAM_ROLE_CHOICES[0][0])
        user = d_factories.UserFactory(
            first_name='non mercy corps',
            last_name='program medium'
        )
        self.non_mercy_corps_medium = w_factories.TolaUserFactory(
            name="non-MC medium",
            organization=self.partner_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_program_access(self.non_mercy_corps_medium, self.program_in_country,
                                         self.in_country, PROGRAM_ROLE_CHOICES[1][0])
        user = d_factories.UserFactory(
            first_name='non mercy corps',
            last_name='program high'
        )
        # Fixed copy-paste defect: this user was named "non-MC medium",
        # colliding with the actual medium-level user above.
        self.non_mercy_corps_high = w_factories.TolaUserFactory(
            name="non-MC high",
            organization=self.partner_organization,
            country=self.home_country,
            user=user
        )
        w_factories.grant_program_access(self.non_mercy_corps_high, self.program_in_country,
                                         self.in_country, PROGRAM_ROLE_CHOICES[2][0])
        self.external_service = i_factories.ExternalServiceFactory()

    def add_indicators(self):
        """Attach one LOP and one EVENT indicator to each program."""
        self.indicator_in_country = i_factories.IndicatorFactory(
            program=self.program_in_country,
            target_frequency=Indicator.LOP
        )
        self.indicator_out_of_country = i_factories.IndicatorFactory(
            program=self.program_out_of_country,
            target_frequency=Indicator.LOP
        )
        self.indicator_in_country_target_frequency_type_event = i_factories.IndicatorFactory(
            program=self.program_in_country,
            target_frequency=Indicator.EVENT
        )
        self.indicator_out_of_country_target_frequency_type_event = i_factories.IndicatorFactory(
            program=self.program_out_of_country,
            target_frequency=Indicator.EVENT
        )

    def add_periodic_targets(self):
        """Add one full-period periodic target per LOP indicator.

        Not called by ``init()``; tests that reverse periodic-target URLs
        must call this explicitly.
        """
        self.pt_out_of_country = i_factories.PeriodicTargetFactory(
            indicator=self.indicator_out_of_country,
            start_date=self.program_out_of_country.reporting_period_start,
            end_date=self.program_out_of_country.reporting_period_end
        )
        self.pt_in_country = i_factories.PeriodicTargetFactory(
            indicator=self.indicator_in_country,
            start_date=self.program_in_country.reporting_period_start,
            end_date=self.program_in_country.reporting_period_end
        )

    def add_results(self):
        """Record one achieved result against each LOP indicator."""
        self.result_out_of_country = i_factories.ResultFactory(
            indicator=self.indicator_out_of_country,
            program=self.program_out_of_country,
            achieved=100
        )
        self.result_in_country = i_factories.ResultFactory(
            indicator=self.indicator_in_country,
            program=self.program_in_country,
            achieved=100
        )

    def add_pinned_report(self, tolauser, out=True):
        """Pin a report for ``tolauser`` (OUT program by default); return its pk."""
        program = self.program_out_of_country if out else self.program_in_country
        pinned_report = i_factories.PinnedReportFactory(
            tola_user=tolauser,
            program=program
        )
        return pinned_report.pk

    @property
    def high_users(self):
        # High-role users plus the superadmin.
        for user in [self.mercy_corps_high,
                     self.non_mercy_corps_high,
                     self.mercy_corps_super_admin]:
            yield user

    @property
    def non_high_users(self):
        # Everyone below the 'high' program role (country roles included).
        for user in [self.mercy_corps_low,
                     self.mercy_corps_medium,
                     self.mercy_corps_country_low,
                     self.mercy_corps_country_admin,
                     self.non_mercy_corps_low,
                     self.non_mercy_corps_medium]:
            yield user

    @property
    def all_non_superadmin_users(self):
        for user in [self.mercy_corps_low,
                     self.mercy_corps_medium,
                     self.mercy_corps_high,
                     self.mercy_corps_country_low,
                     self.mercy_corps_country_admin,
                     self.non_mercy_corps_low,
                     self.non_mercy_corps_medium,
                     self.non_mercy_corps_high]:
            yield user

    @property
    def all_users(self):
        for user in [u for u in self.all_non_superadmin_users] + [self.mercy_corps_super_admin,]:
            yield user
class EndpointTestBase(object):
    """Mixin driving permission checks; subclasses configure class attributes."""

    # Named URL pattern to reverse; url_kwargs values are markers (e.g.
    # 'indicator', 'result', 'event') resolved against the fixture context.
    url = None
    url_kwargs = {}
    # Lowest role expected to succeed: 'low' | 'medium' | 'high' | 'admin'.
    access_level = None
    post_data = {}
    get_params = {}
    delete = None
    # True when unauthorized users are redirected home instead of getting 403.
    redirect = False
    # True for AJAX views where anonymous users get 403 rather than a
    # redirect to the login page.
    no_login_redirect = False
    def init(self):
        """Build the shared fixture context and a fresh test client."""
        self.context = EndpointTestContext()
        self.context.init()
        self.client = test.Client()
def get_permissioned_users(self):
users = []
if self.access_level in ['high', 'medium', 'low']:
users.append((self.context.mercy_corps_high, 'MC high'))
users.append((self.context.non_mercy_corps_high, 'Non MC high'))
if self.access_level in ['medium', 'low']:
users.append((self.context.mercy_corps_medium, 'MC medium'))
users.append((self.context.non_mercy_corps_medium, 'Non MC medium'))
if self.access_level == 'low':
users.append((self.context.mercy_corps_low, 'MC low'))
users.append((self.context.non_mercy_corps_low, 'Non MC low'))
users.append((self.context.mercy_corps_country_low, 'MC country-based low'))
if self.access_level == 'admin':
users.append((self.context.mercy_corps_country_admin, 'MC admin'))
return users
def get_non_permissioned_users(self):
users = []
if self.access_level == 'high':
users.append((self.context.mercy_corps_medium, 'MC medium'))
users.append((self.context.non_mercy_corps_medium, 'Non MC medium'))
if self.access_level in ['high', 'medium']:
users.append((self.context.mercy_corps_low, 'MC low'))
users.append((self.context.non_mercy_corps_low, 'Non MC low'))
users.append((self.context.mercy_corps_country_low, 'MC country-based low'))
return users
    def get_all_users(self):
        """All non-superadmin users, regardless of expected access."""
        return self.context.all_non_superadmin_users
    def get_out_url(self):
        """Build the URL under test against the OUT-of-country fixtures.

        Side effect: rewrites self.get_params['program_id'] when present.
        """
        kwargs = {}
        if 'program' in self.url_kwargs:
            kwargs['program'] = self.context.program_out_of_country.pk
        if 'indicator' in self.url_kwargs:
            if self.url_kwargs['indicator'] == 'event':
                kwargs['indicator'] = self.context.indicator_out_of_country_target_frequency_type_event.pk
            else:
                kwargs['indicator'] = self.context.indicator_out_of_country.pk
        if 'pk' in self.url_kwargs and self.url_kwargs['pk'] == 'indicator':
            kwargs['pk'] = self.context.indicator_out_of_country.pk
        if 'pk' in self.url_kwargs and self.url_kwargs['pk'] == 'periodic_target':
            # NOTE(review): pt_* fixtures exist only after
            # context.add_periodic_targets() was called -- confirm callers do.
            kwargs['pk'] = self.context.pt_out_of_country.pk
        if 'pk' in self.url_kwargs and self.url_kwargs['pk'] == 'result':
            kwargs['pk'] = self.context.result_out_of_country.pk
        if 'deleteall' in self.url_kwargs:
            kwargs['deleteall'] = self.url_kwargs['deleteall']
        if 'reporttype' in self.url_kwargs:
            kwargs['reporttype'] = self.url_kwargs['reporttype']
        if 'service' in self.url_kwargs:
            kwargs['service'] = self.context.external_service.id
        if 'program_id' in self.get_params:
            self.get_params['program_id'] = self.context.program_out_of_country.pk
        return reverse(self.url, kwargs=kwargs)
    def get_in_url(self):
        """Build the URL under test against the IN-country fixtures.

        Mirror of get_out_url; side effect: rewrites
        self.get_params['program_id'] when present.
        """
        kwargs = {}
        if 'program' in self.url_kwargs:
            kwargs['program'] = self.context.program_in_country.pk
        if 'indicator' in self.url_kwargs:
            if self.url_kwargs['indicator'] == 'event':
                kwargs['indicator'] = self.context.indicator_in_country_target_frequency_type_event.pk
            else:
                kwargs['indicator'] = self.context.indicator_in_country.pk
        if 'pk' in self.url_kwargs and self.url_kwargs['pk'] == 'indicator':
            kwargs['pk'] = self.context.indicator_in_country.pk
        if 'pk' in self.url_kwargs and self.url_kwargs['pk'] == 'periodic_target':
            # NOTE(review): requires context.add_periodic_targets() first.
            kwargs['pk'] = self.context.pt_in_country.pk
        if 'pk' in self.url_kwargs and self.url_kwargs['pk'] == 'result':
            kwargs['pk'] = self.context.result_in_country.pk
        if 'deleteall' in self.url_kwargs:
            kwargs['deleteall'] = self.url_kwargs['deleteall']
        if 'reporttype' in self.url_kwargs:
            kwargs['reporttype'] = self.url_kwargs['reporttype']
        if 'service' in self.url_kwargs:
            kwargs['service'] = self.context.external_service.id
        if 'program_id' in self.get_params:
            self.get_params['program_id'] = self.context.program_in_country.pk
        return reverse(self.url, kwargs=kwargs)
def fetch_get_response(self, tolauser, url):
self.client.logout()
if tolauser is not None:
self.client.force_login(tolauser.user)
return self.client.get(url, self.get_params)
def fetch_post_response(self, tolauser, url):
self.client.logout()
if tolauser is not None:
self.client.force_login(tolauser.user)
return self.client.post(url, self.post_data)
def fetch_delete_response(self, tolauser, url):
self.client.logout()
if tolauser is not None:
self.client.force_login(tolauser.user)
return self.client.delete(url, self.post_data)
def assert_passes(self, response, msg):
self.assertEqual(response.status_code, 200,
'{msg} but got response status code {code}'.format(
msg=msg, code=response.status_code
))
def assert_post_passes(self, response, msg):
# assert not 403 (some posts redirect (302) and some succeed (200) but should not be forbidden)
self.assertNotEqual(response.status_code, 403,
'{msg} but got response status code {code}'.format(
msg=msg, code=response.status_code))
def assert_forbidden(self, response, msg):
if self.redirect:
self.assertRedirects(response, reverse('index'), msg_prefix=msg)
else:
self.assertEqual(response.status_code, 403,
'{msg} but got response {code}'.format(msg=msg, code=response.status_code))
def assert_redirects_to_login(self, response, msg, url):
# for AJAX views where "continue on after login" makes no sense:
if self.no_login_redirect:
self.assertEqual(response.status_code, 403,
msg + "anonymous user should get 403 at this endpoint, got {}".format(
response.status_code))
# otherwise:
else:
self.assertRedirects(response, reverse('login') + '?next=' + url, msg_prefix=msg)
def run_get_tests(self, skip_out_country=False):
if not skip_out_country:
# get out of country url:
url = self.get_out_url()
# ensure superuser can access:
response = self.fetch_get_response(self.context.mercy_corps_super_admin, url)
self.assert_passes(response, 'superuser should have access to {}'.format(url))
# ensure all users cannot access:
for user in self.get_all_users():
response = self.fetch_get_response(user, url)
self.assert_forbidden(
response, 'user not assigned to country should redirect from {}'.format(url))
# ensure anonymous user cannot access:
response = self.fetch_get_response(None, url)
self.assert_redirects_to_login(response, 'anonymous user should redirect from {}'.format(url), url)
# get in country url:
url = self.get_in_url()
# ensure superuser can access:
response = self.fetch_get_response(self.context.mercy_corps_super_admin, url)
self.assert_passes(response, 'superuser should have access to {}'.format(url))
# ensure all users with appropriate access can access:
for user, level in self.get_permissioned_users():
response = self.fetch_get_response(user, url)
self.assert_passes(response, 'user level {0} should have access to {1}'.format(
level, url
))
# ensure users without appropriate access cannot access:
for user, level in self.get_non_permissioned_users():
response = self.fetch_get_response(user, url)
self.assert_forbidden(response, 'user level {0} should not have access to {1}'.format(
level, url))
# ensure anonymous user cannot access:
response = self.fetch_get_response(None, url)
self.assert_redirects_to_login(response, 'anonymous user should redirect from {}'.format(url), url)
def run_post_tests(self, method='post'):
if method == 'post':
fetch_method = self.fetch_post_response
elif method == 'delete':
fetch_method = self.fetch_delete_response
else:
raise ValueError('invalid method {}'.format(method))
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
if 'program' in self.post_data:
self.post_data['program'] = self.context.program_out_of_country.pk
if self.delete == 'pinned_report':
self.post_data['pinned_report_id'] = self.context.add_pinned_report(
self.context.mercy_corps_super_admin, True)
response = fetch_method(self.context.mercy_corps_super_admin, self.get_out_url())
self.assert_post_passes(response, 'superuser should be able to {0} to {1}'.format(method, self.get_out_url()))
# ensure all users cannot access:
for user in self.get_all_users():
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
if self.delete == 'pinned_report':
self.post_data['pinned_report_id'] = self.context.add_pinned_report(user, True)
response = fetch_method(user, self.get_out_url())
self.assert_forbidden(
response, 'user not assigned to country should redirect from {}'.format(self.get_out_url()))
# ensure anonymous user cannot access:
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
response = fetch_method(None, self.get_out_url())
self.assert_redirects_to_login(response, 'anonymous user should redirect from {}'.format(
self.get_out_url()), self.get_out_url())
# ensure superuser can access:
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
if 'program' in self.post_data:
self.post_data['program'] = self.context.program_in_country.pk
if self.delete == 'pinned_report':
self.post_data['pinned_report_id'] = self.context.add_pinned_report(
self.context.mercy_corps_super_admin, False
)
response = fetch_method(self.context.mercy_corps_super_admin, self.get_in_url())
self.assert_post_passes(response, 'superuser should be able to {0} to {1}'.format(
method, self.get_in_url()))
# ensure all users with appropriate access can access:
for user, level in self.get_permissioned_users():
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
if self.delete == 'pinned_report':
self.post_data['pinned_report_id'] = self.context.add_pinned_report(user, False)
response = fetch_method(user, self.get_in_url())
self.assert_post_passes(response, 'user level {0} should have {1} access to {2}'.format(
level, method, self.get_in_url()))
for user, level in self.get_non_permissioned_users():
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
if self.delete == 'pinned_report':
self.post_data['pinned_report_id'] = self.context.add_pinned_report(user, False)
response = fetch_method(user, self.get_in_url())
self.assert_forbidden(response, 'user level {0} should not have {1} access to {2}'.format(
level, method, self.get_in_url()))
# ensure anonymous user cannot acccess:
if self.delete == 'indicator':
self.context.add_indicators()
if self.delete == 'periodic_target':
self.context.add_periodic_targets()
if self.delete == 'result':
self.context.add_results()
response = fetch_method(None, self.get_in_url())
self.assert_redirects_to_login(response, 'anonymous user should redirect from {}'.format(
self.get_in_url()), self.get_in_url())
| {
"content_hash": "46304304f42bdd0f19de44fb6e544414",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 118,
"avg_line_length": 45.47565543071161,
"alnum_prop": 0.5921594465491682,
"repo_name": "mercycorps/TolaActivity",
"id": "a62b8ef37198489fc1c5689759c2ff7e0c8b7c52",
"size": "24284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tola/test/endpoint_permissions_test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
} |
"""Helper methods for configuring deployment manager command flags."""
from googlecloudsdk.api_lib.deployment_manager import dm_v2_util
from googlecloudsdk.calliope import arg_parsers
def AddDeploymentNameFlag(parser):
    """Add the deployment_name positional argument."""
    parser.add_argument('deployment_name', help='Deployment name.')
def AddPropertiesFlag(parser):
    """Add the --properties flag for passing template properties.

    Args:
        parser: argparse parser to which the flag is added.
    """
    parser.add_argument(
        '--properties',
        # Typo fixed in user-facing help text: "seperated" -> "separated".
        help='A comma separated, key=value, map '
        'to be used when deploying a template file directly.',
        type=arg_parsers.ArgDict(operators=dm_v2_util.NewParserDict()),
        dest='properties')
def AddAsyncFlag(parser):
    """Add the --async flag (return without waiting for the Operation)."""
    async_help = ('Return immediately and print information about the Operation in '
                  'progress rather than waiting for the Operation to complete. '
                  '(default=False)')
    parser.add_argument(
        '--async',
        help=async_help,
        dest='async',
        action='store_true',
        default=False)
def AddDeletePolicyFlag(parser, request_class):
    """Add the --delete-policy flag.

    Args:
        parser: argparse parser to which the flag is added.
        request_class: request message class whose DeletePolicyValueValuesEnum
            supplies the set of valid choices.
    """
    policy_enum = request_class.DeletePolicyValueValuesEnum
    parser.add_argument(
        '--delete-policy',
        help=('Delete policy for resources that will change as part of an update '
              'or delete. DELETE deletes the resource while ABANDON just removes '
              'the resource reference from the deployment.'),
        choices=sorted(policy_enum.to_dict().keys()),
        default='DELETE')
def AddFingerprintFlag(parser):
    """Add the --fingerprint flag used for optimistic locking."""
    fingerprint_help = (
        'The fingerprint to use in requests to modify a deployment. If not '
        'specified, a get deployment request will be made to fetch the '
        'latest fingerprint. A fingerprint is a randomly generated value '
        'that is part of the update, stop, and cancel-preview request to '
        'perform optimistic locking. It is initially generated by '
        'Deployment Manager and changes after every request to modify '
        'data. The latest fingerprint is printed when deployment data is '
        'modified.')
    parser.add_argument(
        '--fingerprint',
        help=fingerprint_help,
        dest='fingerprint')
| {
"content_hash": "b46e05e83b2a339e7e773ef9e442987c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 36.83050847457627,
"alnum_prop": 0.6728025770823746,
"repo_name": "Sorsly/subtle",
"id": "d1b21e3c03723fd4a7b509f2c84019e437d74a73",
"size": "2769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/googlecloudsdk/command_lib/deployment_manager/flags.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
"""
Tests for DynamoDB v2 high-level abstractions.
"""
from __future__ import with_statement
import os
import time
from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex,
GlobalKeysOnlyIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
try:
import json
except ImportError:
import simplejson as json
class DynamoDBv2Test(unittest.TestCase):
    """Integration tests for the DynamoDB v2 high-level abstractions.

    NOTE(review): these tests talk to the live DynamoDB service; the long
    ``time.sleep`` calls presumably wait out asynchronous table creation and
    write propagation — confirm against the service's consistency model.
    """
    # Marker attribute; presumably used by the test runner to select/flag
    # DynamoDB integration tests — TODO confirm against the runner config.
    dynamodb = True

    def test_integration(self):
        """End-to-end exercise: table create/describe, item CRUD, conditional
        and partial saves, batch writes/gets, queries, scans and counts."""
        # Test creating a full table with all options specified.
        users = Table.create('users', schema=[
            HashKey('username'),
            RangeKey('friend_count', data_type=NUMBER)
        ], throughput={
            'read': 5,
            'write': 5,
        }, indexes=[
            KeysOnlyIndex('LastNameIndex', parts=[
                HashKey('username'),
                RangeKey('last_name')
            ]),
        ])
        self.addCleanup(users.delete)
        self.assertEqual(len(users.schema), 2)
        self.assertEqual(users.throughput['read'], 5)
        # Wait for it.
        time.sleep(60)
        # Make sure things line up if we're introspecting the table.
        users_hit_api = Table('users')
        users_hit_api.describe()
        self.assertEqual(len(users.schema), len(users_hit_api.schema))
        self.assertEqual(users.throughput, users_hit_api.throughput)
        self.assertEqual(len(users.indexes), len(users_hit_api.indexes))
        # Test putting some items individually.
        users.put_item(data={
            'username': 'johndoe',
            'first_name': 'John',
            'last_name': 'Doe',
            'friend_count': 4
        })
        users.put_item(data={
            'username': 'alice',
            'first_name': 'Alice',
            'last_name': 'Expert',
            'friend_count': 2
        })
        time.sleep(5)
        # Test batch writing.
        with users.batch_write() as batch:
            batch.put_item({
                'username': 'jane',
                'first_name': 'Jane',
                'last_name': 'Doe',
                'friend_count': 3
            })
            batch.delete_item(username='alice', friend_count=2)
            batch.put_item({
                'username': 'bob',
                'first_name': 'Bob',
                'last_name': 'Smith',
                'friend_count': 1
            })
        time.sleep(5)
        # Does it exist? It should?
        self.assertTrue(users.has_item(username='jane', friend_count=3))
        # But this shouldn't be there...
        self.assertFalse(users.has_item(
            username='mrcarmichaeljones',
            friend_count=72948
        ))
        # Test getting an item & updating it.
        # This is the "safe" variant (only write if there have been no
        # changes).
        jane = users.get_item(username='jane', friend_count=3)
        self.assertEqual(jane['first_name'], 'Jane')
        jane['last_name'] = 'Doh'
        self.assertTrue(jane.save())
        # Test strongly consistent getting of an item.
        # Additionally, test the overwrite behavior.
        client_1_jane = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(jane['first_name'], 'Jane')
        client_2_jane = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(jane['first_name'], 'Jane')
        # Write & assert the ``first_name`` is gone, then...
        del client_1_jane['first_name']
        self.assertTrue(client_1_jane.save())
        check_name = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(check_name['first_name'], None)
        # ...overwrite the data with what's in memory.
        client_2_jane['first_name'] = 'Joan'
        # Now a write that fails due to default expectations...
        self.assertRaises(exceptions.JSONResponseError, client_2_jane.save)
        # ... so we force an overwrite.
        self.assertTrue(client_2_jane.save(overwrite=True))
        check_name_again = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(check_name_again['first_name'], 'Joan')
        # Reset it.
        jane['username'] = 'jane'
        jane['first_name'] = 'Jane'
        jane['last_name'] = 'Doe'
        jane['friend_count'] = 3
        self.assertTrue(jane.save(overwrite=True))
        # Test the partial update behavior.
        client_3_jane = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        client_4_jane = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        client_3_jane['favorite_band'] = 'Feed Me'
        # No ``overwrite`` needed due to new data.
        self.assertTrue(client_3_jane.save())
        # Expectations are only checked on the ``first_name``, so what wouldn't
        # have succeeded by default does succeed here.
        client_4_jane['first_name'] = 'Jacqueline'
        self.assertTrue(client_4_jane.partial_save())
        partial_jane = users.get_item(
            username='jane',
            friend_count=3,
            consistent=True
        )
        self.assertEqual(partial_jane['favorite_band'], 'Feed Me')
        self.assertEqual(partial_jane['first_name'], 'Jacqueline')
        # Reset it.
        jane['username'] = 'jane'
        jane['first_name'] = 'Jane'
        jane['last_name'] = 'Doe'
        jane['friend_count'] = 3
        self.assertTrue(jane.save(overwrite=True))
        # Ensure that partial saves of a brand-new object work.
        sadie = Item(users, data={
            'username': 'sadie',
            'first_name': 'Sadie',
            'favorite_band': 'Zedd',
            'friend_count': 7
        })
        self.assertTrue(sadie.partial_save())
        serverside_sadie = users.get_item(
            username='sadie',
            friend_count=7,
            consistent=True
        )
        self.assertEqual(serverside_sadie['first_name'], 'Sadie')
        # Test the eventually consistent query.
        results = users.query(
            username__eq='johndoe',
            last_name__eq='Doe',
            index='LastNameIndex',
            attributes=('username',),
            reverse=True
        )
        for res in results:
            self.assertTrue(res['username'] in ['johndoe',])
            self.assertEqual(res.keys(), ['username'])
        # Ensure that queries with attributes don't return the hash key.
        results = users.query(
            username__eq='johndoe',
            friend_count__eq=4,
            attributes=('first_name',)
        )
        for res in results:
            self.assertTrue(res['first_name'] in ['John',])
            self.assertEqual(res.keys(), ['first_name'])
        # Test the strongly consistent query.
        c_results = users.query(
            username__eq='johndoe',
            last_name__eq='Doe',
            index='LastNameIndex',
            reverse=True,
            consistent=True
        )
        for res in c_results:
            self.assertTrue(res['username'] in ['johndoe',])
        # Test scans without filters.
        all_users = users.scan(limit=7)
        self.assertEqual(all_users.next()['username'], 'bob')
        self.assertEqual(all_users.next()['username'], 'jane')
        self.assertEqual(all_users.next()['username'], 'johndoe')
        # Test scans with a filter.
        filtered_users = users.scan(limit=2, username__beginswith='j')
        self.assertEqual(filtered_users.next()['username'], 'jane')
        self.assertEqual(filtered_users.next()['username'], 'johndoe')
        # Test deleting a single item.
        johndoe = users.get_item(username='johndoe', friend_count=4)
        johndoe.delete()
        # Test the eventually consistent batch get.
        results = users.batch_get(keys=[
            {'username': 'bob', 'friend_count': 1},
            {'username': 'jane', 'friend_count': 3}
        ])
        batch_users = []
        for res in results:
            batch_users.append(res)
            self.assertTrue(res['first_name'] in ['Bob', 'Jane'])
        self.assertEqual(len(batch_users), 2)
        # Test the strongly consistent batch get.
        c_results = users.batch_get(keys=[
            {'username': 'bob', 'friend_count': 1},
            {'username': 'jane', 'friend_count': 3}
        ], consistent=True)
        c_batch_users = []
        for res in c_results:
            c_batch_users.append(res)
            self.assertTrue(res['first_name'] in ['Bob', 'Jane'])
        self.assertEqual(len(c_batch_users), 2)
        # Test count, but in a weak fashion. Because lag time.
        self.assertTrue(users.count() > -1)
        # Test query count
        count = users.query_count(
            username__eq='bob',
        )
        self.assertEqual(count, 1)
        # Test without LSIs (describe calls shouldn't fail).
        admins = Table.create('admins', schema=[
            HashKey('username')
        ])
        self.addCleanup(admins.delete)
        time.sleep(60)
        admins.describe()
        self.assertEqual(admins.throughput['read'], 5)
        self.assertEqual(admins.indexes, [])
        # A single query term should fail on a table with *ONLY* a HashKey.
        self.assertRaises(
            exceptions.QueryError,
            admins.query,
            username__eq='johndoe'
        )
        # But it shouldn't break on more complex tables.
        res = users.query(username__eq='johndoe')
        # Test putting with/without sets.
        mau5_created = users.put_item(data={
            'username': 'mau5',
            'first_name': 'dead',
            'last_name': 'mau5',
            'friend_count': 2,
            'friends': set(['skrill', 'penny']),
        })
        self.assertTrue(mau5_created)
        penny_created = users.put_item(data={
            'username': 'penny',
            'first_name': 'Penny',
            'friend_count': 0,
            'friends': set([]),
        })
        self.assertTrue(penny_created)
        # Test attributes.
        mau5 = users.get_item(
            username='mau5',
            friend_count=2,
            attributes=['username', 'first_name']
        )
        self.assertEqual(mau5['username'], 'mau5')
        self.assertEqual(mau5['first_name'], 'dead')
        self.assertTrue('last_name' not in mau5)

    def test_unprocessed_batch_writes(self):
        """Overflowing a low-throughput table's batch writer should queue
        unprocessed items and flush them all by the time the context exits."""
        # Create a very limited table w/ low throughput.
        users = Table.create('slow_users', schema=[
            HashKey('user_id'),
        ], throughput={
            'read': 1,
            'write': 1,
        })
        self.addCleanup(users.delete)
        # Wait for it.
        time.sleep(60)
        with users.batch_write() as batch:
            for i in range(500):
                batch.put_item(data={
                    'user_id': str(i),
                    'name': 'Droid #{0}'.format(i),
                })
            # Before ``__exit__`` runs, we should have a bunch of unprocessed
            # items.
            self.assertTrue(len(batch._unprocessed) > 0)
        # Post-__exit__, they should all be gone.
        self.assertEqual(len(batch._unprocessed), 0)

    def test_gsi(self):
        """Create a table with a global secondary index, then update both the
        table's and the index's provisioned throughput."""
        users = Table.create('gsi_users', schema=[
            HashKey('user_id'),
        ], throughput={
            'read': 5,
            'write': 3,
        },
        global_indexes=[
            GlobalKeysOnlyIndex('StuffIndex', parts=[
                HashKey('user_id')
            ], throughput={
                'read': 2,
                'write': 1,
            }),
        ])
        self.addCleanup(users.delete)
        # Wait for it.
        time.sleep(60)
        users.update(
            throughput={
                'read': 3,
                'write': 4
            },
            global_indexes={
                'StuffIndex': {
                    'read': 1,
                    'write': 2
                }
            }
        )
        # Wait again for the changes to finish propagating.
        time.sleep(120)

    def test_query_with_limits(self):
        """Querying with ``max_page_size`` should page through the result set
        in small fetches while still returning every matching item."""
        # Per the DDB team, it's recommended to do many smaller gets with a
        # reduced page size.
        # Clamp down the page size while ensuring that the correct number of
        # results are still returned.
        posts = Table.create('posts', schema=[
            HashKey('thread'),
            RangeKey('posted_on')
        ], throughput={
            'read': 5,
            'write': 5,
        })
        self.addCleanup(posts.delete)
        # Wait for it.
        time.sleep(60)
        # Add some data.
        test_data_path = os.path.join(
            os.path.dirname(__file__),
            'forum_test_data.json'
        )
        with open(test_data_path, 'r') as test_data:
            data = json.load(test_data)
            with posts.batch_write() as batch:
                for post in data:
                    batch.put_item(post)
        time.sleep(5)
        # Test the reduced page size.
        results = posts.query(
            thread__eq='Favorite chiptune band?',
            posted_on__gte='2013-12-24T00:00:00',
            max_page_size=2
        )
        all_posts = list(results)
        self.assertEqual(
            [post['posted_by'] for post in all_posts],
            ['joe', 'jane', 'joe', 'joe', 'jane', 'joe']
        )
        # Six results at two per page means exactly three fetches.
        self.assertEqual(results._fetches, 3)
| {
"content_hash": "cd528a921cecfc6a86a3919f760b6e73",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 79,
"avg_line_length": 31.555808656036447,
"alnum_prop": 0.5269616689525735,
"repo_name": "dablak/boto",
"id": "5cf00291f3a27e0b3cdec2eff285fa5dc48cfddd",
"size": "14977",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/integration/dynamodb2/test_highlevel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "5296027"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
} |
import loguru
import pytest
@pytest.fixture(autouse=True)
def reset_logger():
    """Restore loguru's logger to a pristine state around every test."""
    def _restore():
        # Drop all sinks, then re-run the logger's constructor with a fresh
        # core and the default construction arguments.
        loguru.logger.remove()
        core = loguru._logger.Core()
        loguru.logger.__init__(
            core, None, 0, False, False, False, False, True, None, {}
        )
        loguru._logger.context.set({})

    _restore()
    yield
    _restore()
@pytest.fixture
def writer():
    """Provide a callable sink that records messages for later assertions."""
    def sink(message):
        sink.written.append(message)

    # Expose the captured messages plus read/clear helpers as attributes.
    sink.written = []
    sink.read = lambda: "".join(sink.written)
    sink.clear = lambda: sink.written.clear()
    return sink
| {
"content_hash": "1803c2ac4c85685d0570a08bc489ea03",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 18.964285714285715,
"alnum_prop": 0.5781544256120528,
"repo_name": "twotwo/tools-python",
"id": "f46132ac442dabe73a3730b8b6f0b2f9b010aef6",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loguru-sample/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1760"
},
{
"name": "Batchfile",
"bytes": "171"
},
{
"name": "CSS",
"bytes": "7877"
},
{
"name": "Dockerfile",
"bytes": "670"
},
{
"name": "HTML",
"bytes": "7506"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "173223"
},
{
"name": "Shell",
"bytes": "1094"
}
],
"symlink_target": ""
} |
import sys
import numpy
from math import sqrt,exp
from ase.data import atomic_numbers, chemical_symbols
from ase.units import Bohr
from asap3 import FullNeighborList
# Global parameters: one tuple per supported element.
parameters = {
    #      E0     s0    V0     eta2    kappa   lambda  n0       Lattice type
    #      eV     bohr  eV     bohr^-1 bohr^-1 bohr^-1 bohr^-3
    'H': (-2.21, 0.71, 2.132, 1.652, 2.790, 1.892, 0.00547, 'dimer'),
    'Al': (-3.28, 3.00, 1.493, 1.240, 2.000, 1.169, 0.00700, 'fcc'),
    'Cu': (-3.51, 2.67, 2.476, 1.652, 2.740, 1.906, 0.00910, 'fcc'),
    'Ag': (-2.96, 3.01, 2.132, 1.652, 2.790, 1.892, 0.00547, 'fcc'),
    'Au': (-3.80, 3.00, 2.321, 1.674, 2.873, 2.182, 0.00703, 'fcc'),
    'Ni': (-4.44, 2.60, 3.673, 1.669, 2.757, 1.948, 0.01030, 'fcc'),
    'Pd': (-3.90, 2.87, 2.773, 1.818, 3.107, 2.155, 0.00688, 'fcc'),
    'Pt': (-5.85, 2.90, 4.067, 1.812, 3.145, 2.192, 0.00802, 'fcc'),
    'C': (-1.97, 1.18, 0.132, 3.652, 5.790, 2.892, 0.01322, 'dimer'),
    'N': (-4.97, 1.18, 0.132, 2.652, 3.790, 2.892, 0.01222, 'dimer'),
    'O': (-2.97, 1.25, 2.132, 3.652, 5.790, 4.892, 0.00850, 'dimer')}
beta = 1.809  # Calculated from the following equation
# beta = ((16 * Pi) / 3)^(1/3) / 2^(1/2)
# The "largest" element possibly supported by the calculator (this is
# determined by the length of the list chemical_symbols).
NumElle = len(chemical_symbols)
class EMT:
""" This class is an implementation of the revised edition of the Effective Medium Theory approach of calculating the energy of a given FCC crystal system. The functional form of the equations used can be found in ******* """
def __init__(self,Params=None,ZeroPoint=1):
""" Initializes the EMT object. The input Params is used to specify userdefined parameters and the input
Zeropoint is used to specify whether the potential energy measured realtive to E0 = Ecoh (ZeroPoint = 1)
or E0 = 0 (ZeroPoint = 0). """
# Secures that the calculator is initialized correctly the first time it is used.
self.energy = None
self.ZeroPoint = ZeroPoint
# If no parameters have been specified when creating the EMT object the default parameters are used.
if Params is None:
self.parameters = parameters
else:
self.parameters = Params
    def initialize(self, atoms):
        """Build all per-element and per-element-pair lookup tables for *atoms*.

        Fills NumElle-sized parameter arrays (converted from atomic to ASE
        units), the pairwise cutoff/linear-subtraction tables, chi ratios,
        the gamma normalization constants, and the neighbor list.
        """
        # A list, Z, of the element type (atomic number) for each atom:
        self.Z = atoms.get_atomic_numbers()
        # The number of atoms:
        self.N = len(self.Z)
        # Per-element arrays for eta2, kappa, Seq (s0), E0, V0, L (lambda) and
        # n0, indexed by atomic number; entries stay zero for elements that
        # are not present in self.parameters.
        self.eta2 = numpy.zeros(NumElle)
        self.kappa = numpy.zeros(NumElle)
        self.Seq = numpy.zeros(NumElle)
        self.E0 = numpy.zeros(NumElle)
        self.V0 = numpy.zeros(NumElle)
        self.L = numpy.zeros(NumElle)
        self.n0 = numpy.zeros(NumElle)
        for i in range(NumElle):
            if chemical_symbols[i] in self.parameters:
                # Convert from atomic units (bohr) to ASE units via Bohr.
                self.eta2[i] = self.parameters[chemical_symbols[i]][3] / Bohr
                self.kappa[i] = self.parameters[chemical_symbols[i]][4] / Bohr
                self.Seq[i] = self.parameters[chemical_symbols[i]][1] * Bohr
                self.E0[i] = self.parameters[chemical_symbols[i]][0]
                self.V0[i] = self.parameters[chemical_symbols[i]][2]
                self.L[i] = self.parameters[chemical_symbols[i]][5] / Bohr
                self.n0[i] = self.parameters[chemical_symbols[i]][6] / (Bohr**3)
        # Calculation of the NumElle x NumElle arrays:
        # r_cut: cutoff length for the atom pair of type (Z, Z')
        # sigmaaRCUT / sigmabRCUT: sigmaa / sigmab evaluated at r_cut[Z, Z']
        # dsigmaadrRCUT / dsigmabdrRCUT: their derivatives at r_cut[Z, Z']
        # chi: chi ratio for the atom pair of type (Z, Z')
        # The cutoff distances use the fcc relation a0 = sqrt(2) * beta * s0:
        self.r_cut = numpy.zeros([NumElle,NumElle])
        for i in range(NumElle):
            for j in range(NumElle):
                # Only compute r_cut when both elements are defined (nonzero
                # Seq); it is the cutoff of whichever element has the larger
                # lattice constant.
                if self.Seq[i] and self.Seq[j] != 0:
                    self.r_cut[i,j] = (1./2. * (sqrt(3. / 2.) + sqrt(4. / 2.)) * (sqrt(2) * beta) *
                                       max(self.Seq[i],self.Seq[j]))
        # sigmaa/sigmab and their radial derivatives evaluated at the cutoff;
        # used as linear subtraction terms so contributions go smoothly to
        # zero at r_cut.
        self.dsigmaadrRCUT = numpy.zeros([NumElle,NumElle])
        self.dsigmabdrRCUT = numpy.zeros([NumElle,NumElle])
        self.sigmaaRCUT = numpy.zeros([NumElle,NumElle])
        self.sigmabRCUT = numpy.zeros([NumElle,NumElle])
        for i in range(NumElle):
            for j in range(NumElle):
                # r_cut[i,j] == 0 marks an undefined element pair.
                if self.r_cut[i,j] != 0:
                    self.sigmaaRCUT[i,j] = (numpy.exp(self.eta2[j] *
                                            (-self.r_cut[i,j] + self.Seq[j] * beta)))
                    self.sigmabRCUT[i,j] = (numpy.exp(self.kappa[j] *
                                            (-self.r_cut[i,j] / beta + self.Seq[j])))
                    self.dsigmaadrRCUT[i,j] = -self.eta2[j] * self.sigmaaRCUT[i,j]
                    self.dsigmabdrRCUT[i,j] = -self.kappa[j] / beta * self.sigmabRCUT[i,j]
        # chi[Z, Z'] = n0[Z] / n0[Z'] for every defined element pair.
        self.chi = numpy.zeros([NumElle,NumElle])
        for i in range(NumElle):
            for j in range(NumElle):
                if self.n0[i] and self.n0[j] != 0:
                    self.chi[i,j] = self.n0[i] / self.n0[j]
        # gamma1 / gamma2: normalization sums over the first three fcc
        # neighbor shells at distances sqrt(1,2,3) * beta * Seq (with Z = Z'),
        # using shell multiplicities 12, 6 and 24.
        NumberNearestNeighbours = 3
        # Arrays of the linear subtraction terms and sigmaa/sigmab per shell.
        L_1_Z = numpy.zeros([NumberNearestNeighbours,NumElle])
        L_2_Z = numpy.zeros([NumberNearestNeighbours,NumElle])
        sigmaa_Z = numpy.zeros([NumberNearestNeighbours,NumElle])
        sigmab_Z = numpy.zeros([NumberNearestNeighbours,NumElle])
        for i in range(NumberNearestNeighbours):
            L_1_Z[i] = (self.dsigmaadrRCUT[range(NumElle),range(NumElle)] *
                        ((sqrt(1 + i) * beta * self.Seq) - self.r_cut[range(NumElle),range(NumElle)]) +
                        self.sigmaaRCUT[range(NumElle),range(NumElle)])
            L_2_Z[i] = (self.dsigmabdrRCUT[range(NumElle),range(NumElle)] *
                        ((sqrt(1 + i) * beta * self.Seq) - self.r_cut[range(NumElle),range(NumElle)]) +
                        self.sigmabRCUT[range(NumElle),range(NumElle)])
            sigmaa_Z[i] = numpy.exp(self.eta2 * (-(sqrt(1 + i) * beta * self.Seq) + self.Seq * beta))
            sigmab_Z[i] = numpy.exp(self.kappa * (-(sqrt(1 + i) * self.Seq) + self.Seq))
        # NOTE(review): (Seq/Seq) is intended to mask out undefined elements,
        # but 0/0 actually yields nan (with a runtime warning), not 0 —
        # presumably harmless because gamma is only read for defined
        # elements; confirm before relying on undefined entries.
        self.gamma1 = ((self.Seq/self.Seq) *
                       (12 * (sigmaa_Z[0] - L_1_Z[0]) +
                        6 * (sigmaa_Z[1] - L_1_Z[1]) +
                        24 * (sigmaa_Z[2] - L_1_Z[2]) ))
        self.gamma2 = ((self.Seq/self.Seq) *
                       (12 * (sigmab_Z[0] - L_2_Z[0]) +
                        6 * (sigmab_Z[1] - L_2_Z[1]) +
                        24 * (sigmab_Z[2] - L_2_Z[2]) ))
        # Full neighbor list using the largest pairwise cutoff.
        self.nbList = FullNeighborList(self.r_cut.max(),atoms)
        # Cached results; recomputed by update() when atoms change.
        self.forces = None
        self.energy = None
def NeighborList_rcutReduced(self,i):
""" Method which makes sure that only the neighboratoms within the correct cutoff for the involved
element types Z,Z' are included in the calculations by modifying the output of the FullNeighborList
function. """
# Relavant data about the neighbor atom, j, for atom i which can possible give a contribution are
# selected
(other_j,r_ij,rsq) = self.nbList.get_neighbors(i)
# The neighbor atoms which will actually give a contribution to the energy, based on the individual
# cutoff distances between atom i of type Z and atom j of type Z', are selected.
# The neighbor atoms which fullfill the condition are chosen
keep = numpy.sqrt(rsq) <= self.r_cut[self.Z[i],self.Z[other_j]]
# The lists of data about the neighbor atoms are updated
if len(keep) != 0:
return (other_j[keep],r_ij[keep],rsq[keep])
else:
# nbList returned empty lists, but we cannot index a shape (0,3) array (r_ij)
# with an empty list (bug in numpy?).
return (other_j,r_ij,rsq)
def update(self, atoms):
""" This method is called by the atoms object to which the calculator is attached, it secures that the
energy (and/or force) of the system is recalculated if this is required. """
need_calc = False
if (self.energy is None or len(self.Z) != len(atoms) or (self.Z != atoms.get_atomic_numbers()).any()):
# The calculator is initialized with regards to the atoms object.
self.initialize(atoms)
need_calc = True
elif (self.positions != atoms.get_positions()).any():
# The atoms object has not changed enough for the calculator to need a reinitialization but a
# new calculation of the value for the energies are still needed.
need_calc = True
if need_calc:
self.positions = atoms.get_positions()
self.nbList.check_and_update(atoms)
self.energy = self.calculate_Energy()
self.forces = self.calculate_Force()
# Returns the energy of the atoms (the method calculates the energy first if needed be)
def get_potential_energy(self, atoms):
self.update(atoms)
return self.energy
# Returns the forces on the atoms (the method calculates the forces first if needed be)
def get_forces(self, atoms):
self.update(atoms)
return self.forces.copy()
def get_stress(self, atoms):
raise NotImplementedError('No stresses implemented')
    ########## ENERGY Calculations ##########
    ### sigma_1,2 ###
    def calculate_sigma12(self):
        """ Calculates and returns sigma_1 and sigma_2.

        Both are N-arrays with one entry per atom: for each atom i the
        exponential pair contributions minus the linear subtraction
        functions are summed over i's neighbors (within the element-pair
        cutoff), weighted by chi[Z_i, Z_j].  sigma_1 is floored at 1e-9 so
        the logarithm/division in later steps cannot fail.
        """
        # The N-arrays for sigma_1,2 are initialized
        sigma_1 = numpy.zeros(self.N)
        sigma_2 = numpy.zeros(self.N)
        for i in range(self.N):
            # The numbers of the neighbor atoms, the relative position vectors and length of the position
            # vectors squared between atom i and the neighbor atoms j are defined in three arrays.
            (other_j,r_ij,rsq) = self.NeighborList_rcutReduced(i)
            # The values for the linear subtraction functions evaluated at norm(r_ij,2) for all the atom
            # pairs, [i,other_j], are calculated.  They shift each pair
            # contribution so it vanishes smoothly at the cutoff distance.
            L_1_i = (self.dsigmaadrRCUT[self.Z[i],self.Z[other_j]] *
                     (numpy.sqrt(rsq) - self.r_cut[self.Z[i],self.Z[other_j]]) +
                     self.sigmaaRCUT[self.Z[i],self.Z[other_j]])
            L_2_i = (self.dsigmabdrRCUT[self.Z[i],self.Z[other_j]] *
                     (numpy.sqrt(rsq) - self.r_cut[self.Z[i],self.Z[other_j]]) +
                     self.sigmabRCUT[self.Z[i],self.Z[other_j]])
            # sigmaa_i and sigmab_i are evaluated at norm(r_ij,2) for all the atom pairs, [i,other_j].
            # (`beta` is a module-level constant defined elsewhere in this file.)
            sigmaa_i = (numpy.exp(self.eta2[self.Z[other_j]] *
                                  (-numpy.sqrt(rsq) + self.Seq[self.Z[other_j]] * beta)))
            sigmab_i = (numpy.exp(self.kappa[self.Z[other_j]] *
                                  (-numpy.sqrt(rsq) / beta + self.Seq[self.Z[other_j]])))
            #if (i == 1):
            #    print sigmaa_i
            # The values of sigma_1_i and sigma_2_i are calculated for
            # the atom i.  max(1e-9, ...) enforces a non-zero minimum for
            # sigma_1 so the following calculations will not raise an error.
            sigma_1[i] = max( pow(10,-9) , (self.chi[self.Z[i],self.Z[other_j]] * (sigmaa_i - L_1_i)).sum() )
            sigma_2[i] = (self.chi[self.Z[i],self.Z[other_j]] * (sigmab_i - L_2_i)).sum()
        #TESTER#
        #print sigma_1[:10]
        return (sigma_1,sigma_2)
### s_i ###
def calculate_s(self,sigma_1):
""" Calculates and returns an N-array containing the neutrality sphere radii, s, for the atoms of the
system is calculated."""
return self.Seq[self.Z] - numpy.log(sigma_1 / self.gamma1[self.Z]) / (beta * self.eta2[self.Z])
### E_tot ###
def calculate_Energy_function(self,s,sigma_2):
""" Calculates and returns the total energy of the system using s and sigma_2. """
# Calculation of the N-array containing the cohesive energy for each of the N atoms.
E_c = ( self.E0[self.Z] * (self.L[self.Z] * (s - self.Seq[self.Z]) + 1) *
numpy.exp(-self.L[self.Z] * (s - self.Seq[self.Z])) ) - self.E0[self.Z] * self.ZeroPoint
# Calculation of the N-array containing the atomic sphere correction energy for each of the N atoms.
E_as = (6 * ( self.V0[self.Z] * numpy.exp(-self.kappa[self.Z] * (s - self.Seq[self.Z]))
-self.V0[self.Z] * sigma_2 / self.gamma2[self.Z] ) )
# Calculation of the total energy
return (E_c + E_as).sum()
### Final Energy Calculator ###
def calculate_Energy(self):
""" Calculates and returnes the energy of the atoms in the atom object to which the
EMT calculator is attached. The calculations are done using the following methods,
also defined in EMT.py: calculate_sigma12(self), calculate_s(self,sigma_1),
calculate_Energy_function(self,s,sigma_2). """
(sigma_1,sigma_2) = self.calculate_sigma12()
s = self.calculate_s(sigma_1)
# The total energy is calculated and returned
return self.calculate_Energy_function(s,sigma_2)
########## FORCE Calculations ##########
### dsdsigma_1 ###
def calculate_dsdsigma_1(self,sigma_1):
""" Calculates and returns dsdsigma_1 using sigma_1. """
# An N-array containing the the deriviative of neutrality sphere radii, s, with regards to sigma_1 for
# the atoms of the system is calculated.
dsdsigma_1 = -1 / (beta * self.eta2[self.Z] * sigma_1)
return dsdsigma_1
### dE_cids, dE_asds, dE_asdsigma_2 ###
def calculate_Deriviative_of_Energy(self,s):
""" Calculates and returns the deriviatives of E_cs and E_as with regards to s and sigma_2. """
# Calculation of the N-array containing the deriviative of the cohesive energy with regards to s for
# each of the N atoms.
dE_cds = -( self.E0[self.Z] * self.L[self.Z] * self.L[self.Z] *
numpy.exp(-self.L[self.Z] * (s - self.Seq[self.Z])) * (s - self.Seq[self.Z]) )
# Calculation of the N-array containing the deriviative of the atomic sphere correction energy with
# regards to s for each of the N atoms.
dE_asds = -6 * self.kappa[self.Z] * self.V0[self.Z] * numpy.exp(-self.kappa[self.Z] * (s - self.Seq[self.Z]))
# Calculation of the N-array containing the deriviative of the atomic sphere correction energy with
# regards to sigma_2 for each of the N atoms.
dE_asdsigma_2 = -6 * self.V0[self.Z] / (self.gamma2[self.Z])
return (dE_cds,dE_asds,dE_asdsigma_2)
    ### F_kalpha ###
    def calculate_Force_function(self,dE_cds,dE_asds,dE_asdsigma_2,dsdsigma_1):
        """ Calculates the force on all k atoms in the three directions {x,y,z} represented by alpha.

        For every atom k the chain rule is applied twice: once for the terms
        where k is the central atom of the pair sums, and once for the terms
        where k appears as a neighbor of the other atoms (other_i).
        """
        # An array for the force is initialized
        F = numpy.zeros([self.N,3])
        for k in range(self.N):
            # The atoms interacting with atom k are selected.
            (other_i,r_ki,rsq) = self.NeighborList_rcutReduced(k)
            #print other_i
            #print r_ki
            #print numpy.sqrt(rsq)
            # The values for dr_ijdr_kalpha are calculated for the relevant atoms, k and other_i
            # (unit vectors along each pair separation).
            dr_kidr_kalpha = r_ki / numpy.sqrt(rsq)[:,numpy.newaxis]
            ## The force on the k'th atom caused by the atoms, other_i's, interactions with k are calculated ##
            # The values for dsigmaa_idr_ij and dsigmab_idr_ij are calculated with regards to the k'th atom.
            # (`beta` is a module-level constant defined elsewhere in this file.)
            sigmaa_k = numpy.exp(self.eta2[self.Z[other_i]] *
                                 (-numpy.sqrt(rsq) + self.Seq[self.Z[other_i]] * beta))
            sigmab_k = numpy.exp(self.kappa[self.Z[other_i]] *
                                 (-numpy.sqrt(rsq) / beta + self.Seq[self.Z[other_i]]))
            dsigmaa_kdr_ki = (-self.eta2[self.Z[other_i]] * sigmaa_k)[:,numpy.newaxis]
            dsigmab_kdr_ki = (-self.kappa[self.Z[other_i]] / beta * sigmab_k)[:,numpy.newaxis]
            # Values for dL_1idr_ij and dL_2idr_ij are calculated with regards to the k'th atom
            # (the linear subtraction functions have constant slope, so their
            # derivatives are just the tabulated slopes at the cutoff).
            dL_1kdr_ki = self.dsigmaadrRCUT[self.Z[k],self.Z[other_i]][:,numpy.newaxis]
            dL_2kdr_ki = self.dsigmabdrRCUT[self.Z[k],self.Z[other_i]][:,numpy.newaxis]
            # First the value of dsigma1_idr_kaplha and dsigma2_idr_kaplha are calculated for the k'th atom
            dsigma1_kdr_kalpha = ( self.chi[self.Z[k],self.Z[other_i]][:,numpy.newaxis] *
                                   (dsigmaa_kdr_ki - dL_1kdr_ki) * dr_kidr_kalpha ).sum(axis=0)
            dsigma2_kdr_kalpha = ( self.chi[self.Z[k],self.Z[other_i]][:,numpy.newaxis] *
                                   (dsigmab_kdr_ki - dL_2kdr_ki) * dr_kidr_kalpha ).sum(axis=0)
            # NOTE(review): temporary sanity check ("TJEK DER SKAL FJERNES SENERE"
            # is Danish for "check, to be removed later"); asserts vanish under -O.
            """ TJEK DER SKAL FJERNES SENERE """
            assert len(dsigma1_kdr_kalpha) == 3
            """ TJEK DER SKAL FJERNES SENERE """
            # The contribution to the force on atom k from the k'th atoms interaction with the other_i atoms is
            # calculated
            F[k] = (dE_cds[k] * dsdsigma_1[k] * dsigma1_kdr_kalpha +
                    dE_asds[k] * dsdsigma_1[k] * dsigma1_kdr_kalpha +
                    dE_asdsigma_2[k] * dsigma2_kdr_kalpha)
            # The values for dsigmaa_idr_ij and dsigmab_idr_ij are calculated with regards to the atoms other_i
            # where j = k for all other_i (thus we only need one value of dsigmaa_idr_ij).
            sigmaa_i = numpy.exp(self.eta2[self.Z[k]] * (-numpy.sqrt(rsq) + self.Seq[self.Z[k]] * beta))
            sigmab_i = numpy.exp(self.kappa[self.Z[k]] * (-numpy.sqrt(rsq) / beta + self.Seq[self.Z[k]]))
            dsigmaa_idr_ik = (-self.eta2[self.Z[k]] * sigmaa_i)[:,numpy.newaxis]
            dsigmab_idr_ik = (-self.kappa[self.Z[k]] / beta * sigmab_i)[:,numpy.newaxis]
            # Values for dL_1idr_ij and dL_2idr_ij are calculated with regards to the atoms other_i
            # where j = k for all other_i.
            dL_1idr_ik = self.dsigmaadrRCUT[self.Z[other_i],self.Z[k]][:,numpy.newaxis]
            dL_2idr_ik = self.dsigmabdrRCUT[self.Z[other_i],self.Z[k]][:,numpy.newaxis]
            # First the value of dsigma1_idr_kaplha and dsigma2_idr_kaplha are calculated with regards to the atoms
            # other_i where j are only the atom k for all other_i. (thus the sum only has one element for all other_i.
            # which results in the calculations leading to an [other_i,3]-array.
            dsigma1_idr_kalpha = (self.chi[self.Z[other_i],self.Z[k]][:,numpy.newaxis] *
                                  (dsigmaa_idr_ik - dL_1idr_ik) * (dr_kidr_kalpha) )
            dsigma2_idr_kalpha = (self.chi[self.Z[other_i],self.Z[k]][:,numpy.newaxis] *
                                  (dsigmab_idr_ik - dL_2idr_ik) * (dr_kidr_kalpha) )
            # The contribution to the force on atom k from the other_i atoms interaction with the k'th atom is now
            # calculated
            F[k] += (dE_cds[other_i][:,numpy.newaxis] * dsdsigma_1[other_i][:,numpy.newaxis] * dsigma1_idr_kalpha +
                     dE_asds[other_i][:,numpy.newaxis] * dsdsigma_1[other_i][:,numpy.newaxis] * dsigma1_idr_kalpha +
                     dE_asdsigma_2[other_i][:,numpy.newaxis] * dsigma2_idr_kalpha).sum(axis=0)
            # NOTE(review): temporary sanity check, see note above in this method.
            """ TJEK DER SKAL FJERNES SENERE """
            assert len(F[k]) == 3
            """ TJEK DER SKAL FJERNES SENERE """
        return F
### Final Force Calculator ###
def calculate_Force(self):
""" Calculates and returnes the force acting on each of the atoms in the atoms object to which the
EMT calculator is attached. These calculations are done using the following methods,
also defined in EMT.py: calculate_sigma12(self), calculate_s(self,sigma_1), calculate_dsdsigma_1
(self,sigma_1), calculate_Deriviative_of_Energy(self,s) and calculate_Force_function
(self,dE_cds,dE_asds,dE_asdsigma_2,dsdsigma_1) """
(sigma_1,sigma_2) = self.calculate_sigma12()
s = self.calculate_s(sigma_1)
dsdsigma_1 = self.calculate_dsdsigma_1(sigma_1)
(dE_cds,dE_asds,dE_asdsigma_2) = self.calculate_Deriviative_of_Energy(s)
# The force is calculated and returned
return self.calculate_Force_function(dE_cds,dE_asds,dE_asdsigma_2,dsdsigma_1)
| {
"content_hash": "316290893cde0804de69af5db97af9a8",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 229,
"avg_line_length": 50.8942731277533,
"alnum_prop": 0.5791136501341643,
"repo_name": "auag92/n2dm",
"id": "ac1a722ba3d5ee244c19d64ae114791354fa9b05",
"size": "23251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Asap-3.8.4/Python/asap3/EMT2011_py.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4529"
},
{
"name": "C++",
"bytes": "1472384"
},
{
"name": "CSS",
"bytes": "5059"
},
{
"name": "Jupyter Notebook",
"bytes": "7328"
},
{
"name": "Makefile",
"bytes": "86067"
},
{
"name": "Matlab",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1232765"
},
{
"name": "Shell",
"bytes": "13226"
},
{
"name": "Smarty",
"bytes": "4212"
},
{
"name": "TeX",
"bytes": "5561"
}
],
"symlink_target": ""
} |
from recipe_scrapers.momswithcrockpots import MomsWithCrockPots
from tests import ScraperTest
class TestMomsWithCrockPotsScraper(ScraperTest):
    """Exercise the MomsWithCrockPots scraper against a recorded page."""

    scraper_class = MomsWithCrockPots

    def test_host(self):
        self.assertEqual("momswithcrockpots.com", self.harvester_class.host())

    def test_canonical_url(self):
        expected_url = "https://momswithcrockpots.com/slow-cooked-macaroni-cheese/"
        self.assertEqual(expected_url, self.harvester_class.canonical_url())

    def test_title(self):
        self.assertEqual(self.harvester_class.title(), "Crockpot Macaroni & Cheese")

    def test_total_time(self):
        self.assertEqual(225, self.harvester_class.total_time())

    def test_yields(self):
        self.assertEqual("6 servings", self.harvester_class.yields())

    def test_ingredients(self):
        expected_ingredients = [
            "8 ounces macaroni",
            "2 teaspoons olive oil",
            "1 cup evaporated milk",
            "1/2 cup milk",
            "1/2 teaspoon salt",
            "1/4 teaspoon ground black pepper",
            "2 cups Cheddar cheese (shredded, or a Cheddar blend)",
            "4 tablespoons butter (melted)",
        ]
        self.assertCountEqual(expected_ingredients, self.harvester_class.ingredients())

    def test_instructions(self):
        expected_instructions = 'Cook the macaroni following package directions. Drain in a colander and rinse with hot water. Drain well.\nGenerously butter the sides and bottom of a 3 1/2- to 4-quart slow cooker (I use about 2 tablespoons of butter).\nCombine the macaroni with the remaining ingredients in the slow cooker and blend well. Cover the slow cooker and cook on LOW for 2 1/2 to 3 1/2 hours, stirring a few times.\nIf desired, spoon the cooked macaroni and cheese mixture into a baking dish, sprinkle with a little more cheese, and put under the broiler for a minute or 2, just until cheese is melted.\nWhen the macaroni and cheese is done, feel free to spoon into a baking dish, top with a little more cheese, and put under the broiler for a minute or two for that "fresh from the oven" look.\nUse a gluten free pasta'
        return self.assertEqual(expected_instructions, self.harvester_class.instructions())

    def test_ratings(self):
        self.assertEqual(4.0, self.harvester_class.ratings())
| {
"content_hash": "769440a0db363a0cf1888829ec03f780",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 813,
"avg_line_length": 47.63265306122449,
"alnum_prop": 0.6636675235646958,
"repo_name": "hhursev/recipe-scraper",
"id": "57e53de4c263a1cc1e7479dbfbea35b93d526b5d",
"size": "2334",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_momswithcrockpots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88554"
}
],
"symlink_target": ""
} |
import sys
import os
import shutil
from subprocess import call, check_output
import time
def job_is_running(job_id):
    '''
    Check whether the PBS job with the given id is still running.

    `qstat <job_id>` exits with status 0 while the job is known to the
    scheduler, so a zero return code means the job is running.
    '''
    exit_code = call(['qstat', job_id])
    return exit_code == 0
def main():
    '''
    Brokers communication between Delft3D, MATLAB and Dakota through files.

    Steps: stage the initialize directory into the Dakota run directory,
    render the Delft3D input template with Dakota's parameters, submit the
    Delft3D PBS job, poll until it finishes, then run a MATLAB script that
    turns the Delft3D output into the Dakota results file (sys.argv[2]).
    sys.argv[1] is the Dakota parameters file, sys.argv[2] the results file.
    '''
    # Files and directories.
    start_dir = os.path.dirname(os.path.realpath(__file__))
    initialize_dir = os.path.join(start_dir, 'initialize')
    input_template = 'WLD.sed.template'
    input_file = 'WLD.sed'
    cells_file = 'nesting.txt'
    analysis_program = 'total_sed_cal'
    analysis_program_file = analysis_program + '.m'
    analysis_results_file = analysis_program + '.out'
    # Copy the contents of the initialize directory into the current
    # run directory. (Don't use shutil.copytree because the
    # destination directory exists.)
    for f in os.listdir(initialize_dir):
        shutil.copy(os.path.join(initialize_dir, f), os.getcwd())
    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
    # substitute the parameters from Dakota into the input template,
    # creating a new Delft3D input file.
    shutil.copy(os.path.join(start_dir, input_template), os.getcwd())
    call(['dprepro', sys.argv[1], input_template, input_file])
    # Call Delft3D, using the updated input file. Note that `qsub`
    # returns immediately with the PBS job id.
    # NOTE(review): on Python 3, check_output returns bytes, so rstrip('\n')
    # would need a decode first -- this script appears written for Python 2.
    r = check_output(['qsub', 'run_delft3d_wave.sh'])
    job_id = r.rstrip('\n')
    # Poll the Delft3D job every 10 min. Proceed when it's finished.
    while job_is_running(job_id):
        time.sleep(600)
    # Call a MATLAB script to read the Delft3D output, calculate the
    # desired responses, and write the Dakota results file.
    shutil.copy(os.path.join(start_dir, cells_file), os.getcwd())
    shutil.copy(os.path.join(start_dir, analysis_program_file), os.getcwd())
    print('Current directory: ' + os.getcwd())
    matlab_cmd = '-r "' + analysis_program + '; exit"'
    r = call(['matlab', matlab_cmd])
    print('MATLAB exit status code = ' + str(r))
    # Hand the MATLAB-produced responses back to Dakota under the
    # results-file name Dakota passed on the command line.
    shutil.move(analysis_results_file, sys.argv[2])
# Script entry point: only run the broker when executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "4ae7af6817423b20fcf6228572b93183",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 36.56666666666667,
"alnum_prop": 0.6631722880583409,
"repo_name": "mdpiper/dakota-experiments",
"id": "405d7a806ca20278f48e18354301b6d2e7bc90aa",
"size": "2323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/delft3d-vector-parameter-study-2/run_delft3d.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "10253"
},
{
"name": "Python",
"bytes": "79630"
},
{
"name": "Shell",
"bytes": "3389"
}
],
"symlink_target": ""
} |
import random
import datetime as dt
from pyoperant import panels
from pyoperant import utils
class Shaper(object):
    """
    Run a shaping routine in the operant chamber that will teach an animal
    to peck the center key to hear a stimulus, then peck one of the side keys for reward.
    training sequence:
    Block 1:  Hopper comes up on VI (stays up for 5 s) for the first day
        that the animal is in the apparatus. Center key flashes for 5 sec, prior
        to the hopper access. If the center key is pressed while flashing, then
        the hopper comes up and then the session jumps to block 2 immediately.
    Block 2:  The center key flashes until pecked.  When pecked the hopper comes up for
        4 sec.  Run 100 trials.
    Block 3:  The center key flashes until pecked, then either the right or left (p = .5)
        key flashes until pecked, then the hopper comes up for 3 sec.  Run 100 trials.
    Block 4:  Wait for peck to non-flashing center key, then right or left key flashes
        until pecked, then food for 2.5 sec.  Run 100 trials."""
    def __init__(self, panel, log, parameters, error_callback=None):
        """Store the panel/log/parameters and wire every block to a placeholder.

        Subclasses overwrite self.block1..block5 with concrete training blocks.
        """
        self.panel = panel
        # NOTE(review): asserts are stripped under `python -O`; argument
        # validation would survive as explicit raises.
        assert isinstance(panel, panels.BasePanel)
        self.log = log
        assert log is not None
        self.parameters = parameters
        assert 'light_schedule' in self.parameters
        self.error_callback = error_callback
        self.recent_state = 0
        self.last_response = None
        self.block1 = self._null_block(1)
        self.block2 = self._null_block(2)
        self.block3 = self._null_block(3)
        self.block4 = self._null_block(4)
        self.block5 = self._null_block(5)
    def run_shape(self, start_state='block1'):
        """Run the top-level shaping state machine, starting at start_state."""
        self.log.warning('Starting shaping procedure')
        utils.run_state_machine( start_in=start_state,
                                 error_state='block1',
                                 error_callback=self.error_callback,
                                 block1=self.block1,
                                 block2=self.block2,
                                 block3=self.block3,
                                 block4=self.block4,
                                 block5=self.block5,
                                 sleep_block=self._run_sleep,
                                 free_food_block=self._free_food)
        self.log.warning('Shaping procedure complete. Remember to disable shaping in your config file')
    def _null_block(self, block_num):
        """Placeholder block: immediately advance to the next block."""
        def temp():
            return self.block_name(block_num + 1)
        return temp
    def _check_free_food_block(self):
        """Return True if a free-food schedule exists and it is currently
        free-food time, False otherwise."""
        if 'free_food_schedule' in self.parameters:
            if utils.check_time(self.parameters['free_food_schedule']):
                return True
        # Fix: previously fell off the end (returning None); return an
        # explicit False so the method is consistently boolean.
        return False
    def _hopper_block(self, block_num):
        """
        Block 1:  Hopper comes up on VI (stays up for 5 s) for the first day
        that the animal is in the apparatus. Center key flashes for 5 sec, prior
        to the hopper access. If the center key is pressed while flashing, then
        the hopper comes up and then the session jumps to block 2 immediately"""
        def temp():
            self.recent_state = block_num
            self.log.warning('Starting %s'%(self.block_name(block_num)))
            if self._check_free_food_block(): return 'free_food_block'
            utils.run_state_machine( start_in='init',
                                     error_state='wait',
                                     error_callback=self.error_callback,
                                     init=self._block_init('wait'),
                                     wait=self._wait_block(10, 40,'check'),
                                     check=self._check_block('flash_mid', 1, float('inf')),
                                     flash_mid=self._flash_poll(self.panel.center, 5, 'reward', 'pre_reward'),
                                     pre_reward=self._pre_reward('reward'),
                                     reward=self.reward(5, 'check2'),
                                     check2=self._check_block('wait', 1, float('inf')))
            # check if its time for free food
            if self._check_free_food_block(): return 'free_food_block'
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            return self.block_name(block_num + 1)
        return temp
    def _center_peck_block(self, block_num, reps=100, revert_timeout=10800):
        """Block 2:  The center key flashes until pecked.  When pecked the hopper comes up for
        4 sec.  Run 100 trials.
        Reverts to the previous block if there is no response before
        revert_timeout seconds (60*60*3 = 10800)."""
        def temp():
            self.recent_state = block_num
            self.log.warning('Starting %s'%(self.block_name(block_num)))
            utils.run_state_machine( start_in='init',
                                     error_state='check',
                                     error_callback=self.error_callback,
                                     init=self._block_init('check'),
                                     check=self._check_block('poll_mid', reps, revert_timeout),
                                     poll_mid=self._flash_poll(self.panel.center, 10, 'check', 'pre_reward'),
                                     pre_reward=self._pre_reward('reward'),
                                     reward=self.reward(4, 'check'))
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            if self._check_free_food_block(): return 'free_food_block'
            if self.responded_block:
                return self.block_name(block_num + 1)
            else:
                return self.block_name(block_num - 1)
        return temp
    def _block_init(self, next_state):
        """Record the block start time and reset the per-block response state."""
        def temp():
            self.block_start = dt.datetime.now()
            self.log.info('Block start time: %s'%(self.block_start.isoformat(' ')))
            self.log.info("Blk #\tTrl #\tResp Key\tResp Time")
            self.responded_block = False
            self.response_counter = 0
            return next_state
        return temp
    def _check_block(self, next_state, reps, revert_timeout):
        """Gate block progression: stop on timeout, completed reps,
        lights-out, or free-food time; otherwise continue to next_state."""
        def temp():
            if not self.responded_block:
                elapsed_time = (dt.datetime.now() - self.block_start).total_seconds()
                if elapsed_time > revert_timeout:
                    self.log.warning("No response in block %d, reverting to block %d. Time: %s"%(self.recent_state, self.recent_state - 1, dt.datetime.now().isoformat(' ')))
                    return None
            else:
                if self.response_counter >= reps:
                    return None
            if not utils.check_time(self.parameters['light_schedule']):
                return None
            # NOTE(review): 'free_food_block' is only defined in the top-level
            # machine run by run_shape; returning it from inside a nested
            # state machine relies on its error handling -- confirm intended.
            if self._check_free_food_block(): return 'free_food_block'
            return next_state
        return temp
    def _pre_reward(self, next_state):
        """Mark that the block got a response and bump the trial counter."""
        def temp():
            self.responded_block = True
            self.response_counter = self.response_counter + 1
            return next_state
        return temp
    def _wait_block(self, t_min, t_max, next_state):
        """Wait a fixed or uniformly random number of seconds, then continue."""
        def temp():
            if t_min == t_max:
                t = t_max
            else:
                t = random.randrange(t_min, t_max)
            utils.wait(t)
            return next_state
        return temp
    def _poll(self, component, duration, next_state, reward_state=None, poll_state=None):
        """Poll `component` for up to `duration` seconds; go to reward_state
        on a response, otherwise to next_state."""
        if poll_state is None:  # fix: identity comparison per PEP 8
            poll_state = self._poll_main
        def temp():
            utils.run_state_machine( start_in='init',
                                     init=self._polling_init('main'),
                                     main=poll_state(component, duration))
            if self.responded_poll:
                return reward_state
            else:
                return next_state
        return temp
    def _flash_poll(self, component, duration, next_state, reward_state=None):
        """Poll with the component's light flashing while waiting."""
        return self._poll(component, duration, next_state, reward_state, poll_state=self._flashing_main)
    def _light_poll(self, component, duration, next_state, reward_state=None):
        """Poll with the component's light held on while waiting."""
        return self._poll(component, duration, next_state, reward_state, poll_state=self._light_main)
    def _polling_init(self, next_state):
        """Record the poll start time and reset the per-poll response state."""
        def temp():
            self.polling_start = dt.datetime.now()
            self.responded_poll = False
            self.last_response = None
            return next_state
        return temp
    # TODO: remake to not hog CPU
    def _poll_main(self, component, duration):
        """Busy-poll `component` for up to `duration` seconds."""
        def temp():
            elapsed_time = (dt.datetime.now() - self.polling_start).total_seconds()
            if elapsed_time <= duration:
                if component.status():
                    self.responded_poll = True
                    self.last_response = component.name
                    return None
                utils.wait(.015)
                return 'main'
            else:
                return None
        return temp
    def _flashing_main(self, component, duration, period=1):
        """Busy-poll while flashing the component's light with `period` seconds."""
        def temp():
            elapsed_time = (dt.datetime.now() - self.polling_start).total_seconds()
            if elapsed_time <= duration:
                # Light on for the first half of each flash period, off for the rest.
                if ((elapsed_time % period) - (period / 2.0)) < 0:
                    component.on()
                else:
                    component.off()
                if component.status():
                    component.off()
                    self.responded_poll = True
                    self.last_response = component.name
                    return None
                utils.wait(.015)
                return 'main'
            else:
                component.off()
                return None
        return temp
    def _light_main(self, component, duration):
        """Busy-poll with the component's light held on."""
        def temp():
            elapsed_time = (dt.datetime.now() - self.polling_start).total_seconds()
            if elapsed_time <= duration:
                component.on()
                if component.status():
                    component.off()
                    self.responded_poll = True
                    self.last_response = component.name
                    return None
                utils.wait(.015)
                return 'main'
            else:
                component.off()
                return None
        return temp
    #TODO: catch errors here
    def reward(self, value, next_state):
        """Log the trial and raise the hopper for `value` seconds."""
        def temp():
            self.log.info('%d\t%d\t%s\t%s'%(self.recent_state, self.response_counter, self.last_response, dt.datetime.now().isoformat(' ')))
            self.panel.reward(value=value)
            return next_state
        return temp
    def _rand_state(self, states):
        """Return a state function that picks uniformly from `states`."""
        def temp():
            return random.choice(states)
        return temp
    # defining functions for sleep
    #TODO: there should really be a separate sleeper or some better solution
    def sleep_pre(self):
        self.log.debug('lights off. going to sleep...')
        return 'main'
    def sleep_main(self):
        """Keep the house light off until the light schedule says daytime."""
        self.log.debug('sleeping...')
        self.panel.house_light.off()
        utils.wait(self.parameters['idle_poll_interval'])
        if not utils.check_time(self.parameters['light_schedule']):
            return 'main'
        else:
            return 'post'
    def sleep_post(self):
        self.log.debug('ending sleep')
        self.panel.house_light.on()
        # self.init_summary()
        return None
    def _run_sleep(self):
        """Run the sleep loop, then resume the most recent shaping block."""
        utils.run_state_machine(start_in='pre',
                                error_state='post',
                                error_callback=self.error_callback,
                                pre=self.sleep_pre,
                                main=self.sleep_main,
                                post=self.sleep_post)
        return self.block_name(self.recent_state)
    def free_food_pre(self):
        self.log.debug('Buffet starting.')
        return 'main'
    def free_food_main(self):
        """Dole out food every ~5 s until the free-food schedule ends."""
        utils.run_state_machine(start_in='wait',
                                error_state='wait',
                                error_callback=self.error_callback,
                                wait=self._wait_block(5, 5, 'food'),
                                food=self.deliver_free_food(10, 'checker'),
                                checker=self.food_checker('wait')
                                )
        if not utils.check_time(self.parameters['free_food_schedule']):
            return 'post'
        else:
            return 'main'
    def food_checker(self, next_state):
        # should we still be giving free food?
        def temp():
            if 'free_food_schedule' in self.parameters:
                if utils.check_time(self.parameters['free_food_schedule']):
                    return next_state
            return None
        return temp
    def free_food_post(self):
        self.log.debug('Free food over.')
        self.panel.house_light.on()
        # self.init_summary()
        return None
    def _free_food(self):
        """Run the free-food loop, then resume the most recent shaping block."""
        utils.run_state_machine(start_in='pre',
                                error_state='post',
                                error_callback=self.error_callback,
                                pre=self.free_food_pre,
                                main=self.free_food_main,
                                post=self.free_food_post)
        return self.block_name(self.recent_state)
    def deliver_free_food(self, value, next_state):
        """ reward function with no frills
        """
        def temp():
            self.log.debug('Doling out some yum yums.')
            self.panel.reward(value=value)
            return next_state
        return temp
    def block_name(self, block_num):
        """Return 'blockN' for 1 <= N <= 5, else None (ends the state machine)."""
        if block_num >= 1 and block_num <= 5:
            return "block%d"%block_num
        else:
            return None
class Shaper2AC(Shaper):
    """Run a shaping routine in the operant chamber that will teach an animal
    to peck the center key to hear a stimulus, then peck one of the side keys for reward.
    training sequence:
    Block 1:  Hopper comes up on VI (stays up for 5 s) for the first day
        that the animal is in the apparatus. Center key flashes for 5 sec, prior
        to the hopper access. If the center key is pressed while flashing, then
        the hopper comes up and then the session jumps to block 2 immediately.
    Block 2:  The center key flashes until pecked.  When pecked the hopper comes up for
        4 sec.  Run 100 trials.
    Block 3:  The center key flashes until pecked, then either the right or left (p = .5)
        key flashes until pecked, then the hopper comes up for 3 sec.  Run 100 trials.
    Block 4:  Wait for peck to non-flashing center key, then right or left key flashes
        until pecked, then food for 2.5 sec.   Run 100 trials."""
    def __init__(self, panel, log, parameters, error_callback=None):
        # Blocks 1 and 2 are inherited from Shaper; 3 and 4 add the
        # two-alternative-choice (left/right) response stages.
        super(Shaper2AC, self).__init__(panel, log, parameters, error_callback)
        self.block1 = self._hopper_block(1)
        self.block2 = self._center_peck_block(2)
        self.block3 = self._response_2ac_block(3)
        self.block4 = self._response_2ac_no_flash_block(4)
    def _response_2ac_block(self, block_num, reps=100, revert_timeout=10800):
        """Block 3:  The center key flashes until pecked, then either the right or left (p = .5)
        key flashes until pecked, then the hopper comes up for 3 sec. Run 100 trials."""
        def temp():
            self.recent_state = block_num
            self.log.warning('Starting %s'%(self.block_name(block_num)))
            # coin_flip randomly chooses which side key must be pecked.
            utils.run_state_machine( start_in='init',
                                     error_state='check',
                                     error_callback=self.error_callback,
                                     init=self._block_init('check'),
                                     check=self._check_block('poll_mid', reps, revert_timeout),
                                     poll_mid=self._flash_poll(self.panel.center, 10, 'check', 'coin_flip'),
                                     coin_flip=self._rand_state(('check_right', 'check_left')),
                                     check_right=self._check_block('poll_right', reps, revert_timeout),
                                     poll_right=self._flash_poll(self.panel.right, 10, 'check_right', 'pre_reward'),
                                     check_left=self._check_block('poll_left', reps, revert_timeout),
                                     poll_left=self._flash_poll(self.panel.left, 10, 'check_left', 'pre_reward'),
                                     pre_reward=self._pre_reward('reward'),
                                     reward=self.reward(3, 'check'))
            # Advance on any response during the block, otherwise revert.
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            if self._check_free_food_block(): return 'free_food_block'
            if self.responded_block:
                return self.block_name(block_num + 1)
            else:
                return self.block_name(block_num - 1)
        return temp
    def _response_2ac_no_flash_block(self, block_num, reps=100, revert_timeout=10800):
        """Block 4:  Wait for peck to non-flashing center key, then right or left key flashes
        until pecked, then food for 2.5 sec.   Run 100 trials."""
        def temp():
            self.recent_state = block_num
            self.log.warning('Starting %s'%(self.block_name(block_num)))
            # Same as block 3 except the center key does not flash (plain poll).
            utils.run_state_machine( start_in='init',
                                     error_state='check',
                                     error_callback=self.error_callback,
                                     init=self._block_init('check'),
                                     check=self._check_block('poll_mid', reps, revert_timeout),
                                     poll_mid=self._poll(self.panel.center, 10, 'check', 'coin_flip'),
                                     coin_flip=self._rand_state(('check_right', 'check_left')),
                                     check_right=self._check_block('poll_right', reps, revert_timeout),
                                     poll_right=self._flash_poll(self.panel.right, 10, 'check_right', 'pre_reward'),
                                     check_left=self._check_block('poll_left', reps, revert_timeout),
                                     poll_left=self._flash_poll(self.panel.left, 10, 'check_left', 'pre_reward'),
                                     pre_reward=self._pre_reward('reward'),
                                     reward=self.reward(2.5, 'check'))
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            if self._check_free_food_block(): return 'free_food_block'
            if self.responded_block:
                return self.block_name(block_num + 1)
            else:
                return self.block_name(block_num - 1)
        return temp
class ShaperGoNogo(Shaper):
    """Shaping routine for the go/nogo terminal procedure (also usable with
    one- or two-hopper 2-choice apparatus).

    Block 1: hopper comes up on a VI (5 s up) on the animal's first day in
        the box; the center key flashes for 5 s before hopper access, and a
        peck to the flashing key raises the hopper and jumps straight to
        block 2.
    Block 2: the center key flashes until pecked; a peck raises the hopper
        for 4 s.  Runs 100 trials.
    Block 3: wait for a peck to the non-flashing center key, then raise the
        hopper for 2.5 s.  Runs 100 trials.

    NOTE: in a two-hopper apparatus the go/nogo procedure uses only the
    right-hand key and hopper; add left-hand support if that is needed often.
    """
    def __init__(self, panel, log, parameters, error_callback=None):
        super(ShaperGoNogo, self).__init__(panel, log, parameters, error_callback)
        # Wire up the three go/nogo training blocks in order.
        block_factories = (self._hopper_block,
                           self._center_peck_block,
                           self._center_peck_no_flash_block)
        for number, factory in enumerate(block_factories, start=1):
            setattr(self, 'block%d' % number, factory(number))
    def _center_peck_no_flash_block(self, block_num):
        """Terminal go/nogo block (non-flashing center key) -- not written yet."""
        raise NotImplementedError
class ShaperFemalePref(Shaper):
    """Shaping routine for female pecking preference in the operant chamber.

    Terminal procedure: peck one of the side keys for stimulus presentation
    followed by reward.

    Block 1: hopper comes up on a VI (5 s up) on the first day in the box;
        the left and right keylights flash for 5 s before hopper access, and
        a peck to either flashing key raises the hopper and jumps straight
        to block 2.
    Block 2: either L or R (chosen at random) flashes until pecked; a peck
        raises the hopper for 4 s.
    Block 3: wait for a peck to a non-flashing L or R key (chosen at
        random); a peck gives food for 2.5 s.
    """
    def __init__(self, panel, log, parameters, error_callback=None):
        super(ShaperFemalePref, self).__init__(panel, log, parameters, error_callback)
        # Wire up the three preference-training blocks in order.
        for number, factory in enumerate((self._hopper_block,
                                          self._female_choice_block,
                                          self._female_choice_no_flash_block),
                                         start=1):
            setattr(self, 'block%d' % number, factory(number))
    def _female_choice_block(self, block_num):
        """Flashing left/right choice block -- not written yet."""
        raise NotImplementedError
    def _female_choice_no_flash_block(self, block_num):
        """Non-flashing left/right choice block -- not written yet."""
        raise NotImplementedError
class Shaper3AC(Shaper):
    """run a shaping routine for 3AC the operant chamber
    terminal proc: peck center key for stimulus presentation then peck one of three keys L-C-R, or give no response.
    Training sequence invoked as:
    Block 1: Hopper comes up on VI (stays up for 5 s) for the first day
    that the animal is in the apparatus. Center key flashes for 5 sec, prior
    to the hopper access. If the center key is pressed while flashing, then
    the hopper comes up and then the session jumps to block 2 immediately.
    Block 2: The center key flashes until pecked. When pecked the hopper comes up for
    4 sec. Run 100 trials.
    Block 3: The center key flashes until pecked, then either the right, left, or center
    key flashes (p=0.333) until pecked, then the hopper comes up for 3 sec. Run 150 trials.
    Block 4: Wait for peck to non-flashing center key, then right, center,or left key flashes
    until pecked, then food for 2.5 sec. Run 150 trials."""
    def __init__(self, panel, log, parameters, error_callback=None):
        """Build blocks 1-4 of the 3-alternative-choice shaping sequence."""
        super(Shaper3AC, self).__init__(panel, log, parameters, error_callback)
        self.block1 = self._hopper_block(1)
        self.block2 = self._center_peck_block(2)
        self.block3 = self._response_3ac_block(3)
        self.block4 = self._response_3ac_no_flash_block(4)
    def _response_3ac_block(self, block_num, reps=100, revert_timeout=10800):
        """Block 3: The center key flashes until pecked, then either the right, left, or center
        key flashes (p=0.333) until pecked, then the hopper comes up for 3 sec. Run 150 trials.

        NOTE(review): both this docstring and the class docstring say
        "Run 150 trials", but the default here is reps=100 -- confirm
        which value is intended.
        """
        def temp():
            # Record which block is active, then run one pass of the block's
            # state machine; the return value names the next block to run.
            self.recent_state = block_num
            self.log.warning('Starting %s'%(self.block_name(block_num)))
            utils.run_state_machine(    start_in='init',
                                        error_state='check',
                                        error_callback=self.error_callback,
                                        init=self._block_init('check'),
                                        check=self._check_block('poll_mid', reps, revert_timeout),
                                        poll_mid=self._flash_poll(self.panel.center, 10, 'check', 'coin_flip'),
                                        coin_flip=self._rand_state(('check_right', 'check_center', 'check_left')),
                                        check_right=self._check_block('poll_right', reps, revert_timeout),
                                        poll_right=self._flash_poll(self.panel.right, 10, 'check_right', 'pre_reward'),
                                        check_center=self._check_block('poll_center', reps, revert_timeout),
                                        poll_center=self._flash_poll(self.panel.center, 10, 'check_center', 'pre_reward'),
                                        check_left=self._check_block('poll_left', reps, revert_timeout),
                                        poll_left=self._flash_poll(self.panel.left, 10, 'check_left', 'pre_reward'),
                                        pre_reward=self._pre_reward('reward'),
                                        reward=self.reward(3, 'check'))
            # Lights-out / free-food checks take priority; otherwise advance
            # on success, or revert to the previous block on no response.
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            if self._check_free_food_block(): return 'free_food_block'
            if self.responded_block:
                return self.block_name(block_num + 1)
            else:
                return self.block_name(block_num - 1)
        return temp
    def _response_3ac_no_flash_block(self, block_num, reps=150, revert_timeout=10800):
        """Block 4: Wait for peck to non-flashing center key, then right, center,or left key flashes
        until pecked, then food for 2.5 sec. Run 150 trials."""
        def temp():
            self.recent_state = block_num
            self.log.warning('Starting %s'%(self.block_name(block_num)))
            # Identical to block 3 except the initial center poll does not
            # flash (plain _poll) and the reward is 2.5 s instead of 3 s.
            utils.run_state_machine(    start_in='init',
                                        error_state='check',
                                        error_callback=self.error_callback,
                                        init=self._block_init('check'),
                                        check=self._check_block('poll_mid', reps, revert_timeout),
                                        poll_mid=self._poll(self.panel.center, 10, 'check', 'coin_flip'),
                                        coin_flip=self._rand_state(('check_right', 'check_center', 'check_left')),
                                        check_right=self._check_block('poll_right', reps, revert_timeout),
                                        poll_right=self._flash_poll(self.panel.right, 10, 'check_right', 'pre_reward'),
                                        check_center=self._check_block('poll_center', reps, revert_timeout),
                                        poll_center=self._flash_poll(self.panel.center, 10, 'check_center', 'pre_reward'),
                                        check_left=self._check_block('poll_left', reps, revert_timeout),
                                        poll_left=self._flash_poll(self.panel.left, 10, 'check_left', 'pre_reward'),
                                        pre_reward=self._pre_reward('reward'),
                                        reward=self.reward(2.5, 'check'))
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            if self._check_free_food_block(): return 'free_food_block'
            if self.responded_block:
                return self.block_name(block_num + 1)
            else:
                return self.block_name(block_num - 1)
        return temp
class Shaper3ACMatching(Shaper3AC):
    """Shaper3AC extended with a fifth block that plays an audio stimulus
    before each choice poll (an auditory matching task).

    ``get_stimuli`` must be a callable taking a trial class ('L', 'C' or 'R')
    and returning a ``(trial_stim, trial_motifs)`` pair; ``trial_stim`` needs
    ``name`` and ``file_origin`` attributes playable through the panel speaker.
    """
    def __init__(self, panel, log, parameters, get_stimuli, error_callback=None):
        # BUG FIX: the original called super(Shaper3AC, self).__init__, which
        # resolves *past* Shaper3AC in the MRO straight to Shaper.__init__,
        # so Shaper3AC.__init__ never ran and block1-block4 were never
        # created (AttributeError once the shaping sequence advanced).
        super(Shaper3ACMatching, self).__init__(panel, log, parameters, error_callback)
        assert hasattr(get_stimuli, '__call__')
        self.get_stimuli = get_stimuli
        self.block5 = self._response_3ac_matching_audio_block(5)

    def _response_3ac_matching_audio_block(self, block_num, reps=150, revert_timeout=10800):
        """Block 5: a center peck starts the trial, audio plays for the
        randomly chosen side, the matching key flashes until pecked, the
        audio is stopped, then a 2.5 s reward is delivered."""
        def temp():
            self.recent_state = block_num
            # NOTE(review): blocks 3/4 log at warning level; this block uses
            # info -- confirm whether the difference is intentional.
            self.log.info('Starting %s'%(self.block_name(block_num)))
            utils.run_state_machine(    start_in='init',
                                        error_state='check',
                                        error_callback=self.error_callback,
                                        init=self._block_init('check'),
                                        check=self._check_block('poll_mid', reps, revert_timeout),
                                        poll_mid=self._poll(self.panel.center, 10, 'check', 'coin_flip'),
                                        coin_flip=self._rand_state(('check_right', 'check_center', 'check_left')),
                                        check_right=self._check_block('audio_right', reps, revert_timeout),
                                        audio_right=self._play_audio('poll_right', 'R'),
                                        poll_right=self._flash_poll(self.panel.right, 10, 'check_right', 'close_audio'),
                                        check_center=self._check_block('audio_center', reps, revert_timeout),
                                        audio_center=self._play_audio('poll_center', 'C'),
                                        poll_center=self._flash_poll(self.panel.center, 10, 'check_center', 'close_audio'),
                                        check_left=self._check_block('audio_left', reps, revert_timeout),
                                        audio_left=self._play_audio('poll_left', 'L'),
                                        poll_left=self._flash_poll(self.panel.left, 10, 'check_left', 'close_audio'),
                                        close_audio=self._close_audio('pre_reward'),
                                        pre_reward=self._pre_reward('reward'),
                                        reward=self.reward(2.5, 'check'))
            # Lights-out / free-food checks take priority; otherwise advance
            # on success, or revert to the previous block on no response.
            if not utils.check_time(self.parameters['light_schedule']):
                return 'sleep_block'
            if self._check_free_food_block(): return 'free_food_block'
            if self.responded_block:
                return self.block_name(block_num + 1)
            else:
                return self.block_name(block_num - 1)
        return temp

    def _play_audio(self, next_state, trial_class):
        """Return a state function that queues and starts the stimulus for
        *trial_class*, then transitions to *next_state*."""
        def temp():
            trial_stim, trial_motifs = self.get_stimuli(trial_class)
            self.log.debug("presenting stimulus %s" % trial_stim.name)
            self.panel.speaker.queue(trial_stim.file_origin)
            self.panel.speaker.play()
            return next_state
        return temp

    def _close_audio(self, next_state):
        """Return a state function that stops playback, then transitions to
        *next_state*."""
        def temp():
            self.panel.speaker.stop()
            return next_state
        return temp
| {
"content_hash": "4f7813b313d10a2c9c6a1680a6268ed1",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 174,
"avg_line_length": 51.247540983606555,
"alnum_prop": 0.5352036083298679,
"repo_name": "theilmbh/pyoperant",
"id": "bee5bca7242a141b444c042a1f62579c4bb6121f",
"size": "31261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyoperant/behavior/shape.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2143"
},
{
"name": "C++",
"bytes": "1870"
},
{
"name": "Jupyter Notebook",
"bytes": "58096"
},
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Perl",
"bytes": "8684"
},
{
"name": "Python",
"bytes": "232695"
}
],
"symlink_target": ""
} |
import pytest
from app.models import User
def test_password_setter():
    """Assigning a password must populate the stored hash."""
    user = User(password='supersecret')
    assert user.password_hash is not None
def test_no_password_getter():
    """Reading the plain-text password attribute must raise AttributeError."""
    user = User(password='supersecret')
    with pytest.raises(AttributeError):
        user.password
def test_password_verification():
    """verify_password accepts the correct password and rejects a wrong one."""
    user = User(password='cat')
    assert user.verify_password('cat')
    assert user.verify_password('dog') is False
def test_password_salts_are_random():
    """Two users with the same password must get distinct salted hashes."""
    first = User(password='cat')
    second = User(password='cat')
    assert first.password_hash != second.password_hash
def test_valid_email_change(session):
    """A token generated by a user lets that same user change their email."""
    user = User(email='test@example.com')
    session.add(user)
    session.commit()
    token = user.generate_email_change_token('new-email@example.com')
    assert user.change_email(token)
def test_duplicate_email_change(session):
    """Changing to an email already owned by another user must fail."""
    owner = User(email='test@example.com')
    claimant = User(email='old-email@example.com')
    session.add(owner)
    session.add(claimant)
    session.commit()
    token = claimant.generate_email_change_token('test@example.com')
    # It's not possible to change to another user's email
    assert claimant.change_email(token) is False
def test_invalid_email_change_token(session):
    """A token issued to one user cannot be redeemed by another user."""
    john = User(email='john@example.com', password='cat')
    susan = User(email='susan@example.org', password='dog')
    session.add(john)
    session.add(susan)
    session.commit()
    token = john.generate_email_change_token('david@example.net')
    # It's not possible to use another user's token
    assert susan.change_email(token) is False
    assert susan.email == 'susan@example.org'
| {
"content_hash": "051aec2d3804c7d61b3ea01cf57d0837",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 66,
"avg_line_length": 27,
"alnum_prop": 0.6794380587484036,
"repo_name": "teknik-eksjo/chronos",
"id": "ceba6828a43a1829d84800384c51c1652f9e339b",
"size": "1566",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "web/tests/test_user_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7491"
},
{
"name": "CSS",
"bytes": "5623"
},
{
"name": "HTML",
"bytes": "14677"
},
{
"name": "JavaScript",
"bytes": "29984"
},
{
"name": "Makefile",
"bytes": "7864"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "71380"
}
],
"symlink_target": ""
} |
from github_way.main import return_true
def test_return_true():
    """return_true must hand back the True singleton."""
    result = return_true()
    assert result is True
| {
"content_hash": "095633f5c821f2c6447ce04e8a2275ff",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 39,
"avg_line_length": 24.5,
"alnum_prop": 0.7346938775510204,
"repo_name": "katakumpo/github-way",
"id": "65990008848fe52a57a67c1a466e32095a09280f",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class Hlayer2rule2(Himesis):
    """Compiled Himesis graph for the DSLTrans rule ``layer2rule2``.

    NOTE(review): this class appears machine-generated (a compiled
    transformation rule) -- prefer regenerating it over hand-editing the
    node/edge tables below.
    """
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule layer2rule2.

        Node index map (referenced by the edge list at the bottom):
          0 match model, 1 apply model, 2 paired_with,
          3 match VoidType, 4 match OperationParameter,
          5 apply VoidType, 6 apply Argument,
          7 match association (type), 8 apply association (type),
          9 backward link.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(Hlayer2rule2, self).__init__(name='Hlayer2rule2', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """layer2rule2"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer2rule2')
        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["attr1"] = """layer2rule2"""
        # match class VoidType(layer2rule2class0VoidType) node
        self.add_node()
        self.vs[3]["mm__"] = """VoidType"""
        self.vs[3]["attr1"] = """+"""
        # match class OperationParameter(layer2rule2class1OperationParameter) node
        self.add_node()
        self.vs[4]["mm__"] = """OperationParameter"""
        self.vs[4]["attr1"] = """+"""
        # apply class VoidType(layer2rule2class2VoidType) node
        self.add_node()
        self.vs[5]["mm__"] = """VoidType"""
        self.vs[5]["attr1"] = """1"""
        # apply class Argument(layer2rule2class3Argument) node
        self.add_node()
        self.vs[6]["mm__"] = """Argument"""
        self.vs[6]["attr1"] = """1"""
        # match association OperationParameter--type-->VoidType node
        self.add_node()
        self.vs[7]["attr1"] = """type"""
        self.vs[7]["mm__"] = """directLink_S"""
        # apply association Argument--type-->VoidType node
        self.add_node()
        self.vs[8]["attr1"] = """type"""
        self.vs[8]["mm__"] = """directLink_T"""
        # backward association Argument-->OperationParameternode
        self.add_node()
        self.vs[9]["mm__"] = """backward_link"""
        # Add the edges
        self.add_edges([
            (0,3),  # matchmodel -> match_class VoidType(layer2rule2class0VoidType)
            (0,4),  # matchmodel -> match_class OperationParameter(layer2rule2class1OperationParameter)
            (1,5),  # applymodel -> apply_classVoidType(layer2rule2class2VoidType)
            (1,6),  # applymodel -> apply_classArgument(layer2rule2class3Argument)
            (4,7),  # match classOperationParameter(layer2rule2class1OperationParameter) -> association type
            (7,3),  # associationtype -> match_classOperationParameter(layer2rule2class0VoidType)
            (6,8),  # apply class Argument(layer2rule2class3Argument) -> association type
            (8,5),  # associationtype -> apply_classVoidType(layer2rule2class2VoidType)
            (6,9),  # apply class Argument(layer2rule2class1OperationParameter) -> backward_association
            (9,4),  # backward_associationOperationParameter -> match_class OperationParameter(layer2rule2class1OperationParameter)
            (0,2),  # matchmodel -> pairedwith
            (2,1)   # pairedwith -> applyModel
        ])
        # No attribute equations for this rule.
        self["equations"] = []
| {
"content_hash": "c723cd9e308cb5878ac8d2efe0543cc5",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 121,
"avg_line_length": 35.329268292682926,
"alnum_prop": 0.6738004832585434,
"repo_name": "levilucio/SyVOLT",
"id": "66680bb87ed295a17e154cf39ccc9708a6cb127f",
"size": "2897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbeddr2C_MM/transformation_from_mps/Hlayer2rule2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import unittest
from refiner.geometry import Box
class BoxTestCase(unittest.TestCase):
    """Tests for refiner.geometry.Box: construction, equality, derived
    coordinate setters, containment and scaling."""
    def test_width_height_constructor(self):
        """Constructing from (left, top, width, height) derives right/bottom."""
        box = Box(10, 10, width=10, height=10)
        self.assertEqual(box.left, 10, 'incorrect left')
        self.assertEqual(box.top, 10, 'incorrect top')
        self.assertEqual(box.right, 20, 'incorrect right')
        self.assertEqual(box.bottom, 20, 'incorrect bottom')
        self.assertEqual(box.width, 10, 'incorrect width')
        self.assertEqual(box.height, 10, 'incorrect height')
    def test_right_bottom_constructor(self):
        """Constructing from (left, top, right, bottom) derives width/height."""
        box = Box(10, 10, right=20, bottom=20)
        self.assertEqual(box.left, 10, 'incorrect left')
        self.assertEqual(box.top, 10, 'incorrect top')
        self.assertEqual(box.right, 20, 'incorrect right')
        self.assertEqual(box.bottom, 20, 'incorrect bottom')
        self.assertEqual(box.width, 10, 'incorrect width')
        self.assertEqual(box.height, 10, 'incorrect height')
    def test_constructors_equiv(self):
        """The two constructor forms must yield equal boxes for same extents."""
        width = 123
        height = 456
        box1 = Box(0, 0, width=width, height=height)
        box2 = Box(0, 0, right=width, bottom=height)
        self.assertEqual(box1, box2, 'boxes differ')
    def test_default_constructor(self):
        """Positional form is (left, top, width, height, page)."""
        box = Box(12, 34, 56, 78, 9)
        self.assertEqual(box.left, 12, 'incorrect left')
        self.assertEqual(box.top, 34, 'incorrect top')
        self.assertEqual(box.width, 56, 'incorrect width')
        self.assertEqual(box.height, 78, 'incorrect height')
        self.assertEqual(box.page, 9, 'incorrect page')
    def test_eq(self):
        """Equality compares all five fields; page=None differs from a page."""
        box = Box(10, 10, 10, 10, page=1)
        self.assertNotEqual(box, Box(11, 10, 10, 10, 1), 'left neq')
        self.assertNotEqual(box, Box(10, 11, 10, 10, 1), 'top neq')
        self.assertNotEqual(box, Box(10, 10, 11, 10, 1), 'width neq')
        self.assertNotEqual(box, Box(10, 10, 10, 11, 1), 'height neq')
        self.assertNotEqual(box, Box(10, 10, 10, 10, 2), 'page neq')
        self.assertNotEqual(box, Box(10, 10, 10, 10), 'page neq (None)')
        self.assertEqual(box, Box(10, 10, 10, 10, 1), 'should be equal')
        self.assertEqual(
            Box(1, 2, 3, 4, page=None),
            Box(1, 2, 3, 4, page=None),
            'should be equal'
        )
    def test_set_width(self):
        """Setting width moves right; left stays fixed."""
        box = Box(10, 10, 10, 10)
        box.width = 20
        self.assertEqual(box.right, 30, 'incorrect right')
        self.assertEqual(box, Box(10, 10, 20, 10), 'width set error')
    def test_set_right(self):
        """Setting right recomputes width; left stays fixed."""
        box = Box(10, 10, 10, 10)
        box.right = 30
        self.assertEqual(box.width, 20, 'incorrect width')
        self.assertEqual(box, Box(10, 10, 20, 10), 'set right error')
    def test_set_height(self):
        """Setting height moves bottom; top stays fixed."""
        box = Box(10, 10, 10, 10)
        box.height = 20
        self.assertEqual(box.bottom, 30, 'incorrect bottom')
        self.assertEqual(box, Box(10, 10, 10, 20), 'set height error')
    def test_set_bottom(self):
        """Setting bottom recomputes height; top stays fixed."""
        box = Box(10, 10, 10, 10)
        box.bottom = 30
        self.assertEqual(box.height, 20, 'incorrect height')
        self.assertEqual(box, Box(10, 10, 10, 20), 'set bottom error')
    def test_contains_no_page(self):
        """Containment without pages: full geometric inclusion, edges count."""
        cont = Box(0, 0, 100, 100)
        self.assertFalse(cont.contains(Box(-10, -10, 120, 120)))
        self.assertFalse(cont.contains(Box(110, 0, 100, 100)))
        self.assertFalse(cont.contains(Box(0, 110, 100, 100)))
        self.assertFalse(cont.contains(Box(-50, 25, 100, 50)))
        self.assertFalse(cont.contains(Box(25, -50, 50, 100)))
        self.assertFalse(cont.contains(Box(50, 25, 100, 50)))
        self.assertFalse(cont.contains(Box(25, 50, 50, 100)))
        self.assertTrue(cont.contains(cont))
        self.assertTrue(cont.contains(Box(0, 0, 100, 100)))
        self.assertTrue(cont.contains(Box(0, 25, 50, 50)))
        self.assertTrue(cont.contains(Box(25, 0, 50, 50)))
        self.assertTrue(cont.contains(Box(50, 25, 50, 50)))
        self.assertTrue(cont.contains(Box(25, 50, 50, 50)))
        self.assertTrue(cont.contains(Box(10, 10, 80, 80)))
    def test_contains_with_pages(self):
        """A paged container only contains boxes on the same page; a page-less
        container contains boxes on any page."""
        cont = Box(0, 0, 100, 100, page=1)
        self.assertFalse(cont.contains(Box(0, 0, 100, 100, page=2)))
        self.assertFalse(cont.contains(Box(0, 0, 100, 100, page=None)))
        self.assertTrue(cont.contains(cont))
        self.assertTrue(cont.contains(Box(0, 0, 100, 100, page=1)))
        self.assertTrue(cont.contains(Box(10, 10, 80, 80, page=1)))
        nopage = Box(0, 0, 100, 100, page=None)
        self.assertTrue(nopage.contains(Box(0, 0, 100, 100, page=2)))
        self.assertTrue(nopage.contains(Box(0, 0, 100, 100, page=None)))
        self.assertTrue(nopage.contains(nopage))
        self.assertTrue(nopage.contains(Box(0, 0, 100, 100, page=1)))
        self.assertTrue(nopage.contains(Box(10, 10, 80, 80, page=1)))
    def test_scale(self):
        """scale() multiplies all four coordinates by the factor.

        NOTE(review): the 1.01 case compares floats with assertEqual; it
        passes with current rounding but is fragile -- consider
        assertAlmostEqual if this ever starts failing.
        """
        box = Box(10, 10, 10, 10)
        doubleBox = box.scale(2)
        self.assertEqual(doubleBox.left, 20, '(2x) incorrect left')
        self.assertEqual(doubleBox.top, 20, '(2x) incorrect top')
        self.assertEqual(doubleBox.width, 20, '(2x) incorrect width')
        self.assertEqual(doubleBox.height, 20, '(2x) incorrect height')
        self.assertEqual(doubleBox.right, 40, '(2x) incorrect right')
        self.assertEqual(doubleBox.bottom, 40, '(2x) incorrect bottom')
        halfBox = box.scale(0.5)
        self.assertEqual(halfBox.left, 5, '(0.5x) incorrect left')
        self.assertEqual(halfBox.top, 5, '(0.5x) incorrect top')
        self.assertEqual(halfBox.width, 5, '(0.5x) incorrect width')
        self.assertEqual(halfBox.height, 5, '(0.5x) incorrect height')
        self.assertEqual(halfBox.right, 10, '(0.5x) incorrect right')
        self.assertEqual(halfBox.bottom, 10, '(0.5x) incorrect bottom')
        fracBox = box.scale(1.01)
        self.assertEqual(fracBox.left, 10.1, '(1.01x) incorrect left')
        self.assertEqual(fracBox.top, 10.1, '(1.01x) incorrect top')
        self.assertEqual(fracBox.width, 10.1, '(1.01x) incorrect width')
        self.assertEqual(fracBox.height, 10.1, '(1.01x) incorrect height')
        self.assertEqual(fracBox.right, 20.2, '(1.01x) incorrect right')
        self.assertEqual(fracBox.bottom, 20.2, '(1.01x) incorrect bottom')
| {
"content_hash": "70d00cd061925affd729ead7b6a54fe8",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 74,
"avg_line_length": 48.083969465648856,
"alnum_prop": 0.6167645658040959,
"repo_name": "maxspencer/pdfrefiner",
"id": "f60d13342b67fada8185724f7c795ffd2569ebfd",
"size": "6299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refiner/test/test_geometry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41023"
}
],
"symlink_target": ""
} |
from django.db import models
# Create your models here.
class Challenge(models.Model):
    """A scraping challenge served through the API.

    Stores the challenge prose, its expected solution and (optionally) the
    data the API serves for it.  ``challenge_id`` is a sequential public
    identifier assigned on first save.
    """
    challenge_title = models.CharField(max_length=120)
    challenge_text = models.TextField()
    solution = models.TextField()
    # data for API
    # usually in JSON
    # some challenges need data to be generated on the fly
    # https://docs.djangoproject.com/en/1.8/ref/models/fields/#blank
    # avoid using null on textfields ...
    # https://docs.djangoproject.com/en/1.8/ref/models/fields/#null
    api_data = models.TextField(blank=True)
    # type of api_data, either json or not
    is_api_data_json = models.BooleanField()
    # does challenge data require processing or can be
    # rendered directly? If it former, mention the handler name
    # or leave it empty.
    handler = models.CharField(max_length=60, blank=True)
    challenge_id = models.IntegerField()

    def __str__(self):
        return self.challenge_title

    def save(self, *args, **kwargs):
        """Assign a sequential ``challenge_id`` on first insert, then save.

        FIXES over the original:
        * accepts and forwards Django's standard ``save()`` arguments
          (``force_insert``, ``using``, ``update_fields``, ...) -- the
          original override silently dropped them;
        * assigns ``challenge_id`` only when the row is new (``pk is None``),
          so re-saving an existing challenge no longer renumbers it.

        NOTE: count()-based numbering still has a race condition under
        concurrent inserts, as the original comment acknowledged.
        """
        if self.pk is None:
            # Current implementation: ids start at 0 and increment by 1
            # per insertion.
            self.challenge_id = Challenge.objects.count()
        super(Challenge, self).save(*args, **kwargs)
| {
"content_hash": "f1afe7e13a5c789bae05e87250bc4df1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 37.833333333333336,
"alnum_prop": 0.6769456681350955,
"repo_name": "avinassh/learning-scraping",
"id": "24705f28b495c706bfb85da290bced9411f66e0d",
"size": "1362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "challenges/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4159"
},
{
"name": "HTML",
"bytes": "3248"
},
{
"name": "Python",
"bytes": "22154"
}
],
"symlink_target": ""
} |
import copy
import six
from odin import exceptions, registration
from odin.exceptions import ValidationError
from odin.fields import NOT_PROVIDED
from odin.utils import cached_property
RESOURCE_TYPE_FIELD = '$'
META_OPTION_NAMES = ('name', 'namespace', 'name_space', 'verbose_name', 'verbose_name_plural', 'abstract', 'doc_group')
class ResourceOptions(object):
    """Metadata container attached to each resource class as ``_meta``.

    Collects the resource's field definitions plus the options declared on
    its inner ``Meta`` class (name, namespace, verbose names, abstract flag,
    doc group).
    """
    def __init__(self, meta):
        self.meta = meta
        self.parents = []
        self.fields = []
        self.name = None
        self.class_name = None
        self.name_space = NOT_PROVIDED
        self.verbose_name = None
        self.verbose_name_plural = None
        self.abstract = False
        self.doc_group = None

    def contribute_to_class(self, cls, name):
        """Attach this options object to *cls* and consume its ``Meta``.

        :raises TypeError: if ``Meta`` declares attributes outside
            ``META_OPTION_NAMES``.
        """
        cls._meta = self
        self.name = cls.__name__
        self.class_name = "%s.%s" % (cls.__module__, cls.__name__)

        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            # Drop private/dunder entries. (Loop variable renamed from
            # ``name``, which shadowed this method's parameter.)
            for key in self.meta.__dict__:
                if key.startswith('_'):
                    del meta_attrs[key]
            for attr_name in META_OPTION_NAMES:
                if attr_name in meta_attrs:
                    # Allow meta to be defined as namespace
                    if attr_name == 'namespace':
                        setattr(self, 'name_space', meta_attrs.pop(attr_name))
                    else:
                        setattr(self, attr_name, meta_attrs.pop(attr_name))
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))

            # Any leftover attributes must be invalid.
            if meta_attrs:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        del self.meta

        # Derive human-readable names when not supplied explicitly.
        if not self.verbose_name:
            self.verbose_name = self.name.replace('_', ' ').strip('_ ')
        if not self.verbose_name_plural:
            self.verbose_name_plural = self.verbose_name + 's'

    @cached_property
    def field_map(self):
        # Attribute name -> field instance, computed once on first access.
        return {f.attname: f for f in self.fields}

    def add_field(self, field):
        """Register *field* with this resource."""
        self.fields.append(field)

    @property
    def resource_name(self):
        """
        Full name of resource including namespace (if specified)
        """
        if self.name_space:
            return "%s.%s" % (self.name_space, self.name)
        else:
            return self.name

    @cached_property
    def parent_resource_names(self):
        """
        List of parent resource names.
        """
        return [p._meta.resource_name for p in self.parents]

    def __repr__(self):
        return '<Options for %s>' % self.resource_name
class ResourceBase(type):
    """
    Metaclass for all Resources.
    """
    def __new__(cls, name, bases, attrs):
        """Build a resource class: attach ``_meta``, inherit namespace and
        parent fields, then register it (abstract classes are not registered).
        Returns the already-registered class if one exists for the same
        resource name, so repeated imports yield a single class object."""
        super_new = super(ResourceBase, cls).__new__

        # attrs will never be empty for classes declared in the standard way
        # (ie. with the `class` keyword). This is quite robust.
        if name == 'NewBase' and attrs == {}:
            return super_new(cls, name, bases, attrs)

        # `NewBase` is an artifact of six.with_metaclass; filter it out.
        parents = [b for b in bases if isinstance(b, ResourceBase) and not (b.__name__ == 'NewBase'
                                                                           and b.__mro__ == (b, object))]
        if not parents:
            # If this isn't a subclass of Resource, don't do anything special.
            return super_new(cls, name, bases, attrs)

        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)

        new_class.add_to_class('_meta', ResourceOptions(meta))

        # Generate a namespace if one is not provided
        if new_class._meta.name_space is NOT_PROVIDED and base_meta:
            # Namespace is inherited
            if (not new_class._meta.name_space) or (new_class._meta.name_space is NOT_PROVIDED):
                new_class._meta.name_space = base_meta.name_space
        if new_class._meta.name_space is NOT_PROVIDED:
            # Fall back to the defining module's name.
            new_class._meta.name_space = module

        # Bail out early if we have already created this class.
        r = registration.get_resource(new_class._meta.resource_name)
        if r is not None:
            return r

        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)

        # All the fields of any type declared on this model
        field_attnames = set([f.attname for f in new_class._meta.fields])

        for base in parents:
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue

            parent_fields = base._meta.fields
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.attname in field_attnames:
                    raise Exception('Local field %r in class %r clashes with field of similar name from '
                                    'base class %r' % (field.attname, name, base.__name__))
            # Inherit a deep copy of each parent field so mutation of one
            # class's fields cannot affect another's.
            for field in parent_fields:
                new_class.add_to_class(field.attname, copy.deepcopy(field))

            new_class._meta.parents += base._meta.parents
            new_class._meta.parents.append(base)

        if abstract:
            return new_class

        # Register resource
        registration.register_resources(new_class)

        # Because of the way imports happen (recursively), we may or may not be
        # the first time this model tries to register with the framework. There
        # should only be one class for each model, so we always return the
        # registered version.
        return registration.get_resource(new_class._meta.resource_name)

    def add_to_class(cls, name, value):
        """Attach *value* to *cls*, letting objects that define
        ``contribute_to_class`` (fields, options) hook themselves in."""
        if hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
class Resource(six.with_metaclass(ResourceBase)):
    """Base class for all declared resources (fields + validation)."""
    def __init__(self, **kwargs):
        """Populate every declared field from *kwargs*, falling back to each
        field's default; any unrecognised keyword raises TypeError."""
        for field in iter(self._meta.fields):
            try:
                val = kwargs.pop(field.attname)
            except KeyError:
                val = field.get_default()
            setattr(self, field.attname, val)

        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def __str__(self):
        return '%s resource' % self._meta.resource_name

    def to_dict(self):
        """
        Convert this resource into a dict
        """
        return {f.name: f.prepare(f.value_from_object(self)) for f in self._meta.fields}

    def convert_to(self, to_resource, **field_values):
        """
        Convert this resource into a specified to resource.

        A mapping must be defined for conversion between this resource and to_resource or an exception will be raised.

        Validates this resource (full_clean) before converting.
        """
        self.full_clean()
        mapping = registration.get_mapping(self.__class__, to_resource)
        return mapping(self).convert(**field_values)

    def extra_attrs(self, attrs):
        """
        Called during de-serialisation of data if there are any extra fields defined in the document.

        This allows the resource to decide how to handle these fields. By default they are ignored.
        """
        pass

    def clean(self):
        """
        Chance to do more in depth validation.
        """
        pass

    def full_clean(self):
        """
        Calls clean_fields, clean on the resource and raises ``ValidationError``
        for any errors that occurred.
        """
        errors = {}

        try:
            self.clean_fields()
        except ValidationError as e:
            errors = e.update_error_dict(errors)

        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)

        if errors:
            raise ValidationError(errors)

    def clean_fields(self):
        """Clean every field value in place; ``None`` is allowed through for
        nullable fields.  Per-field ``clean_<attname>`` hooks on the resource
        are applied after the field's own clean.  Raises ValidationError with
        a field-name -> messages mapping if anything fails."""
        errors = {}

        for f in self._meta.fields:
            raw_value = f.value_from_object(self)

            if f.null and raw_value is None:
                continue

            try:
                raw_value = f.clean(raw_value)
            except ValidationError as e:
                errors[f.name] = e.messages

            # Check for resource level clean methods.
            clean_method = getattr(self, "clean_%s" % f.attname, None)
            if callable(clean_method):
                try:
                    raw_value = clean_method(raw_value)
                except ValidationError as e:
                    errors.setdefault(f.name, []).extend(e.messages)

            setattr(self, f.attname, raw_value)

        if errors:
            raise ValidationError(errors)
def create_resource_from_dict(d, resource_name=None, full_clean=True):
    """
    Create a resource from a dict.

    :param d: source dict. NOTE: consumed destructively -- the resource-type
        key (``$``) and every field value are popped from it; anything left
        over is handed to the new resource's ``extra_attrs``.
    :param resource_name: expected resource name; the document may name a
        compatible subtype of it via the ``$`` field.
    :param full_clean: when True, run full validation on the new instance.
    :raises ValidationError: if the resource is undefined/unregistered,
        the type does not match, or any field value fails cleaning.
    """
    assert isinstance(d, dict)

    # Get the correct resource name
    document_resource_name = d.pop(RESOURCE_TYPE_FIELD, resource_name)
    if not (document_resource_name or resource_name):
        raise exceptions.ValidationError("Resource not defined.")

    resource_type = registration.get_resource(document_resource_name)
    if not resource_type:
        raise exceptions.ValidationError("Resource `%s` is not registered." % document_resource_name)

    # Check if we have an inherited type.
    if resource_name and not (resource_name == document_resource_name or
                              resource_name in resource_type._meta.parent_resource_names):
        raise exceptions.ValidationError(
            "Expected resource `%s` does not match resource defined in document `%s`." % (
                resource_name, document_resource_name))

    # Clean each field value, collecting per-field errors before raising.
    errors = {}
    attrs = {}
    for f in resource_type._meta.fields:
        value = d.pop(f.name, NOT_PROVIDED)
        try:
            attrs[f.attname] = f.clean(value)
        except exceptions.ValidationError as ve:
            errors[f.name] = ve.error_messages

    if errors:
        raise exceptions.ValidationError(errors)

    new_resource = resource_type(**attrs)
    if d:
        new_resource.extra_attrs(d)
    if full_clean:
        new_resource.full_clean()
    return new_resource
def build_object_graph(d, resource_name=None):
    """
    Generate an object graph from a dict.

    Dicts become resources, lists are converted element-wise, and any other
    value is passed through unchanged.

    :raises ValidationError: During building of the object graph and issues discovered are raised as a ValidationError.
    """
    if isinstance(d, list):
        return [build_object_graph(element, resource_name) for element in d]
    if isinstance(d, dict):
        return create_resource_from_dict(d, resource_name)
    return d
| {
"content_hash": "8f1d812b653d05c080302f3f55cf0d18",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 119,
"avg_line_length": 34.54907975460123,
"alnum_prop": 0.5787978336144899,
"repo_name": "tjmcewan/odin",
"id": "06aba2b6ecc56d28f9df25c49aede8d0bde3c536",
"size": "11287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odin/resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import tensorflow as tf
from candidate_selection.tensorflow_models.components.abstract_component import AbstractComponent
class TargetComparator(AbstractComponent):
    """Scores element embeddings against per-instance target embeddings.

    The target row for each instance is selected from ``all_target_embeddings``
    via the ``<prefix>target_indices`` placeholder, then combined with the
    element embeddings using the configured ``comparison`` mode:
    ``dot_product`` (scalar score per row), ``sum`` (element-wise add) or
    ``concat`` (concatenate along the feature axis).
    """

    variable_prefix = None
    variables = None
    comparison = None

    def __init__(self, variables, variable_prefix="", comparison="dot_product"):
        self.variables = variables
        self.variable_prefix = variable_prefix
        # Non-empty prefixes get a trailing underscore so variable names read
        # "<prefix>_target_indices".
        if self.variable_prefix != "":
            self.variable_prefix += "_"
        self.comparison = comparison

    def get_comparison_scores(self, all_target_embeddings, element_embeddings):
        """Combine the looked-up target embeddings with *element_embeddings*
        according to ``self.comparison``.

        :raises ValueError: for an unknown comparison mode (the original code
            fell through to an UnboundLocalError in that case).
        """
        target_embeddings = tf.nn.embedding_lookup(all_target_embeddings, self.variables.get_variable(self.variable_prefix + "target_indices"))

        if self.comparison == "dot_product":
            comparison = tf.reduce_sum(target_embeddings * element_embeddings, axis=1)
        elif self.comparison == "sum":
            comparison = target_embeddings + element_embeddings
        elif self.comparison == "concat":
            comparison = tf.concat([target_embeddings, element_embeddings], axis=1)
        else:
            # Fail fast with a clear message instead of the UnboundLocalError
            # the original raised when a bad mode was configured.
            raise ValueError("Unknown comparison mode: %r" % self.comparison)

        return comparison

    def prepare_tensorflow_variables(self, mode="train"):
        """Register the int32 placeholder holding per-instance target indices."""
        self.variables.add_variable(self.variable_prefix + "target_indices", tf.placeholder(tf.int32))

    def handle_variable_assignment(self, batch_dictionary, mode):
        """Feed the placeholder from the batch's neighborhood input model."""
        self.variables.assign_variable(self.variable_prefix + "target_indices", batch_dictionary["neighborhood_input_model"].get_instance_indices())
| {
"content_hash": "920e1c15dbfb5af5d544950576189771",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 148,
"avg_line_length": 42.48717948717949,
"alnum_prop": 0.7012673506336753,
"repo_name": "MichSchli/QuestionAnsweringGCN",
"id": "ab4fe6a35fc834554cb2d16a3ed9674eb8eb6994",
"size": "1657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_version/candidate_selection/tensorflow_models/components/extras/target_comparator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "730851"
},
{
"name": "Shell",
"bytes": "1446"
}
],
"symlink_target": ""
} |
import re
class METAR:
    """
    METAR
    Purpose:    Parses and keeps track of variables related to METAR observations
    Started:    25 June 2010 by Tim Supinie (tsupinie@ou.edu)
    Completed:  26 June 2010
    Modified:   [not yet]
    """

    # Private static list giving the priority for reporting the sky conditions (higher priorities are near
    # the front of the list).
    _skyc_priority = ['OVC', 'BKN', 'SCT', 'FEW', 'CLR', 'SKC']

    # Private static list giving the priority for reporting precipitation types (higher priorities are near
    # the front of the list).
    _wsym_priority = ['PL', 'SN', 'RA', 'SG', 'DZ', 'IC', 'BR', 'FG', 'VA', 'DU', 'HZ']

    # Private static list giving the priority for reporting precipitation descriptors (higher priorities
    # are near the front of the list).
    _wmod_priority = ['TS', 'FZ', 'BL', 'DR', 'SH', 'MI', 'PR', 'BC']

    # Private static list giving the priority for reporting precipitation intensity (higher priorities are
    # near the front of the list).
    _wint_priority = ['+', '', '-']

    def __init__(self, metar_string):
        """
        __init__()
        Purpose:    Constructor for the METAR class.  Calls the _parse() method on the METAR.
        Parameters: metar_string
                        A string containing the METAR to be parsed.
        """
        # Parse the METAR
        self._parse(metar_string)
        return

    def get(self, attr):
        """
        get() [public]
        Purpose:    Get a meteorological value from the METAR.
        Parameters: attr [type=str]
                        Which attribute to return.
        Returns:    The value of the attribute for this METAR, or None if the
                    attribute was not present in the observation.
        """
        if hasattr(self, attr):
            return getattr(self, attr)
        else:
            return None

    def _parse(self, metar_string):
        """
        _parse() [private]
        Purpose:    Parse the metar string and extract relevant variables.
        Parameters: metar_string [type=str]
                        A string containing the METAR to be parsed.
        Returns:    [nothing]
        """
        # Split the observation from the remarks (they're handled differently, and the remarks are weird).
        # NOTE(review): this assumes exactly one 'RMK' token is present; a METAR without
        # a remarks section would raise ValueError here -- confirm upstream input.
        observation, remarks = metar_string.split('RMK')

        # Note: before you try to understand this code, you may want to read up on regular expressions (regex).
        # Regex information and its Python API can be found here: http://docs.python.org/library/re.html

        # Match the METAR string at the very beginning (or SPECI, indicating a special observation).
        # The (?=[\\s]) looks for spaces ahead of the string, but doesn't include it in the match (necessary later).
        metr_match = re.search('(METAR|SPECI)(?=[\\s])', observation)

        # Match the ICAO identifier (K followed by 3 characters).  The (?<=[\\s]) looks for spaces behind
        # the string, but doesn't include it in the match (again, necessary later).
        icao_match = re.search('(?<=[\\s])(K[\\w]{3})(?=[\\s])', observation)

        # Match the time stamp (three sets of two digits followed by a Z.  String multiplication is done on the
        # two-digit sets because the regex engine needs three sets of parenthesis to capture three values.)
        time_match = re.search('(?<=[\\s])' + '([\\d]{2})' * 3 + 'Z(?=[\\s])', observation)

        # Match the wind observation (a set of three digits or "VRB" followed by a set of two digits, followed by an optional
        # "gust" cluster, which is a G and two digits, followed by a KT).
        wind_match = re.search('(?<=[\\s])([\\d]{3}|VRB)([\\d]{2})(?:G([\\d]{2}))?KT(?=[\\s])', observation)

        # Match the visibility (not strictly necessary for the station plot, and untested.  I may have broken it.)
        visb_match = re.search('(?<=[\\s])([\\d][\\s]?[\\d]?/?[\\d]?)SM(?=[\\s])', observation)

        # Match the sky conditions (any of the strings in _skyc_priority followed by three digits for the ceiling
        # height, followed by an optional CB cluster).
        skyc_matches = re.findall('(?<=[\\s])(' + "|".join(METAR._skyc_priority) +
                                  ')([\\d]{3})?(?:CB)?(?=[\\s])', observation)

        # Match the weather observation (any two of the strings in _wmod_priority, both of which are optional,
        # followed by any two of the strings in _wsym_priority, one of which is optional).
        wsym_matches = re.findall('(?<=[\\s])(\\+|\\-)?' + ('(' + "|".join(METAR._wmod_priority) + ')?') * 2 +
                                  ('(' + "|".join(METAR._wsym_priority) + ')') * 2 + '?(?=[\\s])', observation)

        # Match the temperature and dewpoint observations (two two digit numbers, each of which may be preceded
        # by an M, separated by a slash).
        temp_match = re.search('(?<=[\\s])(M?[\\d]{2})/(M?[\\d]{2})(?=[\\s])', observation)

        # Match the pressure observation (an A followed by four digits).
        pres_match = re.search('(?<=[\\s])A([\\d]{4})(?=[\\s])', observation)

        if not metr_match:
            # METAR doesn't start with the METAR or SPECI, so it's not a valid METAR
            raise ValueError(metar_string + ' is not a valid METAR (no METAR or SPECI header).')

        if icao_match:
            setattr(self, 'station_id', icao_match.group(1))
        else:
            # METAR doesn't have an ICAO station identifier, so it's not a valid METAR
            raise ValueError(metar_string + ' is not a valid METAR (no ICAO station identifier).')

        if time_match:
            # Set the observation time (day-of-month in group 1 is not stored).
            setattr(self, 'time_hours', int(time_match.group(2)))
            setattr(self, 'time_minutes', int(time_match.group(3)))

        if wind_match:
            # Set the wind observations; direction stays the string 'VRB' for variable winds.
            wdir = wind_match.group(1)
            if (wdir != 'VRB'): wdir = int(wdir)
            setattr(self, 'wind_direction', wdir)
            setattr(self, 'wind_speed', int(wind_match.group(2)))
            # lastindex is the highest group number that matched: 3 means the
            # optional gust group was present.
            if wind_match.lastindex == 3:
                setattr(self, 'wind_gust', int(wind_match.group(3)))

        if visb_match:
            # Visibility may be a whole number, a fraction, or both ("1 1/2SM").
            visb_string = visb_match.group(1)
            visb = 0
            if ' ' in visb_string:
                whole, partial = visb_string.split(' ')
                numerator, denominator = partial.split('/')
                visb = int(whole) + float(numerator) / float(denominator)
            elif '/' in visb_string:
                numerator, denominator = visb_string.split('/')
                visb = float(numerator) / float(denominator)
            else:
                visb = int(visb_string)
            setattr(self, 'visibility', visb)
            pass

        if skyc_matches:
            # Parse out the sky cover and pick the most important one.  The most important has the lowest
            # index in the _skyc_priority list.
            # NOTE: Python 2 idiom -- under Python 3, zip() returns a lazy
            # iterator and is not subscriptable.
            skyc = zip(*skyc_matches)[0]
            for cover in METAR._skyc_priority:
                if cover in skyc:
                    setattr(self, 'sky_conditions', cover)
                    break

        if wsym_matches:
            # Parse out the weather symbols and pick the most important one.  The most important is scored
            # by index in the _wsym_priority list plus a factor for intensity.  The factor is -1 for a +
            # intensity, 0 for no intensity and +1 for a - intensity (yeah, backwards, I know ...).  The
            # most important weather symbol has the lowest score.
            scores = ([], [])
            for wsym in wsym_matches:
                intensity = wsym[0]
                for idx in range(len(METAR._wsym_priority)):
                    weather = METAR._wsym_priority[idx]
                    if weather in wsym:
                        scores[0].append(idx + METAR._wint_priority.index(intensity) - 1)
                        scores[1].append("".join(wsym))
                        break
            setattr(self, 'current_weather', scores[1][scores[0].index(min(scores[0]))])

        if temp_match:
            # Parse the temperature and dewpoint and set them.
            def parseTemp(temp):
                """
                parseTemp() [local]
                Purpose:    Parse METAR temperature to an actual value
                Parameters: temp [type=str]
                                Temperature (in degrees Celsius, with negative values denoted by a preceding M)
                Returns:    Temperature of type int in degrees Celsius.
                """
                if temp[0] == 'M':
                    return -1 * int(temp[1:])
                else:
                    return int(temp)

            setattr(self, 'temperature', parseTemp(temp_match.group(1)))
            setattr(self, 'dewpoint', parseTemp(temp_match.group(2)))

        if pres_match:
            # Convert the pressure to in Hg and set it (e.g. "A2998" -> 29.98).
            setattr(self, 'pressure', float(pres_match.group(1)) / 100)
        return
if __name__ == "__main__":
    # Smoke test (Python 2): parse a complex METAR and dump every parsed field.
    # m = METAR("METAR KOUN 240212Z AUTO 16008G17KT 10SM CLR 32/21 A2998 RMK A02=")
    m = METAR("METAR KMDW 232256Z 30028G39KT 1SM R31C/P6000FT -TSRA SQ FEW028 BKN036CB OVC042 26/20 A2985 RMK AO2 PK WND 30039/2255 RAB55 PRESRR FRQ LTGICCG OHD TS OHD MOV E-SE P0003=")
    # Fields that were absent from the observation print as None.
    print "Station:", m.get('station_id')
    print "Time: %d:%d UTC" % (m.get('time_hours'), m.get('time_minutes'))
    print "Wind Direction:", m.get('wind_direction')
    print "Wind Speed:", m.get('wind_speed')
    print "Wind Gust:", m.get('wind_gust')
    print "Visibility:", m.get('visibility')
    print "Sky Conditions:", m.get('sky_conditions')
    print "Current Weather:", m.get('current_weather')
    print "Temperature:", m.get('temperature')
    print "Dewpoint:", m.get('dewpoint')
    print "Pressure:", m.get('pressure')
| {
"content_hash": "9d24e9062583822c5c772348299548e8",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 185,
"avg_line_length": 49.562814070351756,
"alnum_prop": 0.5665618980026361,
"repo_name": "pulsatrixwx/PulsatrixWx",
"id": "400a4f0aceb4f70c3afbeea942f3b7ed3b955c9f",
"size": "9863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/metar.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import collections
import numpy as np
# Sentinel characters wrapped around every poem: 'G' marks sequence start and
# 'E' marks sequence end.
start_token = 'G'
end_token = 'E'


def process_poems(file_name):
    """Reads a poem corpus and builds its vocabulary.

    Each input line has the form "title:content".  Poems containing markup
    characters (or the sentinel characters themselves), or whose length lies
    outside [5, 79], are skipped; every kept poem is wrapped in
    start_token/end_token.

    :param file_name: path to the corpus file (utf-8, one poem per line)
    :return: (poems_vector, word_int_map, words) where poems_vector is a list
        of integer-id sequences, word_int_map maps word -> id, and words is
        the id -> word tuple (its last entry is the blank used for padding).
    """
    poems = []
    with open(file_name, "r", encoding='utf-8', ) as f:
        for line in f.readlines():
            try:
                title, content = line.strip().split(':')
                content = content.replace(' ', '')
                if '_' in content or '(' in content or '(' in content or '《' in content or '[' in content or \
                        start_token in content or end_token in content:
                    continue
                if len(content) < 5 or len(content) > 79:
                    continue
                content = start_token + content + end_token
                poems.append(content)
            except ValueError as e:
                # Lines without exactly one ':' separator are silently skipped.
                pass
    # BUG FIX: the original used key=lambda l: len(line) -- 'line' is the
    # leftover loop variable (a constant), so the corpus was never actually
    # sorted by poem length.
    poems = sorted(poems, key=len)
    # Count word frequencies over the whole corpus and order the vocabulary
    # by descending frequency.
    all_words = []
    for poem in poems:
        all_words += [word for word in poem]
    counter = collections.Counter(all_words)
    count_pairs = sorted(counter.items(), key=lambda x: -x[1])
    words, _ = zip(*count_pairs)
    # Append the blank used for padding.  (The .get() default below is only a
    # fallback for words missing from the vocabulary, which cannot happen for
    # poems drawn from the same corpus.)
    words = words + (' ',)
    word_int_map = dict(zip(words, range(len(words))))
    poems_vector = [list(map(lambda word: word_int_map.get(word, len(words)), poem)) for poem in poems]
    return poems_vector, word_int_map, words
def generate_batch(batch_size, poems_vec, word_to_int):
    """Splits the encoded corpus into (input, target) training batches.

    Each batch is padded with the blank id to the length of its longest poem.
    Targets are the inputs shifted left by one position; the last target
    column repeats the final input column.

    :return: two parallel lists of int32 arrays, each of shape
        (batch_size, max_poem_length_in_batch).  A trailing partial batch
        is dropped.
    """
    num_batches = len(poems_vec) // batch_size
    pad_id = word_to_int[' ']
    x_batches, y_batches = [], []
    for batch_index in range(num_batches):
        chunk = poems_vec[batch_index * batch_size:(batch_index + 1) * batch_size]
        width = max(len(poem) for poem in chunk)
        x_data = np.full((batch_size, width), pad_id, np.int32)
        for row, poem in enumerate(chunk):
            x_data[row, :len(poem)] = poem
        y_data = np.copy(x_data)
        # Shift left by one token to form the prediction targets.
        y_data[:, :-1] = x_data[:, 1:]
        x_batches.append(x_data)
        y_batches.append(y_data)
    return x_batches, y_batches
"content_hash": "2f95fe163231b771a40d7d0fe0f37ace",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 110,
"avg_line_length": 35.67857142857143,
"alnum_prop": 0.5530530530530531,
"repo_name": "koala-ai/tensorflow_nlp",
"id": "534a62ef49c6daa19009aa40f9aa4594beb0e9d5",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlp/poems/dataset/poems.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "441077"
}
],
"symlink_target": ""
} |
# Name of the directory (relative to this script) holding the Lucene index.
INDEX_DIR = "IndexFiles.index"
import sys, os, lucene
from java.nio.file import Paths
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.index import DirectoryReader
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.search import IndexSearcher
"""
This script is loosely based on the Lucene (java implementation) demo class
org.apache.lucene.demo.SearchFiles. It will prompt for a search query, then it
will search the Lucene index in the current directory called 'index' for the
search query entered against the 'contents' field. It will then display the
'path' and 'name' fields for each of the hits it finds in the index. Note that
search.close() is currently commented out because it causes a stack overflow in
some cases.
"""
def run(searcher, analyzer):
while True:
print
print "Hit enter with no input to quit."
command = raw_input("Query:")
if command == '':
return
print
print "Searching for:", command
query = QueryParser("contents", analyzer).parse(command)
scoreDocs = searcher.search(query, 50).scoreDocs
print "%s total matching documents." % len(scoreDocs)
for scoreDoc in scoreDocs:
doc = searcher.doc(scoreDoc.doc)
print 'path:', doc.get("path"), 'name:', doc.get("name")
if __name__ == '__main__':
    # Start an in-process JVM; headless because no GUI classes are needed.
    lucene.initVM(vmargs=['-Djava.awt.headless=true'])
    print 'lucene', lucene.VERSION
    base_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Open the on-disk index located next to this script and run the loop.
    directory = SimpleFSDirectory(Paths.get(os.path.join(base_dir, INDEX_DIR)))
    searcher = IndexSearcher(DirectoryReader.open(directory))
    analyzer = StandardAnalyzer()
    run(searcher, analyzer)
    # Drop the reference so the searcher (and index files) can be released.
    del searcher
| {
"content_hash": "54e0f1f693b8241e037187087d3a7cfe",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 38.416666666666664,
"alnum_prop": 0.705531453362256,
"repo_name": "svn2github/pylucene",
"id": "9e037ed1af8af16950b0013313f0b5b09aceb4f0",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/SearchFiles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "900187"
},
{
"name": "Java",
"bytes": "56008"
},
{
"name": "Makefile",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "631813"
}
],
"symlink_target": ""
} |
from .a_component import AComponent
class AHeading(AComponent):
    """Heading component: renders as an <h1>..<h6> style tag of the given size."""

    def __init__(self, size, *args, **kwargs):
        """Stores the heading level, then delegates to AComponent."""
        # Keep the original initialisation order: the level is stored before
        # the base class initialiser runs.
        self.size = size
        super().__init__(*args, **kwargs)

    def tagname(self):
        """Returns the HTML tag name, e.g. "h2" for size 2."""
        return "h%s" % self.size
| {
"content_hash": "7326581f8e123fdaf345149502856832",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 24.3,
"alnum_prop": 0.5925925925925926,
"repo_name": "Sefrwahed/Alfred",
"id": "458d8e8f8fcfcc2e3787a56fa2af30cbc41b9c56",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alfred/modules/api/view_components/a_heading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "170810"
},
{
"name": "HTML",
"bytes": "5961"
},
{
"name": "JavaScript",
"bytes": "1336"
},
{
"name": "Python",
"bytes": "135757"
},
{
"name": "TeX",
"bytes": "212"
}
],
"symlink_target": ""
} |
"""Shared constants and functions."""
import collections
import os
from official.nlp.data import tagging_data_lib
from official.nlp.configs import encoders
from official.nlp.tasks.tagging import TaggingConfig, ModelConfig
import tensorflow_hub as hub
import tensorflow as tf
from absl import logging
from training.model_setup_config import ModelSize
# HACK: Required to make bert.tokenization work with TF2.
tf.gfile = tf.io.gfile
from com_google_research_bert import tokenization # pylint: disable=wrong-import-position
# NOTE(review): exported constants; LABEL_CONTAINER_NAME and the LF_* names are
# not referenced elsewhere in this module -- presumably consumed by callers.
LABEL_CONTAINER_NAME = "lucid"
LF_ADDRESS_LABEL = "address"
LF_TELEPHONE_LABEL = "phone"
# Upper-case label names used for the two primary entity types.
MAIN_LABEL_ADDRESS = "ADDRESS"
MAIN_LABEL_TELEPHONE = "TELEPHONE"
MAIN_LABELS = (MAIN_LABEL_ADDRESS, MAIN_LABEL_TELEPHONE)
# "O" is short for "outside" and a magic value used by seqeval
# Not assigning "O" the index 0 help to find bugs where the label is always set
# to zero.
LABEL_OUTSIDE = "O"
# BIO-style tags: B- marks the first word of an entity, I- the following words.
LABEL_BEGIN_TELEPHONE = "B-TELEPHONE"
LABEL_INSIDE_TELEPHONE = "I-TELEPHONE"
LABEL_BEGIN_ADDRESS = "B-ADDRESS"
LABEL_INSIDE_ADDRESS = "I-ADDRESS"
LABELS = (LABEL_BEGIN_TELEPHONE, LABEL_INSIDE_TELEPHONE, LABEL_OUTSIDE,
          LABEL_BEGIN_ADDRESS, LABEL_INSIDE_ADDRESS)
# Optional BIO tags for further semiotic classes; only used when
# use_additional_labels is enabled (see add_tfrecord_label).
ADDITIONAL_LABELS = (
    "B-DATE", "I-DATE", "B-NUMBER", "I-NUMBER", "B-LETTERS_SEPARATE",
    "I-LETTERS_SEPARATE", "B-MEASURE", "I-MEASURE", "B-MONEY", "I-MONEY",
    "B-ELECTRONIC", "I-ELECTRONIC", "B-ROMAN_NUMERAL_AS_CARDINAL",
    "I-ROMAN_NUMERAL_AS_CARDINAL", "B-EMOTICON_EMOJI", "I-EMOTICON_EMOJI",
    "B-ABBREVIATION_TO_EXPAND", "I-ABBREVIATION_TO_EXPAND",
    "B-VERBATIM_SEQUENCE", "I-VERBATIM_SEQUENCE", "B-TIME", "I-TIME",
    "B-CONNECTOR_RANGE", "I-CONNECTOR_RANGE", "B-DURATION", "I-DURATION",
    "B-CONNECTOR_SILENT", "I-CONNECTOR_SILENT", "B-CONNECTOR_GENERAL",
    "I-CONNECTOR_GENERAL", "B-FRACTION", "I-FRACTION", "B-LETTERS_AS_WORD",
    "I-LETTERS_AS_WORD", "B-ORDINAL", "I-ORDINAL", "B-CONNECTOR_RATIO",
    "I-CONNECTOR_RATIO", "B-ROMAN_NUMERAL_AS_DEFINITE_ORDINAL",
    "I-ROMAN_NUMERAL_AS_DEFINITE_ORDINAL", "B-DIGITS", "I-DIGITS",
    "B-CONNECTOR_SCORE", "I-CONNECTOR_SCORE", "B-CHUNKED_NUMBER",
    "I-CHUNKED_NUMBER", "B-CONNECTOR_MATH", "I-CONNECTOR_MATH",
    "B-CONNECTOR_DIMENSION", "I-CONNECTOR_DIMENSION", "B-MULTI_UNIT_MEASURE",
    "I-MULTI_UNIT_MEASURE", "B-ROMAN_NUMERAL_AS_ORDINAL",
    "I-ROMAN_NUMERAL_AS_ORDINAL", "B-CHEMICAL_FORMULA", "I-CHEMICAL_FORMULA")
# Copied from tagging_data_lib.
UNK_TOKEN = "[UNK]"
# Label id for non-first subwords of a word (they carry no label of their own).
PADDING_LABEL_ID = -1
# Label id marking moving-window overlap tokens that are context only.
MOVING_WINDOW_MASK_LABEL_ID = -2
# Special BERT wordpiece tokens.
BERT_SENTENCE_START = "[CLS]"
BERT_SENTENCE_SEPARATOR = "[SEP]"
BERT_SENTENCE_PADDING = "[PAD]"
# Dense integer id per label; the order of LABELS + ADDITIONAL_LABELS fixes
# the ids (note LABEL_OUTSIDE deliberately does not get id 0, see above).
LABEL_ID_MAP = {label: i for i, label in enumerate(LABELS + ADDITIONAL_LABELS)}
# A labelled text selection: the text before/after the selection, the selected
# text itself, the full text and the assigned label.
LabeledExample = collections.namedtuple(
    "LabeledExample",
    ["prefix", "selection", "suffix", "complete_text", "label"])
def get_tokenizer(model_config):
    """Builds a FullTokenizer matching the configured BERT hub module.

    Note: mutates model_config in place, defaulting its size to BASE.
    """
    # The tiny and base models both have the same tokenizer, so we can always
    # use the one of the base model.
    if model_config.size is None:
        model_config.size = ModelSize.BASE
    hub_layer = hub.KerasLayer(_get_hub_url(model_config), trainable=False)
    resolved = hub_layer.resolved_object
    return tokenization.FullTokenizer(
        vocab_file=resolved.vocab_file.asset_path.numpy(),
        do_lower_case=resolved.do_lower_case.numpy())
def split_into_words(text, tokenizer):
    """Splits text into words using the tokenizer's basic tokenizer
    (whitespace/punctuation only, no wordpieces)."""
    basic_tokenizer = tokenizer.basic_tokenizer
    return basic_tokenizer.tokenize(text)
def remove_whitespace_and_parse(text, tokenizer):
    """Removes all whitespace and some special characters.

    The tokenizer discards some utf-8 characters, such as the right-to-left
    indicator. Applying the tokenizer is slow, but the safest way to guarantee
    consistent behaviour.
    """
    # Equivalent to "".join(split_into_words(text, tokenizer)): tokenize, then
    # glue the word tokens back together without separators.
    word_tokens = tokenizer.basic_tokenizer.tokenize(text)
    return "".join(word_tokens)
def add_tfrecord_label(text, label, tokenizer, example, use_additional_labels):
    """Appends one (word, label id) pair per word of text to the example.

    For a selected label the first word gets its B- (begin) id and later
    words its I- (inside) id; otherwise every word is tagged LABEL_OUTSIDE.
    """
    words = split_into_words(text, tokenizer)
    # MAIN_LABELS always count; the additional labels only when enabled.
    selected = label in MAIN_LABELS or (use_additional_labels
                                        and label in ADDITIONAL_LABELS)
    if selected:
        begin_id = LABEL_ID_MAP["B-%s" % label]
        inside_id = LABEL_ID_MAP["I-%s" % label]
        example.add_word_and_label_id(words[0], begin_id)
        for word in words[1:]:
            example.add_word_and_label_id(word, inside_id)
    else:
        outside_id = LABEL_ID_MAP[LABEL_OUTSIDE]
        for word in words:
            example.add_word_and_label_id(word, outside_id)
def _tokenize_example(example,
                      max_length,
                      tokenizer,
                      text_preprocessing=None,
                      moving_window_overlap=20,
                      mask_overlap=False):
    """Tokenizes words and breaks long example into short ones.

    Very similar to _tokenize_example in tagging_data_lib, but implements a
    moving window. The tokens closest to the border are repeated in the next
    sub-sentence. The half of the repeated tokens that are closest to the border
    are not labeled if mask_overlap is True.

    :param example: tagging_data_lib.InputExample with words and label_ids.
    :param max_length: maximum sequence length including [CLS]/[SEP].
    :param tokenizer: wordpiece tokenizer.
    :param text_preprocessing: optional callable applied to each word first.
    :param moving_window_overlap: number of tokens repeated between
        consecutive sub-sentences; must be even (half is masked context,
        half is re-labeled).
    :param mask_overlap: whether to mask the overlap tokens with
        MOVING_WINDOW_MASK_LABEL_ID.
    :raises ValueError: if moving_window_overlap is odd, or a negative
        label_id is encountered.
    """
    if moving_window_overlap % 2 != 0:
        raise ValueError("moving_window_overlap must be even.")
    half_moving_window_overlap = moving_window_overlap // 2
    # Run of mask ids used to blank out half of the repeated overlap.
    moving_window_padding = [MOVING_WINDOW_MASK_LABEL_ID
                             ] * half_moving_window_overlap
    # Needs additional [CLS] and [SEP] tokens and space for the moving window.
    max_length = max_length - 2 - moving_window_overlap
    new_examples = []
    new_example = tagging_data_lib.InputExample(
        sentence_id=example.sentence_id, sub_sentence_id=0)
    for i, word in enumerate(example.words):
        # Sanity check: source labels must be non-negative (negative ids are
        # reserved for padding/masking introduced below).
        if any(x < 0 for x in example.label_ids):
            raise ValueError("Unexpected negative label_id: %s" %
                             example.label_ids)

        if text_preprocessing:
            word = text_preprocessing(word)
        subwords = tokenizer.tokenize(word)
        word_is_empty = not subwords
        # max_length takes the moving window into account, to avoid that the
        # sequence becomes too long after copying the previous context.
        word_too_long = len(subwords) > max_length
        # If the current sequence would be completely masked out due to the
        # moving window, a new example must not be started yet.
        current_sequence_too_short = len(
            new_example.words) <= moving_window_overlap and len(
                subwords) + len(new_example.words) > max_length
        # Fall back to [UNK] for words the tokenizer cannot place.
        if (word_is_empty or word_too_long
                or current_sequence_too_short) and word:
            subwords = [UNK_TOKEN]

        if len(subwords) + len(new_example.words) > max_length:
            # A copy is needed as the original list is modified below.
            previous_label_ids = new_example.label_ids.copy()
            previous_label_words = new_example.words
            if mask_overlap and moving_window_overlap > 0:
                # The last tokens have very little context, they are labeled in
                # the next sub-sentence.
                new_example.label_ids[
                    -half_moving_window_overlap:] = moving_window_padding
            # Start a new example.
            new_examples.append(new_example)
            last_sub_sentence_id = new_example.sub_sentence_id
            new_example = tagging_data_lib.InputExample(
                sentence_id=example.sentence_id,
                sub_sentence_id=last_sub_sentence_id + 1)
            if moving_window_overlap > 0:
                # The previously masked tokens need to be labeled, additional
                # tokens are copied and masked to be used as context.
                new_example.words.extend(
                    previous_label_words[-moving_window_overlap:])
                if mask_overlap:
                    new_example.label_ids.extend(moving_window_padding)
                    new_example.label_ids.extend(
                        previous_label_ids[-half_moving_window_overlap:])
                else:
                    new_example.label_ids.extend(
                        previous_label_ids[-moving_window_overlap:])

        for j, subword in enumerate(subwords):
            # Use the real label for the first subword, and pad label for
            # the remainings.
            subword_label = example.label_ids[i] if j == 0 else PADDING_LABEL_ID
            new_example.add_word_and_label_id(subword, subword_label)

    assert new_example.words
    new_examples.append(new_example)
    return new_examples
def write_example_to_file(examples,
                          tokenizer,
                          max_seq_length,
                          output_file,
                          text_preprocessing=None,
                          moving_window_overlap=20,
                          mask_overlap=False):
    """Writes `InputExample`s to a tfrecord file with `tf.train.Example` protos.

    Identical to tagging_data_lib.write_example_to_file except for the
    additional parameters that are passed to _tokenize_example.

    Args:
        examples: A list of `InputExample` instances.
        tokenizer: The tokenizer to be applied on the data.
        max_seq_length: Maximum length of generated sequences.
        output_file: The name of the output tfrecord file.
        text_preprocessing: optional preprocessing run on each word prior to
            tokenization.
        moving_window_overlap: Size of the moving window.
        mask_overlap: Whether to mask the overlap introduced by the moving
            window or not.
    Returns:
        The total number of tf.train.Example proto written to file.
    """
    tf.io.gfile.makedirs(os.path.dirname(output_file))
    num_tokenized_examples = 0
    writer = tf.io.TFRecordWriter(output_file)
    # BUG FIX: close the writer even when tokenization or serialization
    # raises, so buffered records are flushed and the handle is released.
    try:
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                logging.info("Writing example %d of %d to %s", ex_index,
                             len(examples), output_file)
            tokenized_examples = _tokenize_example(example, max_seq_length,
                                                   tokenizer, text_preprocessing,
                                                   moving_window_overlap,
                                                   mask_overlap)
            num_tokenized_examples += len(tokenized_examples)
            for per_tokenized_example in tokenized_examples:
                # pylint: disable=protected-access
                tf_example = tagging_data_lib._convert_single_example(
                    per_tokenized_example, max_seq_length, tokenizer)
                # pylint: enable=protected-access
                writer.write(tf_example.SerializeToString())
    finally:
        writer.close()
    return num_tokenized_examples
def _get_hub_url(model_config):
    """Maps a model configuration to the matching TF-Hub module URL."""
    if model_config.size == ModelSize.TINY:
        # Only an uncased checkpoint exists for the tiny model.
        assert not model_config.case_sensitive
        return "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"  # pylint: disable=line-too-long
    if model_config.case_sensitive:
        return "https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2"  # pylint: disable=line-too-long
    return "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2"  # pylint: disable=line-too-long
def get_tagging_config(model_config,
                       label_list,
                       train_data_config=None,
                       validation_data_config=None):
    """Returns a TaggingConfig for the given model and data configuration."""
    shared_kwargs = {
        "train_data": train_data_config,
        "validation_data": validation_data_config,
        "class_names": label_list
    }
    if model_config.pretrained:
        # Pretrained weights are loaded straight from TF-Hub.
        return TaggingConfig(hub_module_url=_get_hub_url(model_config),
                             **shared_kwargs)
    # Training from scratch is only supported for the tiny architecture.
    assert model_config.size == ModelSize.TINY
    tiny_encoder = encoders.EncoderConfig(
        bert=encoders.BertEncoderConfig(num_layers=2,
                                        hidden_size=128,
                                        num_attention_heads=2,
                                        intermediate_size=128 * 4))
    return TaggingConfig(model=ModelConfig(encoder=tiny_encoder),
                         **shared_kwargs)
| {
"content_hash": "fa160b57e645965ff6337fc005a1bfb7",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 121,
"avg_line_length": 42.802083333333336,
"alnum_prop": 0.6260241745761337,
"repo_name": "googleinterns/bert-annotator",
"id": "fd7ef6abb8532e880cfbf7b3c2e784e62f9f61c8",
"size": "12931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "training/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "161864"
},
{
"name": "Dockerfile",
"bytes": "2902"
},
{
"name": "Python",
"bytes": "117022"
},
{
"name": "Shell",
"bytes": "1031"
},
{
"name": "Starlark",
"bytes": "8664"
}
],
"symlink_target": ""
} |
from functools import update_wrapper
from collections import Mapping
# Common prefix of every Victron Energy D-Bus service name.
VictronServicePrefix = 'com.victronenergy'
def safeadd(*values):
    """ Adds all parameters passed to this function. Parameters which are None
        are ignored. If all parameters are None, the function will return None
        as well.
    """
    present = [value for value in values if value is not None]
    if not present:
        return None
    return sum(present)
def safemax(v0, v1):
    """Returns max(v0, v1), or None when either argument is None."""
    if v0 is not None and v1 is not None:
        return max(v0, v1)
    return None
def service_base_name(service_name):
    '''Returns the part of a Victron D-Bus service name that defines its type.
    Example: com.victronenergy.vebus.ttyO1 yields com.victronenergy.vebus

    :raises Exception: if the name is not a com.victronenergy service.
    '''
    # startswith(prefix + '.') also rejects a name that is exactly the bare
    # prefix; the original index-based check raised IndexError for that input.
    if not service_name.startswith(VictronServicePrefix + '.'):
        raise Exception('Not a victron service')
    i = service_name.find('.', len(VictronServicePrefix) + 1)
    if i == -1:
        return service_name
    return service_name[:i]
def service_instance_name(service_name, instance):
    '''Combines service base name and device instance to an identifier that is
    unique for each D-Bus service without relying on communication port name etc.
    Example: com.victronenergy.grid.cgwacs_ttyUSB0_di30_mb1 yields com.victronenergy.grid/30'''
    base = service_base_name(service_name)
    return '%s/%s' % (base, instance)
def gpio_paths(etc_path):
    """Reads a whitespace-separated list of paths from the given file.

    Returns an empty list when the file cannot be read.
    """
    try:
        with open(etc_path, 'rt') as gpio_file:
            contents = gpio_file.read()
    except IOError:
        return []
    return contents.strip().split()
def copy_dbus_value(monitor, src_service, src_path, dest_service, dest_path, copy_invalid=False, offset=None):
    """Copies a value from one D-Bus service/path to another.

    :param copy_invalid: also propagate an invalid (None) source value.
    :param offset: optional numeric offset added to a valid value before it
        is written.
    """
    value = monitor.get_value(src_service, src_path)
    if copy_invalid or value is not None:
        # BUG FIX: also check value is not None -- with copy_invalid=True an
        # invalid value plus an offset used to raise TypeError (None + n).
        if offset is not None and value is not None:
            value += offset
        monitor.set_value_async(dest_service, dest_path, value)
class SmartDict(dict):
    """Dictionary whose keys are also readable and writable as attributes."""

    def __getattr__(self, n):
        try:
            return self[n]
        # BUG FIX: dict lookups raise KeyError, not IndexError.  The original
        # clause let KeyError escape from attribute access, which breaks
        # hasattr() and getattr() with a default.
        except KeyError:
            raise AttributeError(n)

    def __setattr__(self, k, v):
        self[k] = v
class reify(object):
    """ Decorator for class methods. Turns the method into a property that
    is evaluated once, and then replaces the property, effectively caching
    it and evaluating it only once. """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        update_wrapper(self, wrapped)

    def __get__(self, inst, objtype=None):
        # Accessed on the class itself: hand back the descriptor.
        if inst is None:
            return self
        # First access on an instance: compute the value once and shadow
        # this non-data descriptor with a plain instance attribute, so
        # later accesses never reach __get__ again.
        result = self.wrapped(inst)
        setattr(inst, self.wrapped.__name__, result)
        return result
class smart_dict(dict):
    # Dictionary that can be accessed via attributes.  Nested mappings are
    # wrapped on the fly so chained access (d.a.b) keeps working.
    def __getattr__(self, k):
        try:
            v = self[k]
        except KeyError:
            raise AttributeError(k)
        if isinstance(v, Mapping):
            return self.__class__(v)
        return v

    def __setattr__(self, k, v):
        self[k] = v
| {
"content_hash": "804dac919ba3169a3ba08fd81490ac9e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 110,
"avg_line_length": 29.288888888888888,
"alnum_prop": 0.7139605462822458,
"repo_name": "victronenergy/dbus-systemcalc-py",
"id": "181bc2397e48fb814b6163227f13bcb71653a57c",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2468"
},
{
"name": "Python",
"bytes": "493722"
}
],
"symlink_target": ""
} |
__author__ = 'sachinpatney'
class IAction:
    """Interface for watcher actions: subclasses implement __do__()."""

    def __init__(self):
        pass

    def __do__(self):
        """Performs the action.  Subclasses must override this method."""
        # NotImplementedError is the idiomatic exception for an abstract
        # method; it is still an Exception subclass, so existing handlers
        # that catch Exception keep working.
        raise NotImplementedError('This should be overridden')
| {
"content_hash": "b7bf587341381ae0d3e3b260be88d02b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 15.9,
"alnum_prop": 0.5660377358490566,
"repo_name": "sachinio/redalert",
"id": "7cb7c44bfe3acd134998576cd4f968a897486832",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hardware/watcher/actions/actions_common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "748"
},
{
"name": "C++",
"bytes": "6681"
},
{
"name": "CSS",
"bytes": "210059"
},
{
"name": "HTML",
"bytes": "7039"
},
{
"name": "JavaScript",
"bytes": "5898"
},
{
"name": "PHP",
"bytes": "2643"
},
{
"name": "Python",
"bytes": "39759"
},
{
"name": "Shell",
"bytes": "2581"
}
],
"symlink_target": ""
} |
"""
calico.calcollections
~~~~~~~~~~~~~~~~~~~~~
Collection classes and utils.
"""
import logging
_log = logging.getLogger(__name__)
class SetDelta(object):
    """Represents a change to a base set.

    Tracks the minimum collection of additions and removals required to apply
    the changes to the set.
    """

    def __init__(self, base_set):
        """Constructor.

        :param set base_set: the set to calculate deltas from.
        """
        self.base_set = base_set
        self.added_entries = set()
        self.removed_entries = set()

    def add(self, entry):
        """Record an addition to the set."""
        if entry in self.base_set:
            # Re-adding an existing member just cancels any pending removal.
            self.removed_entries.discard(entry)
        else:
            # Not in the base set yet, so it needs to be added.
            self.added_entries.add(entry)

    def remove(self, entry):
        """Record a removal from the set."""
        if entry in self.base_set:
            # Present in the base set, so it needs to be removed.
            self.removed_entries.add(entry)
        else:
            # Removing a non-member just cancels any pending addition.
            self.added_entries.discard(entry)

    def apply_and_reset(self):
        """Apply the differences to the base set and clear the delta."""
        self.base_set -= self.removed_entries
        self.base_set |= self.added_entries
        self.removed_entries = set()
        self.added_entries = set()

    @property
    def resulting_size(self):
        """Size the base set will have once the delta is applied."""
        return (len(self.base_set)
                + len(self.added_entries)
                - len(self.removed_entries))

    @property
    def empty(self):
        """True if applying this delta would be a no-op."""
        return not self.added_entries and not self.removed_entries
class MultiDict(object):
    """
    Represents a mapping from key to a set of values.

    Implementation note: as an occupancy optimization, if there is only
    one value for a given key, it is stored as a bare value rather than
    a set of one item.
    """
    def __init__(self, set_cls=set):
        """Constructor.

        :param set_cls: The type of set to use to hold values.  For
            example, setting this to an ordered set type would result in
            the values being sorted.
        """
        self._set_cls = set_cls
        self._index = {}

    def add(self, key, value):
        """Add value to the set of values for the given key.

        Idempotent: does nothing if the mapping is already present.
        """
        # As an occupancy optimization, we only store a set of items if there's
        # more than one.  Otherwise, we store the item itself in the index.
        # Use setdefault to insert the item only if it's not present.
        index_entry = self._index.setdefault(key, value)
        if index_entry != value:
            # Failed to insert the new value as the single entry, examine
            # what we got.
            if isinstance(index_entry, self._set_cls):
                # Already have multiple values for that entry, add the new one
                # to the set.
                index_entry.add(value)
            else:
                # There was an entry but it wasn't the one we tried to add,
                # promote the entry to a set.
                index_entry = self._set_cls([index_entry, value])
                self._index[key] = index_entry

    def discard(self, key, value):
        """Discards a value from the set associated with the given key.

        Idempotent: does nothing if the mapping is already gone.
        """
        if key in self._index:
            index_entry = self._index[key]
            if isinstance(index_entry, self._set_cls):
                # Had multiple values for this key before, remove just this
                # value.
                index_entry.discard(value)
                if len(index_entry) == 1:
                    # No-longer have multiple values go back to storing just a
                    # single value.
                    index_entry = index_entry.pop()
                    self._index[key] = index_entry
            elif index_entry == value:
                # Entry was the only value for this key.  Delete it completely.
                del self._index[key]

    def __contains__(self, item):
        """Implements the 'in' operator, True if the key is present."""
        return item in self._index

    def contains(self, key, value):
        """
        :return: True if the given key/value mapping is present.
        """
        index_entry = self._index.get(key)
        if isinstance(index_entry, self._set_cls):
            return value in index_entry
        else:
            return value == index_entry

    def iter_values(self, key):
        """
        :return: an iterator over the values for the given key.  WARNING:
            care should be taken not to modify the values associated with
            that key while iterating.
        """
        if key in self._index:
            index_entry = self._index[key]
            if isinstance(index_entry, self._set_cls):
                return iter(index_entry)
            else:
                return iter([index_entry])
        return iter([])

    def num_items(self, key):
        """
        :return: The number of items associated with the given key.  Returns 0
            if the key is not in the mapping.
        """
        if key in self._index:
            index_entry = self._index[key]
            if isinstance(index_entry, self._set_cls):
                return len(index_entry)
            else:
                return 1
        return 0

    def __nonzero__(self):
        """Implement bool(<multidict>). True if we have some entries."""
        return bool(self._index)

    # BUG FIX: Python 3 looks for __bool__, not __nonzero__; without this
    # alias an empty MultiDict would always be truthy on Python 3.
    __bool__ = __nonzero__
| {
"content_hash": "c309e2b5d22cc0592f7b026f674ae22f",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 79,
"avg_line_length": 34.252941176470586,
"alnum_prop": 0.5620814013395157,
"repo_name": "TrimBiggs/calico",
"id": "993c635dbc2e8bf651deb82433e30ca369b16ca1",
"size": "6430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calico/calcollections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "853711"
},
{
"name": "Shell",
"bytes": "13082"
}
],
"symlink_target": ""
} |
from math import ceil
import pytest
from scipy.stats import norm, randint
import numpy as np
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import HalvingRandomSearchCV
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.svm import LinearSVC
from sklearn.model_selection._search_successive_halving import (
_SubsampleMetaSplitter,
_top_k,
)
class FastClassifier(DummyClassifier):
    """Dummy classifier that accepts parameters a, b, ... z.

    These parameter don't affect the predictions and are useful for fast
    grid searching."""

    def __init__(
        self, strategy="stratified", random_state=None, constant=None, **kwargs
    ):
        # The a..z "parameters" arrive through **kwargs and are deliberately
        # dropped: they exist only so that search objects can set them.
        super().__init__(
            strategy=strategy, random_state=random_state, constant=constant
        )

    def get_params(self, deep=False):
        # Advertise one fake tunable parameter per lowercase letter on top
        # of the genuine DummyClassifier parameters.
        params = super().get_params(deep=deep)
        for letter in (chr(code) for code in range(ord("a"), ord("z") + 1)):
            params[letter] = "whatever"
        return params
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
    (
        "aggressive_elimination,"
        "max_resources,"
        "expected_n_iterations,"
        "expected_n_required_iterations,"
        "expected_n_possible_iterations,"
        "expected_n_remaining_candidates,"
        "expected_n_candidates,"
        "expected_n_resources,"
    ),
    [
        # notice how it loops at the beginning
        # also, the number of candidates evaluated at the last iteration is
        # <= factor
        (True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),
        # no aggressive elimination: we end up with less iterations, and
        # the number of candidates at the last iter is > factor, which isn't
        # ideal
        (False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),
        # # When the amount of resource isn't limited, aggressive_elimination
        # # has no effect. Here the default min_resources='exhaust' will take
        # # over.
        (True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
        (False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
    ],
)
def test_aggressive_elimination(
    Est,
    aggressive_elimination,
    max_resources,
    expected_n_iterations,
    expected_n_required_iterations,
    expected_n_possible_iterations,
    expected_n_remaining_candidates,
    expected_n_candidates,
    expected_n_resources,
):
    """Check every iteration-bookkeeping attribute of a fitted halving
    search for each aggressive_elimination / max_resources combination."""
    # Test the aggressive_elimination parameter.
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
    base_estimator = FastClassifier()
    if max_resources == "limited":
        max_resources = 180
    else:
        max_resources = n_samples
    sh = Est(
        base_estimator,
        param_grid,
        aggressive_elimination=aggressive_elimination,
        max_resources=max_resources,
        factor=3,
    )
    sh.set_params(verbose=True)  # just for test coverage
    if Est is HalvingRandomSearchCV:
        # same number of candidates as with the grid
        sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
    sh.fit(X, y)
    assert sh.n_iterations_ == expected_n_iterations
    assert sh.n_required_iterations_ == expected_n_required_iterations
    assert sh.n_possible_iterations_ == expected_n_possible_iterations
    assert sh.n_resources_ == expected_n_resources
    assert sh.n_candidates_ == expected_n_candidates
    assert sh.n_remaining_candidates_ == expected_n_remaining_candidates
    assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
    (
        "min_resources,"
        "max_resources,"
        "expected_n_iterations,"
        "expected_n_possible_iterations,"
        "expected_n_resources,"
    ),
    [
        # with enough resources
        ("smallest", "auto", 2, 4, [20, 60]),
        # with enough resources but min_resources set manually
        (50, "auto", 2, 3, [50, 150]),
        # without enough resources, only one iteration can be done
        ("smallest", 30, 1, 1, [20]),
        # with exhaust: use as much resources as possible at the last iter
        ("exhaust", "auto", 2, 2, [333, 999]),
        ("exhaust", 1000, 2, 2, [333, 999]),
        ("exhaust", 999, 2, 2, [333, 999]),
        ("exhaust", 600, 2, 2, [200, 600]),
        ("exhaust", 599, 2, 2, [199, 597]),
        ("exhaust", 300, 2, 2, [100, 300]),
        ("exhaust", 60, 2, 2, [20, 60]),
        ("exhaust", 50, 1, 1, [20]),
        ("exhaust", 20, 1, 1, [20]),
    ],
)
def test_min_max_resources(
    Est,
    min_resources,
    max_resources,
    expected_n_iterations,
    expected_n_possible_iterations,
    expected_n_resources,
):
    """Check the per-iteration resource schedule for each min/max_resources
    combination."""
    # Test the min_resources and max_resources parameters, and how they affect
    # the number of resources used at each iteration
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": [1, 2], "b": [1, 2, 3]}
    base_estimator = FastClassifier()
    sh = Est(
        base_estimator,
        param_grid,
        factor=3,
        min_resources=min_resources,
        max_resources=max_resources,
    )
    if Est is HalvingRandomSearchCV:
        sh.set_params(n_candidates=6)  # same number as with the grid
    sh.fit(X, y)
    expected_n_required_iterations = 2  # given 6 combinations and factor = 3
    assert sh.n_iterations_ == expected_n_iterations
    assert sh.n_required_iterations_ == expected_n_required_iterations
    assert sh.n_possible_iterations_ == expected_n_possible_iterations
    assert sh.n_resources_ == expected_n_resources
    if min_resources == "exhaust":
        assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
@pytest.mark.parametrize(
    "max_resources, n_iterations, n_possible_iterations",
    [
        ("auto", 5, 9),  # all resources are used
        (1024, 5, 9),
        (700, 5, 8),
        (512, 5, 8),
        (511, 5, 7),
        (32, 4, 4),
        (31, 3, 3),
        (16, 3, 3),
        (4, 1, 1),  # max_resources == min_resources, only one iteration is
        # possible
    ],
)
def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):
    """Check iteration counts as a function of max_resources (factor=2,
    min_resources=4, 20 candidates)."""
    # test the number of actual iterations that were run depending on
    # max_resources
    n_samples = 1024
    X, y = make_classification(n_samples=n_samples, random_state=1)
    param_grid = {"a": [1, 2], "b": list(range(10))}
    base_estimator = FastClassifier()
    factor = 2
    sh = Est(
        base_estimator,
        param_grid,
        cv=2,
        factor=factor,
        max_resources=max_resources,
        min_resources=4,
    )
    if Est is HalvingRandomSearchCV:
        sh.set_params(n_candidates=20)  # same as for HalvingGridSearchCV
    sh.fit(X, y)
    assert sh.n_required_iterations_ == 5
    assert sh.n_iterations_ == n_iterations
    assert sh.n_possible_iterations_ == n_possible_iterations
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_resource_parameter(Est):
    """Check budgeting on an estimator parameter ('c') instead of
    n_samples, plus the two resource-validation errors."""
    # Test the resource parameter
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": [1, 2], "b": list(range(10))}
    base_estimator = FastClassifier()
    sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3)
    sh.fit(X, y)
    assert set(sh.n_resources_) == set([1, 3, 9])
    # Each candidate's 'c' parameter must equal the resource budget of the
    # iteration it was evaluated in.
    for r_i, params, param_c in zip(
        sh.cv_results_["n_resources"],
        sh.cv_results_["params"],
        sh.cv_results_["param_c"],
    ):
        assert r_i == params["c"] == param_c
    # resource must name an actual estimator parameter...
    with pytest.raises(
        ValueError, match="Cannot use resource=1234 which is not supported "
    ):
        sh = HalvingGridSearchCV(
            base_estimator, param_grid, cv=2, resource="1234", max_resources=10
        )
        sh.fit(X, y)
    # ...and must not also be searched over in the grid.
    with pytest.raises(
        ValueError,
        match="Cannot use parameter c as the resource since it is part "
        "of the searched parameters.",
    ):
        param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]}
        sh = HalvingGridSearchCV(
            base_estimator, param_grid, cv=2, resource="c", max_resources=10
        )
        sh.fit(X, y)
@pytest.mark.parametrize(
    "max_resources, n_candidates, expected_n_candidates",
    [
        (512, "exhaust", 128),  # generate exactly as much as needed
        (32, "exhaust", 8),
        (32, 8, 8),
        (32, 7, 7),  # ask for less than what we could
        (32, 9, 9),  # ask for more than 'reasonable'
    ],
)
def test_random_search(max_resources, n_candidates, expected_n_candidates):
    """Check how many candidates HalvingRandomSearchCV samples in its
    first iteration, including the 'exhaust' mode."""
    # Test random search and make sure the number of generated candidates is
    # as expected
    n_samples = 1024
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": norm, "b": norm}
    base_estimator = FastClassifier()
    sh = HalvingRandomSearchCV(
        base_estimator,
        param_grid,
        n_candidates=n_candidates,
        cv=2,
        max_resources=max_resources,
        factor=2,
        min_resources=4,
    )
    sh.fit(X, y)
    assert sh.n_candidates_[0] == expected_n_candidates
    if n_candidates == "exhaust":
        # Make sure 'exhaust' makes the last iteration use as much resources as
        # we can
        assert sh.n_resources_[-1] == max_resources
@pytest.mark.parametrize(
    "param_distributions, expected_n_candidates",
    [
        ({"a": [1, 2]}, 2),  # all lists, sample less than n_candidates
        ({"a": randint(1, 3)}, 10),  # not all list, respect n_candidates
    ],
)
def test_random_search_discrete_distributions(
    param_distributions, expected_n_candidates
):
    """Check candidate sampling when the grid is exhaustible (all lists)
    versus when it contains a distribution."""
    # Make sure random search samples the appropriate number of candidates when
    # we ask for more than what's possible. How many parameters are sampled
    # depends whether the distributions are 'all lists' or not (see
    # ParameterSampler for details). This is somewhat redundant with the checks
    # in ParameterSampler but interaction bugs were discovered during
    # developement of SH
    n_samples = 1024
    X, y = make_classification(n_samples=n_samples, random_state=0)
    base_estimator = FastClassifier()
    sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)
    sh.fit(X, y)
    assert sh.n_candidates_[0] == expected_n_candidates
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
    "params, expected_error_message",
    [
        ({"scoring": {"accuracy", "accuracy"}}, "Multimetric scoring is not supported"),
        (
            {"resource": "not_a_parameter"},
            "Cannot use resource=not_a_parameter which is not supported",
        ),
        (
            {"resource": "a", "max_resources": 100},
            "Cannot use parameter a as the resource since it is part of",
        ),
        ({"max_resources": "not_auto"}, "max_resources must be either"),
        ({"max_resources": 100.5}, "max_resources must be either"),
        ({"max_resources": -10}, "max_resources must be either"),
        ({"min_resources": "bad str"}, "min_resources must be either"),
        ({"min_resources": 0.5}, "min_resources must be either"),
        ({"min_resources": -10}, "min_resources must be either"),
        (
            {"max_resources": "auto", "resource": "b"},
            "max_resources can only be 'auto' if resource='n_samples'",
        ),
        (
            {"min_resources": 15, "max_resources": 14},
            "min_resources_=15 is greater than max_resources_=14",
        ),
        ({"cv": KFold(shuffle=True)}, "must yield consistent folds"),
        ({"cv": ShuffleSplit()}, "must yield consistent folds"),
        ({"refit": "whatever"}, "refit is expected to be a boolean"),
    ],
)
def test_input_errors(Est, params, expected_error_message):
    """Check that invalid constructor parameters raise ValueError with the
    expected message at fit time (common to both halving searches)."""
    base_estimator = FastClassifier()
    param_grid = {"a": [1]}
    X, y = make_classification(100)
    sh = Est(base_estimator, param_grid, **params)
    with pytest.raises(ValueError, match=expected_error_message):
        sh.fit(X, y)
@pytest.mark.parametrize(
    "params, expected_error_message",
    [
        (
            {"n_candidates": "exhaust", "min_resources": "exhaust"},
            "cannot be both set to 'exhaust'",
        ),
        ({"n_candidates": "bad"}, "either 'exhaust' or a positive integer"),
        ({"n_candidates": 0}, "either 'exhaust' or a positive integer"),
    ],
)
def test_input_errors_randomized(params, expected_error_message):
    """Check n_candidates validation errors raised at fit time."""
    # tests specific to HalvingRandomSearchCV
    base_estimator = FastClassifier()
    param_grid = {"a": [1]}
    X, y = make_classification(100)
    sh = HalvingRandomSearchCV(base_estimator, param_grid, **params)
    with pytest.raises(ValueError, match=expected_error_message):
        sh.fit(X, y)
@pytest.mark.parametrize(
    "fraction, subsample_test, expected_train_size, expected_test_size",
    [
        (0.5, True, 40, 10),
        (0.5, False, 40, 20),
        (0.2, True, 16, 4),
        (0.2, False, 16, 20),
    ],
)
def test_subsample_splitter_shapes(
    fraction, subsample_test, expected_train_size, expected_test_size
):
    """Check train/test split sizes produced by _SubsampleMetaSplitter for
    each fraction, with and without test-set subsampling."""
    # Make sure splits returned by SubsampleMetaSplitter are of appropriate
    # size
    n_samples = 100
    X, y = make_classification(n_samples)
    cv = _SubsampleMetaSplitter(
        base_cv=KFold(5),
        fraction=fraction,
        subsample_test=subsample_test,
        random_state=None,
    )
    for train, test in cv.split(X, y):
        assert train.shape[0] == expected_train_size
        assert test.shape[0] == expected_test_size
        if subsample_test:
            assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
        else:
            assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
@pytest.mark.parametrize("subsample_test", (True, False))
def test_subsample_splitter_determinism(subsample_test):
    """Check which parts of _SubsampleMetaSplitter's output are stable
    across repeated split() calls."""
    # Make sure _SubsampleMetaSplitter is consistent across calls to split():
    # - we're OK having training sets differ (they're always sampled with a
    #   different fraction anyway)
    # - when we don't subsample the test set, we want it to be always the same.
    #   This check is the most important. This is ensured by the determinism
    #   of the base_cv.
    # Note: we could force both train and test splits to be always the same if
    # we drew an int seed in _SubsampleMetaSplitter.__init__
    n_samples = 100
    X, y = make_classification(n_samples)
    cv = _SubsampleMetaSplitter(
        base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None
    )
    folds_a = list(cv.split(X, y, groups=None))
    folds_b = list(cv.split(X, y, groups=None))
    for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):
        assert not np.all(train_a == train_b)
        if subsample_test:
            assert not np.all(test_a == test_b)
        else:
            assert np.all(test_a == test_b)
            assert np.all(X[test_a] == X[test_b])
@pytest.mark.parametrize(
    "k, itr, expected",
    [
        (1, 0, ["c"]),
        (2, 0, ["a", "c"]),
        (4, 0, ["d", "b", "a", "c"]),
        (10, 0, ["d", "b", "a", "c"]),
        (1, 1, ["e"]),
        (2, 1, ["f", "e"]),
        (10, 1, ["f", "e"]),
        (1, 2, ["i"]),
        (10, 2, ["g", "h", "i"]),
    ],
)
def test_top_k(k, itr, expected):
    """Check that _top_k selects the k best params of a given iteration
    from a hand-built results dict (k may exceed the iteration's size)."""
    results = {  # this isn't a 'real world' result dict
        "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2],
        "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9],
        "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
    }
    got = _top_k(results, k=k, itr=itr)
    assert np.all(got == expected)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_cv_results(Est):
    """Check that cv_results_ reflects the tournament logic: candidates kept
    at each iteration are exactly the best of the previous one, and the
    winner is chosen from the last iteration only."""
    # test that the cv_results_ matches correctly the logic of the
    # tournament: in particular that the candidates continued in each
    # successive iteration are those that were best in the previous iteration
    pd = pytest.importorskip("pandas")
    rng = np.random.RandomState(0)
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
    base_estimator = FastClassifier()

    # generate random scores: we want to avoid ties, which would otherwise
    # mess with the ordering and make testing harder
    def scorer(est, X, y):
        return rng.rand()

    sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)
    if Est is HalvingRandomSearchCV:
        # same number of candidates as with the grid
        sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
    sh.fit(X, y)
    # non-regression check for
    # https://github.com/scikit-learn/scikit-learn/issues/19203
    assert isinstance(sh.cv_results_["iter"], np.ndarray)
    assert isinstance(sh.cv_results_["n_resources"], np.ndarray)
    cv_results_df = pd.DataFrame(sh.cv_results_)
    # just make sure we don't have ties
    assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df)
    cv_results_df["params_str"] = cv_results_df["params"].apply(str)
    table = cv_results_df.pivot(
        index="params_str", columns="iter", values="mean_test_score"
    )
    # table looks like something like this:
    # iter                    0      1       2        3   4   5
    # params_str
    # {'a': 'l2', 'b': 23} 0.75    NaN     NaN      NaN NaN NaN
    # {'a': 'l1', 'b': 30} 0.90  0.875     NaN      NaN NaN NaN
    # {'a': 'l1', 'b': 0}  0.75    NaN     NaN      NaN NaN NaN
    # {'a': 'l2', 'b': 3}  0.85  0.925  0.9125  0.90625 NaN NaN
    # {'a': 'l1', 'b': 5}  0.80    NaN     NaN      NaN NaN NaN
    # ...
    # where a NaN indicates that the candidate wasn't evaluated at a given
    # iteration, because it wasn't part of the top-K at some previous
    # iteration. We here make sure that candidates that aren't in the top-k at
    # any given iteration are indeed not evaluated at the subsequent
    # iterations.
    nan_mask = pd.isna(table)
    n_iter = sh.n_iterations_
    for it in range(n_iter - 1):
        already_discarded_mask = nan_mask[it]
        # make sure that if a candidate is already discarded, we don't evaluate
        # it later
        assert (
            already_discarded_mask & nan_mask[it + 1] == already_discarded_mask
        ).all()
        # make sure that the number of discarded candidate is correct
        discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]
        kept_mask = ~already_discarded_mask & ~discarded_now_mask
        assert kept_mask.sum() == sh.n_candidates_[it + 1]
        # make sure that all discarded candidates have a lower score than the
        # kept candidates
        discarded_max_score = table[it].where(discarded_now_mask).max()
        kept_min_score = table[it].where(kept_mask).min()
        assert discarded_max_score < kept_min_score
    # We now make sure that the best candidate is chosen only from the last
    # iteration.
    # We also make sure this is true even if there were higher scores in
    # earlier rounds (this isn't generally the case, but worth ensuring it's
    # possible).
    last_iter = cv_results_df["iter"].max()
    idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][
        "mean_test_score"
    ].idxmax()
    idx_best_all_iters = cv_results_df["mean_test_score"].idxmax()
    assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"]
    assert (
        cv_results_df.iloc[idx_best_last_iter]["mean_test_score"]
        < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"]
    )
    assert (
        cv_results_df.iloc[idx_best_last_iter]["params"]
        != cv_results_df.iloc[idx_best_all_iters]["params"]
    )
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_base_estimator_inputs(Est):
    """Spy on the base estimator to verify the params and sample counts it
    receives at each iteration match cv_results_."""
    # make sure that the base estimators are passed the correct parameters and
    # number of samples at each iteration.
    pd = pytest.importorskip("pandas")
    passed_n_samples_fit = []
    passed_n_samples_predict = []
    passed_params = []

    class FastClassifierBookKeeping(FastClassifier):
        # Records every fit/predict sample count and every set_params call
        # into the closure lists above.
        def fit(self, X, y):
            passed_n_samples_fit.append(X.shape[0])
            return super().fit(X, y)

        def predict(self, X):
            passed_n_samples_predict.append(X.shape[0])
            return super().predict(X)

        def set_params(self, **params):
            passed_params.append(params)
            return super().set_params(**params)

    n_samples = 1024
    n_splits = 2
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
    base_estimator = FastClassifierBookKeeping()
    sh = Est(
        base_estimator,
        param_grid,
        factor=2,
        cv=n_splits,
        return_train_score=False,
        refit=False,
    )
    if Est is HalvingRandomSearchCV:
        # same number of candidates as with the grid
        sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
    sh.fit(X, y)
    assert len(passed_n_samples_fit) == len(passed_n_samples_predict)
    passed_n_samples = [
        x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict)
    ]
    # Lists are of length n_splits * n_iter * n_candidates_at_i.
    # Each chunk of size n_splits corresponds to the n_splits folds for the
    # same candidate at the same iteration, so they contain equal values. We
    # subsample such that the lists are of length n_iter * n_candidates_at_it
    passed_n_samples = passed_n_samples[::n_splits]
    passed_params = passed_params[::n_splits]
    cv_results_df = pd.DataFrame(sh.cv_results_)
    assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)
    uniques, counts = np.unique(passed_n_samples, return_counts=True)
    assert (sh.n_resources_ == uniques).all()
    assert (sh.n_candidates_ == counts).all()
    assert (cv_results_df["params"] == passed_params).all()
    assert (cv_results_df["n_resources"] == passed_n_samples).all()
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_groups_support(Est):
    """Check group-aware CV splitters require `groups` and that it is
    forwarded, while non-group splitters work without it."""
    # Check if ValueError (when groups is None) propagates to
    # HalvingGridSearchCV and HalvingRandomSearchCV
    # And also check if groups is correctly passed to the cv object
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=50, n_classes=2, random_state=0)
    groups = rng.randint(0, 3, 50)
    clf = LinearSVC(random_state=0)
    grid = {"C": [1]}
    group_cvs = [
        LeaveOneGroupOut(),
        LeavePGroupsOut(2),
        GroupKFold(n_splits=3),
        GroupShuffleSplit(random_state=0),
    ]
    error_msg = "The 'groups' parameter should not be None."
    for cv in group_cvs:
        gs = Est(clf, grid, cv=cv)
        with pytest.raises(ValueError, match=error_msg):
            gs.fit(X, y)
        gs.fit(X, y, groups=groups)
    non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)]
    for cv in non_group_cvs:
        gs = Est(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)
@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV])
def test_min_resources_null(SearchCV):
    """Check that we raise an error if the minimum resources is set to 0."""
    base_estimator = FastClassifier()
    param_grid = {"a": [1]}
    # An empty dataset (0 rows) drives min_resources_ to 0.
    X = np.empty(0).reshape(0, 3)
    search = SearchCV(base_estimator, param_grid, min_resources="smallest")
    err_msg = "min_resources_=0: you might have passed an empty dataset X."
    with pytest.raises(ValueError, match=err_msg):
        search.fit(X, [])
@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV])
def test_select_best_index(SearchCV):
    """Check the selection strategy of the halving search."""
    results = {  # this isn't a 'real world' result dict
        "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
        "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
        "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]),
    }
    # we expect the index of 'i': best score within the last iteration only
    best_index = SearchCV._select_best_index(None, None, results)
    assert best_index == 8
| {
"content_hash": "dfea29d9303befabb37f576547409b95",
"timestamp": "",
"source": "github",
"line_count": 707,
"max_line_length": 88,
"avg_line_length": 35.605374823196605,
"alnum_prop": 0.618519842688595,
"repo_name": "amueller/scikit-learn",
"id": "93365809cb4d6b2b8592fb824db81c46f2e016c2",
"size": "25173",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/model_selection/tests/test_successive_halving.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41206"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9958394"
},
{
"name": "Shell",
"bytes": "44588"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
def set_serialize_factor(serialize_factor):
    """Stamp a 'serialize_factor' attribute onto the most recently added op
    of the default main program (the matmul built just before the call)."""
    program = paddle.static.default_main_program()
    last_op = program.current_block().ops[-1]
    last_op._set_attr('serialize_factor', serialize_factor)
class TestBase(IPUOpTest):
    """Compares a serialized matmul executed on IPU against a CPU run."""

    def setUp(self):
        # Standard IPUOpTest initialisation hooks.
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    def set_data_feed(self):
        # Random fp32 operands for a [16, 32] x [32, 16] matmul.
        self.feed = {
            "x": np.random.uniform(size=[16, 32]).astype('float32'),
            "y": np.random.uniform(size=[32, 16]).astype('float32'),
        }

    def set_feed_attr(self):
        # Derive shapes/names/dtypes from the feed dict for build_model().
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [x.dtype for x in self.feed.values()]

    def set_op_attrs(self):
        # Keyword arguments forwarded to paddle.matmul.
        self.attrs = {"transpose_x": False, "transpose_y": False}

    @IPUOpTest.static_graph
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0],
            shape=self.feed_shape[0],
            dtype=self.feed_dtype[0],
        )
        y = paddle.static.data(
            name=self.feed_list[1],
            shape=self.feed_shape[1],
            dtype=self.feed_dtype[1],
        )
        # A decorator might be the cleaner way to attach the attribute, but
        # that would need an API change; instead tag the last op directly.
        out = paddle.matmul(x, y, **self.attrs)
        set_serialize_factor(4)
        self.fetch_list = [out.name]

    def run_model(self, run_ipu):
        # Build the graph, then run it on IPU or CPU depending on run_ipu;
        # returns the first fetched tensor.
        self.build_model()
        if run_ipu:
            place = paddle.IPUPlace()
        else:
            place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(self.startup_prog)
        if run_ipu:
            # Compile the main program for IPU before execution.
            feed_list = self.feed_list
            ipu_strategy = paddle.static.IpuStrategy()
            ipu_strategy.set_graph_config(is_training=self.is_training)
            program = paddle.static.IpuCompiledProgram(
                self.main_prog, ipu_strategy=ipu_strategy
            ).compile(feed_list, self.fetch_list)
        else:
            program = self.main_prog
        result = exe.run(program, feed=self.feed, fetch_list=self.fetch_list)
        return result[0]

    def test_base(self):
        # CPU result is the reference; IPU result must match within atol.
        res0 = self.run_model(False)
        res1 = self.run_model(True)
        np.testing.assert_allclose(
            res0.flatten(), res1.flatten(), rtol=1e-05, atol=self.atol
        )
        self.assertTrue(res0.shape == res1.shape)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "a11daba30c6591854880f1ecafd64868",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.5818181818181818,
"repo_name": "luotao1/Paddle",
"id": "6ee930f2475302aca93cf8875a127ffee95144fa",
"size": "3251",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
from HandleFiles import HandleFiles
from DbHelper import DbHelper
# NOTE(review): raw_input only exists on Python 2 -- this script targets
# Python 2 and will NameError on Python 3 without porting.
path = raw_input('HandleFiles in Path : ')
a = HandleFiles()
b = DbHelper()
# Ensure the destination table exists before any inserts.
b.create_table_on_db()
a.scan_for_files_with_extension(path)
# get_file() appears to return one " - "-separated string of names --
# TODO confirm against the HandleFiles implementation.
file = a.get_file()  # NOTE(review): `file` shadows the Python 2 builtin
files = file.split(" - ")
for file in files:
    if not file: continue  # skip empty fragments produced by the split
    b.insert_value_in_table(file)
| {
"content_hash": "014e4a56bc686899024d7c9b420aa1a1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 42,
"avg_line_length": 16.8,
"alnum_prop": 0.6964285714285714,
"repo_name": "AlucarDWeb/HandleFiles",
"id": "ae85e08c773fafef7935dbfc23c714c30ce66e54",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1772"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for scatterternary.marker.colorbar.tickcolor."""

    def __init__(
        self,
        plotly_name="tickcolor",
        parent_name="scatterternary.marker.colorbar",
        **kwargs,
    ):
        # Callers may override edit_type; default to "colorbars".
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TickcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "7b0d2d018bb286f4177396e5e253492b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 29.3125,
"alnum_prop": 0.5927505330490405,
"repo_name": "plotly/plotly.py",
"id": "d29d47c7723c26f6417b3aaecef0077b4881b844",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_tickcolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import os
from MuseParse.tests.testUsingXML.xmlSet import xmlSet, parsePiece
from MuseParse.classes.ObjectHierarchy.ItemClasses import Directions
from MuseParse.classes.ObjectHierarchy.TreeClasses.PartNode import PartNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.MeasureNode import MeasureNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.BaseTree import Search
from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import Placeholder
from MuseParse.classes.ObjectHierarchy.TreeClasses.NoteNode import NoteNode
from MuseParse.classes.ObjectHierarchy.TreeClasses.OtherNodes import DirectionNode
from MuseParse.SampleMusicXML import testcases
# Sample MusicXML file exercised by every test class below.
partname = "repeatMarks.xml"
# NOTE(review): reaches into the private namespace-path attribute `_path`;
# a public route would be os.path.dirname(testcases.__file__) -- confirm.
directory = testcases.__path__._path[0]
# Parse once at import time; the test classes share this parsed tree.
piece = parsePiece(os.path.join(directory, partname))
class testFile(xmlSet):
    """Smoke tests: the parsed piece exposes the expected part/measures."""

    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32        # measure number looked up in testMeasures
        self.p_id = "P1"       # part id expected in the parsed score
        self.p_name = "Flute"  # expected display name of that part

    def testParts(self):
        # `piece` is the module-level parsed score; reading a module global
        # needs no `global` declaration (the original one was a no-op).
        self.assertIsInstance(piece.getPart(self.p_id), PartNode)
        self.assertEqual(self.p_name, piece.getPart(self.p_id).GetItem().name)

    def testMeasures(self):
        self.assertIsInstance(
            piece.getPart(self.p_id).getMeasure(self.m_num, 1),
            MeasureNode,
        )
class testSegno(xmlSet):
    """Checks that measure 2 of part P1 carries a 'segno' repeat mark."""

    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.p_name = "Flute"
        self.segno = "segno"  # expected value of the measure's segno attribute
        self.measure_id = 2
        self.item_id = 0
        # NOTE(review): these hasattr guards are always true here -- the
        # attributes are assigned unconditionally just above.
        if hasattr(self, "measure_id"):
            self.measure = piece.getPart(
                self.p_id).getMeasure(
                self.measure_id, 1)
        if hasattr(self, "item_id"):
            # Locate the direction attached to the measure's first note.
            note = Search(NoteNode, self.measure, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()

    def testHasAttr(self):
        # The parsed measure should expose a `segno` attribute.
        if hasattr(self, "measure"):
            self.assertTrue(hasattr(self.measure, "segno"))

    def testValue(self):
        if hasattr(self, "measure"):
            self.assertEqual(self.segno, self.measure.segno)
class testCoda(xmlSet):
    """Checks that measure 3 of part P1 carries a 'coda' repeat mark."""

    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.p_name = "Flute"
        self.measure_id = 3
        self.item_id = 0
        self.coda = "coda"  # expected value of the measure's coda attribute
        # NOTE(review): these hasattr guards are always true here -- the
        # attributes are assigned unconditionally just above.
        if hasattr(self, "measure_id"):
            self.measure = piece.getPart(
                self.p_id).getMeasure(
                self.measure_id, 1)
        if hasattr(self, "item_id"):
            # Locate the direction attached to the measure's first note.
            note = Search(NoteNode, self.measure, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()

    def testHasAttr(self):
        # The parsed measure should expose a `coda` attribute.
        if hasattr(self, "measure"):
            self.assertTrue(hasattr(self.measure, "coda"))

    def testValue(self):
        if hasattr(self, "measure"):
            self.assertEqual(self.coda, self.measure.coda)
class testFine(xmlSet):
    """Measure 6 should be flagged 'fine' and hold a 'Fine' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.p_name = "Flute"
        self.measure_id = 6
        self.item_id = 1
        self.fine = True
        if hasattr(self, "measure_id"):
            part = piece.getPart(self.p_id)
            self.measure = part.getMeasure(self.measure_id, 1)
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measure, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        if hasattr(self, "measure"):
            self.assertTrue(hasattr(self.measure, "fine"))
    def testValue(self):
        if hasattr(self, "measure"):
            self.assertEqual(self.fine, self.measure.fine)
    def testItem(self):
        # The direction attached to the measure should be a Direction object.
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("Fine", self.item.text)
class testDaCapo(xmlSet):
    """Measure 7 should be flagged dacapo and hold a 'D.C.' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.measure_id = 7
        self.item_id = 1
        self.dacapo = True
        if hasattr(self, "measure_id"):
            part = piece.getPart(self.p_id)
            self.measure = part.getMeasure(self.measure_id, 1)
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measure, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        if hasattr(self, "measure"):
            self.assertTrue(hasattr(self.measure, "dacapo"))
    def testValue(self):
        if hasattr(self, "measure"):
            self.assertEqual(self.dacapo, self.measure.dacapo)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("D.C.", self.item.text)
class testDaCapoAlFine(xmlSet):
    """Measure 8 should be flagged dacapo with a 'D.C. al Fine' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.measure_id = 8
        self.item_id = 1
        self.dacapo = True
        if hasattr(self, "measure_id"):
            self.measureNode = piece.getPart(
                self.p_id).getMeasure(
                self.measure_id, 1)
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measureNode, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        # BUG FIX: the guard previously tested hasattr(self, "measure"),
        # an attribute this class never sets, so the assertion below was
        # silently skipped (a vacuous test). Guard on measureNode instead.
        if hasattr(self, "measureNode"):
            self.assertTrue(hasattr(self.measureNode, "dacapo"))
    def testValue(self):
        if hasattr(self, "measureNode"):
            self.assertEqual(self.dacapo, self.measureNode.dacapo)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("D.C. al Fine", self.item.text)
class testDaCapoAlCoda(xmlSet):
    """Measure 9 should be flagged dacapo with a 'D.C. al Coda' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.measure_id = 9
        self.item_id = 1
        self.dacapo = True
        if hasattr(self, "measure_id"):
            self.measureNode = piece.getPart(
                self.p_id).getMeasure(
                self.measure_id, 1)
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measureNode, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        # BUG FIX: the guard previously tested hasattr(self, "measure"),
        # which setUp never sets, so the assertion was silently skipped.
        if hasattr(self, "measureNode"):
            self.assertTrue(hasattr(self.measureNode, "dacapo"))
    def testValue(self):
        if hasattr(self, "measureNode"):
            self.assertEqual(self.dacapo, self.measureNode.dacapo)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("D.C. al Coda", self.item.text)
class testDalSegnoAlCoda(xmlSet):
    """Measure 10 should carry dalsegno with a 'D.S. al Coda' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.p_id = "P1"
        self.measure_id = 10
        self.item_id = 1
        self.dalsegno = "segno"
        if hasattr(self, "measure_id"):
            self.measureNode = piece.getPart(
                self.p_id).getMeasure(
                self.measure_id, 1)
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measureNode, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        # BUG FIX: the guard previously tested hasattr(self, "measure"),
        # which setUp never sets here, so the assertions were silently
        # skipped — and the bodies referenced the nonexistent self.measure.
        # Use measureNode consistently instead.
        if hasattr(self, "measureNode"):
            self.assertTrue(hasattr(self.measureNode, "dalsegno"))
    def testValue(self):
        if hasattr(self, "measureNode"):
            self.assertEqual(self.dalsegno, self.measureNode.dalsegno)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("D.S. al Coda", self.item.text)
class testDalSegnoAlFine(xmlSet):
    """Measure 12 should carry dalsegno with a 'D.S. al Fine' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.m_num = 32
        self.measure_id = 12
        self.item_id = 1
        self.dalsegno = "segno"
        self.p_id = "P1"
        if hasattr(self, "measure_id"):
            part = piece.getPart(self.p_id)
            self.measureNode = part.getMeasure(self.measure_id, 1)
            self.measure = self.measureNode.GetItem()
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measureNode, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        # Guard on self.measure (set alongside measureNode in setUp).
        if hasattr(self, "measure"):
            self.assertTrue(hasattr(self.measureNode, "dalsegno"))
    def testValue(self):
        if hasattr(self, "measure"):
            self.assertEqual(self.dalsegno, self.measureNode.dalsegno)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("D.S. al Fine", self.item.text)
class testDalSegno(xmlSet):
    """Measure 13 should carry dalsegno with a 'D.S.' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.measure_id = 13
        self.item_id = 1
        self.dalsegno = "segno"
        self.m_num = 32
        self.p_id = "P1"
        if hasattr(self, "measure_id"):
            part = piece.getPart(self.p_id)
            self.measureNode = part.getMeasure(self.measure_id, 1)
            self.measure = self.measureNode.GetItem()
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measureNode, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        # Guard on self.measure (set alongside measureNode in setUp).
        if hasattr(self, "measure"):
            self.assertTrue(hasattr(self.measureNode, "dalsegno"))
    def testValue(self):
        if hasattr(self, "measure"):
            self.assertEqual(self.dalsegno, self.measureNode.dalsegno)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("D.S.", self.item.text)
class testToCoda(xmlSet):
    """Measure 14 should carry tocoda with a 'To Coda' direction."""
    def setUp(self):
        xmlSet.setUp(self)
        self.measure_id = 14
        self.item_id = 1
        self.tocoda = "coda"
        self.m_num = 32
        self.p_id = "P1"
        if hasattr(self, "measure_id"):
            self.measureNode = piece.getPart(
                self.p_id).getMeasure(
                self.measure_id, 1)
        if hasattr(self, "item_id"):
            note = Search(Placeholder, self.measureNode, 1)
            self.item = Search(DirectionNode, note, 1).GetItem()
    def testHasAttr(self):
        # BUG FIX: the guard previously tested hasattr(self, "measure"),
        # which setUp never sets, so the assertion was silently skipped.
        if hasattr(self, "measureNode"):
            self.assertTrue(hasattr(self.measureNode, "tocoda"))
    def testValue(self):
        if hasattr(self, "measureNode"):
            self.assertEqual(self.tocoda, self.measureNode.tocoda)
    def testItem(self):
        if hasattr(self, "item"):
            self.assertIsInstance(self.item, Directions.Direction)
    def testItemType(self):
        if hasattr(self, "item"):
            self.assertEqual("To Coda", self.item.text)
| {
"content_hash": "3a8fca620ac8b5c425e9283e1eda2f9e",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 82,
"avg_line_length": 30.16976127320955,
"alnum_prop": 0.5852822226129769,
"repo_name": "Godley/MuseParse",
"id": "6a68bb0c930b17e97556ac8d00a799a48095360e",
"size": "11374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MuseParse/tests/testUsingXML/testRepeatMarks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "LilyPond",
"bytes": "158134"
},
{
"name": "Python",
"bytes": "544511"
},
{
"name": "Shell",
"bytes": "604"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
# Register the keystone_authtoken option group with the global config so the
# set_override() calls in the test setUp below have known options to override.
cfg.CONF.import_group('keystone_authtoken',
                      'keystonemiddleware.auth_token')
import keystoneclient.exceptions as kc_exception # noqa
from solum.common import exception
from solum.common import solum_keystoneclient
from solum.tests import base
from solum.tests import utils
@mock.patch('keystoneclient.v3.client.Client')
class KeystoneClientTest(base.BaseTestCase):
    """Test cases for solum.common.solum_keystoneclient.

    The class decorator patches the keystone v3 client, so every test
    method receives the mock class as its ``mock_ks`` argument.
    """
    def setUp(self):
        # Build a dummy request context and register the keystone_authtoken
        # admin credentials that KeystoneClientV3 reads from configuration.
        super(KeystoneClientTest, self).setUp()
        dummy_url = 'http://server.test:5000/v2.0'
        self.ctx = utils.dummy_context()
        self.ctx.auth_url = dummy_url
        self.ctx.auth_token = 'abcd1234'
        self.ctx.auth_token_info = None
        cfg.CONF.set_override('auth_uri', dummy_url,
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_user', 'solum',
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_password', 'verybadpass',
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_tenant_name', 'service',
                              group='keystone_authtoken')
    def test_init_v3_token(self, mock_ks):
        """Test creating the client, token auth."""
        self.ctx.tenant = None
        self.ctx.trust_id = None
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        # Touch the lazy property to trigger client construction.
        solum_ks_client.client
        self.assertIsNotNone(solum_ks_client._client)
        # The v2.0 auth_uri configured above should be converted to v3.
        mock_ks.assert_called_once_with(token='abcd1234', project_id=None,
                                        auth_url='http://server.test:5000/v3',
                                        endpoint='http://server.test:5000/v3')
        mock_ks.return_value.authenticate.assert_called_once_with()
    def test_init_v3_bad_nocreds(self, mock_ks):
        """Test creating the client, no credentials."""
        self.ctx.auth_token = None
        self.ctx.trust_id = None
        self.ctx.username = None
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.AuthorizationFailure,
                          solum_ks_client._v3_client_init)
    def test_init_trust_token_access(self, mock_ks):
        """Test creating the client, token auth."""
        self.ctx.tenant = 'abcd1234'
        self.ctx.trust_id = None
        # v2-style token info: the client should build a v2.0 auth_ref.
        self.ctx.auth_token_info = {'access': {'token': {'id': 'placeholder'}}}
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        solum_ks_client.client
        self.assertIsNotNone(solum_ks_client._client)
        mock_ks.assert_called_once_with(auth_ref={'version': 'v2.0',
                                                  'token': {
                                                      'id': 'abcd1234'}},
                                        endpoint='http://server.test:5000/v3',
                                        auth_url='http://server.test:5000/v3')
    def test_init_trust_token_token(self, mock_ks):
        """Test creating the client from a v3 token-info dict."""
        self.ctx.tenant = None
        self.ctx.trust_id = None
        self.ctx.auth_token_info = {'token': {}}
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        solum_ks_client.client
        self.assertIsNotNone(solum_ks_client._client)
        mock_ks.assert_called_once_with(auth_ref={'auth_token': 'abcd1234',
                                                  'version': 'v3'},
                                        endpoint='http://server.test:5000/v3',
                                        auth_url='http://server.test:5000/v3')
    def test_init_trust_token_none(self, mock_ks):
        """Test that an unrecognized auth_token_info shape fails auth."""
        self.ctx.tenant = None
        self.ctx.trust_id = None
        self.ctx.auth_token_info = {'not_this': 'urg'}
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.AuthorizationFailure,
                          solum_ks_client._v3_client_init)
    def test_create_trust_context_trust_id(self, mock_ks):
        """Test create_trust_context with existing trust_id."""
        self.ctx.trust_id = 'atrust123'
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        trust_context = solum_ks_client.create_trust_context()
        # With an existing trust the context is returned unchanged.
        self.assertEqual(self.ctx.to_dict(), trust_context.to_dict())
        mock_ks.assert_called_once_with(username='solum',
                                        auth_url='http://server.test:5000/v3',
                                        password='verybadpass',
                                        endpoint='http://server.test:5000/v3',
                                        trust_id='atrust123')
        mock_ks.return_value.authenticate.assert_called_once_with()
    def test_create_trust_context_trust_create(self, mock_ks):
        """Test create_trust_context when creating a trust."""
        class FakeTrust(object):
            id = 'atrust123'
        cfg.CONF.set_override('trusts_delegated_roles',
                              ['solum_assembly_update'])
        # user_id is read twice: first by the admin client (trustee),
        # then by the user client (trustor).
        getter_mock = mock.PropertyMock(side_effect=['1234', '5678'])
        type(mock_ks.return_value.auth_ref).user_id = getter_mock
        mock_ks.return_value.auth_ref.project_id = '42'
        mock_ks.return_value.trusts.create.return_value = FakeTrust()
        self.ctx.trust_id = None
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        trust_context = solum_ks_client.create_trust_context()
        # admin_client and user client
        expected = [mock.call(username='solum',
                              project_name='service',
                              password='verybadpass',
                              auth_url='http://server.test:5000/v3',
                              endpoint='http://server.test:5000/v3'),
                    mock.call(token='abcd1234',
                              project_id='fake_project_id',
                              auth_url='http://server.test:5000/v3',
                              endpoint='http://server.test:5000/v3')]
        self.assertEqual(expected, mock_ks.call_args_list)
        self.assertEqual([mock.call(), mock.call()],
                         mock_ks.return_value.authenticate.call_args_list)
        # trust creation
        self.assertEqual('atrust123', trust_context.trust_id)
        mock_ks.return_value.trusts.create.assert_called_once_with(
            trustor_user='5678',
            trustee_user='1234',
            project='42',
            impersonation=True,
            role_names=['solum_assembly_update'])
    def test_init_admin_client_denied(self, mock_ks):
        """Test the admin_client property, auth failure path."""
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.trust_id = None
        mock_ks.return_value.authenticate.return_value = False
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        # Define wrapper for property or the property raises the exception
        # outside of the assertRaises which fails the test
        def get_admin_client():
            solum_ks_client.admin_client
        self.assertRaises(exception.AuthorizationFailure,
                          get_admin_client)
    def test_init_lp_admin_client_denied(self, mock_ks):
        """Test the get_lp_admin_client property, auth failure path."""
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.trust_id = None
        mock_ks.return_value.authenticate.return_value = False
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        # Define wrapper for property or the property raises the exception
        # outside of the assertRaises which fails the test
        def get_lp_admin_client():
            solum_ks_client.lp_admin_client
        self.assertRaises(exception.AuthorizationFailure,
                          get_lp_admin_client)
    def test_init_with_no_context(self, mock_ks):
        """Init with no context."""
        mock_ks.return_value.authenticate.return_value = False
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(None)
        self.assertEqual(solum_ks_client.endpoint,
                         'http://server.test:5000/v3')
    def test_trust_init_fail(self, mock_ks):
        """Test consuming a trust when initializing, error scoping."""
        self.ctx.username = None
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        mock_ks.return_value.auth_ref.trust_scoped = False
        self.assertRaises(exception.AuthorizationFailure,
                          solum_keystoneclient.KeystoneClientV3, self.ctx)
    def test_trust_init_token(self, mock_ks):
        """Test trust_id takes precedence when token specified."""
        self.ctx.username = None
        self.ctx.trust_id = 'atrust123'
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertIsNotNone(solum_ks_client._client)
        mock_ks.assert_called_once_with(username='solum',
                                        auth_url='http://server.test:5000/v3',
                                        password='verybadpass',
                                        endpoint='http://server.test:5000/v3',
                                        trust_id='atrust123')
        mock_ks.return_value.authenticate.assert_called_once_with()
    def test_delete_trust(self, mock_ks):
        """Test delete_trust when deleting trust."""
        mock_ks.return_value.trusts.delete.return_value = None
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertIsNone(solum_ks_client.delete_trust(trust_id='atrust123'))
        mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123')
    def test_delete_trust_not_found(self, mock_ks):
        """Test delete_trust when trust already deleted."""
        # A NotFound from keystone should be swallowed and None returned.
        mock_delete = mock_ks.return_value.trusts.delete
        mock_delete.side_effect = kc_exception.NotFound()
        solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertIsNone(solum_ks_client.delete_trust(trust_id='atrust123'))
| {
"content_hash": "5138d6168d982beef7c8dff381c6b9ed",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 79,
"avg_line_length": 45.11453744493392,
"alnum_prop": 0.5875402792696026,
"repo_name": "devdattakulkarni/test-solum",
"id": "55ac331f16a365f9819f0c21c560e12e18baf1f2",
"size": "10827",
"binary": false,
"copies": "1",
"ref": "refs/heads/add-virt-driver",
"path": "solum/tests/common/test_solum_keystoneclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1240622"
},
{
"name": "Shell",
"bytes": "82235"
}
],
"symlink_target": ""
} |
import json
import itemlookup
import sys
import os
from riotcall import league
# NOTE(security): an API key should not be committed to source control.
# The RIOT_API_KEY environment variable takes precedence; the literal below
# is kept only as a backward-compatible fallback.
_FALLBACK_KEY = 'RGAPI-5593b315-1fc8-4ad8-be9e-45474b1f7308'

# Champion-config folder of a default Windows League of Legends install.
_DEFAULT_CHAMPIONS_DIR = r'C:\Riot Games\League of Legends\Config\Champions'

# Riot reserves item-set file names ending in these map/mode suffixes.
_RESERVED_ENDINGS = ('SR', 'TT', 'DM', 'SC', 'PG')


def _fetch_items(key):
    """Return the static item data from the Riot API, or exit on failure."""
    r = league.item_static(key)
    if r.status_code != 200:  # Ensure the request was successful
        sys.stderr.write("Unsuccessful API call %d\nPlease try again later"
                         % r.status_code)
        sys.exit(1)
    return r.json()


def _find_champions_dir():
    """Locate the Config/Champions folder, prompting the user if needed."""
    if os.path.exists(_DEFAULT_CHAMPIONS_DIR):
        return _DEFAULT_CHAMPIONS_DIR
    while True:
        root = input('Give the path to your League of Legends Folder\n')
        # os.path.join avoids fragile backslash string concatenation.
        candidate = os.path.join(root, 'Config', 'Champions')
        if os.path.exists(candidate):
            return candidate
        sys.stderr.write('Invalid file location, please try again\n')


def _champion_recommended_dir(champions_dir):
    """Prompt for a champion name and return its Recommended folder path."""
    while True:
        champ = input('What Champion is this item set for?\n'
                      'Please Capitalize the first letter of every word\n')
        # Strip characters that never appear in on-disk folder names.
        for junk in (' ', "'", '.'):
            champ = champ.replace(junk, '')
        # Riot's on-disk folder names differ from display names for these.
        if champ == 'Wukong':
            champ = 'MonkeyKing'
        elif champ == 'Fiddlesticks':
            champ = 'FiddleSticks'
        candidate = os.path.join(champions_dir, champ, 'Recommended')
        if os.path.exists(candidate):
            return candidate
        print("Error in finding champions, please try again")


def _prompt_set_name():
    """Prompt for an item-set name that does not use a reserved suffix."""
    while True:
        name = input('What would you like to name this item set?\n')
        if name.endswith(_RESERVED_ENDINGS):
            print("Invalid Name, name cannot end with SR, TT, DM, SC, or PG")
        else:
            return name


def _prompt_item_count():
    """Prompt until the user supplies a positive integer item count."""
    while True:
        raw = input('How many would you like to add? --> ')
        try:
            count = int(raw)
        except (TypeError, ValueError):
            print('Error, must input an integer, try again')
            continue
        if count < 1:
            print('Error, item count must be greater than 0, try again')
        else:
            return count


def _build_blocks(allitems):
    """Interactively assemble and return the list of item blocks."""
    blocks = []
    counter = 1
    while True:
        block = {'type': input('Name of item block #%d: ' % counter)}
        items = []
        while True:
            itemname = input('Item to add to this block --> ')
            itemid = itemlookup.item_name_util(itemname, allitems)
            if itemid == -1:
                print('Could not find item, try again')
                continue
            items.append({'id': itemid, 'count': _prompt_item_count()})
            # Any answer other than 'n' is treated as yes.
            if input('Would you like to add another item to this block? '
                     '(Y/n)') == 'n':
                break
        block['items'] = items
        counter += 1
        blocks.append(block)
        if input('Would you like to create another block? (Y/n) ') == 'n':
            return blocks


def main():
    """Drive the interactive item-set builder and write the set to disk."""
    key = os.environ.get('RIOT_API_KEY', _FALLBACK_KEY)
    allitems = _fetch_items(key)
    recommended_dir = _champion_recommended_dir(_find_champions_dir())
    name = _prompt_set_name()
    # Base dictionary for the item set, per Riot's item-set JSON schema.
    itemset = {"title": name, "type": 'custom', "map": 'any', "mode": 'any',
               "priority": False, "sortrank": 0}
    itemset['blocks'] = _build_blocks(allitems)
    with open(os.path.join(recommended_dir, name + '.json'), "w") as fout:
        # dumps() with indent/sort_keys keeps the on-disk JSON readable.
        fout.write(json.dumps(itemset, indent=4, sort_keys=True))
    print('Done!')


if __name__ == "__main__":
    main()
| {
"content_hash": "1bf31cd98b7ef15ae57f8214b779ba4f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 120,
"avg_line_length": 45.81666666666667,
"alnum_prop": 0.49836304110585666,
"repo_name": "LeulBM/ItemSetInteractions",
"id": "b8dc37acd9e49b3019073912e3035d182e7eef7f",
"size": "5522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setcreator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22090"
}
],
"symlink_target": ""
} |
pqueue.
| {
"content_hash": "2f254f8113c900e9e8ed98bd5f2bc849",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 7,
"avg_line_length": 8,
"alnum_prop": 0.75,
"repo_name": "SarahPythonista/acmpy",
"id": "f1bab14e15d9c4c5cd8852a95632b06869ab966d",
"size": "8",
"binary": false,
"copies": "1",
"ref": "refs/heads/stylistic",
"path": "spgl/datastructures/pqueue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7737"
},
{
"name": "Python",
"bytes": "404025"
}
],
"symlink_target": ""
} |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class TestimonialsApphook(CMSApp):
    """Attach the testimonials URL configuration to a django CMS page."""

    app_name = 'testimonial'
    name = _("Testimonials Apphook")

    def get_urls(self, page=None, language=None, **kwargs):
        # django CMS expects a list of dotted urlconf module paths.
        return ['allink_apps.testimonials.urls']


apphook_pool.register(TestimonialsApphook)
| {
"content_hash": "c36e59fdbca07cb7110d00e6ab6402f3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.7422680412371134,
"repo_name": "allink/allink-apps",
"id": "8bc07243b68e54350996869f4306a0deefcd763b",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testimonials/cms_apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "994"
},
{
"name": "HTML",
"bytes": "47533"
},
{
"name": "Python",
"bytes": "183917"
}
],
"symlink_target": ""
} |
"""
These examples illustrate algorithms implemented by nondeterministic
atomic actions. The examples are from the book, "Parallel Program
Design: A Foundation" by Chandy & Misra.
The efficient way to solve most problems is to execute a deterministic
sequence of actions. The examples are presented merely to show how
IoTPy can implement nondeterministic algorithms written in the UNITY
framework (see the book).
In any point in a computation, at most one agent in a process can
execute an action. Execution proceeds as follows. One agent in a
process is selected nondeterministically and fairly to execute an
action. If the selected agent has not read all the items in its input
streams then the agent executes an action, i.e., it reads the values
currently in its input streams and as a consequence it may append new
values to its output streams. If the selected agent has read all the
items in its input stream then the action is a skip, i.e. nothing is
changed.
Fair selection is the same as in UNITY: every agent will be selected
for execution eventually. For each agent x, at each point in the
computation there will be a later point at which agent x will execute
an action.
If an agent has read all the values in its input streams then the
action of the agent at that point is a skip. If a program consists of
a single process and all agents of the process execute only skips then
the program has reached a fixed point: no values in the program change
from that point onwards. The system detects a fixed point and
execution of the process terminates.
If a program has multiple processes then a separate termination
detection algorithm has to be executed to determine if the program has
reached a fixed point.
An agent in one process communicates with an agent in a different
process through message passing. Agents in different processes do not
share memory.
Agents in different processes can execute actions concurrently;
however, for the purposes of reasoning about the program we can assume
that at each point in the computation only one agent in one process
executes an action.
The following examples illustrate how agents in a single process
operate on shared variables. These examples show how SIGNAL agents
use shared variables. A signal agent takes an action when there are
new values on its input streams. The agent doesn't inspect the values;
it takes an action regardless of the values and the number of new
values. Its input streams are merely signalling mechanisms to take
action.
Usually, an agent appends a value to a signaling stream to indicate
that the agent has changed a shared variable; an agent that has this
stream as an input stream can then take an action that reads the new
value of the shared variable. Since the value on the signaling stream
is arbitrary, any value can be used; we use the object _changed to
indicate that the state has changed. You can use any value including 1
and TRUE. Likewise, to indicate that a variable has not changed, the
stream signaling changes should have no new value, or equivalently has
_no_value. For convenience, you can also use _unchanged which has the
same effect as _no_value.
The first example is to sort a list in increasing order by flipping
any adjacent pair of elements that are out of order. This example has
one agent for each adjacent pair indexed (i, i+1) of the list, and
this agent is responsible for ensuring that this pair is in increasing
order. In this example, each action is represented by a single agent.
The second example is to find the matrix of shortest-path lengths in a
graph given the edge-weight matrix of the graph. This example has an
agent for every triple (i,j,k) where i,j,k are indices into the
matrix. The agent associated with the triple (i,j,k) is responsible
for ensuring that the triangle inequality holds for this triple, i.e.
d[i,k] <= d[i,j] + d[j,k].
The third example shows how a shared variable, stop, can be used by
one agent to stop the execution of another. This example illustrates
the nondeterministic aspect of these programs.
The examples use arrays of signal streams. The first example has a
signal stream, changed[i], for each element lst[i] of the list. A
value is appended to changed[i] when lst[i] is changed. The second
example has a signal stream, changed[i,k], for the i,k-th entry into
the matrix, for all i,k. A value is appended to changed[i,k] when
D[i,k] changes. The third example uses a single shared variable, stop,
rather than a signal stream. This is because stop is changed only
once, from False to True. So, stop acts like a signal stream with a
single value.
"""
import unittest
from IoTPy.core.stream import Stream
from IoTPy.core.helper_control import _no_value, _unchanged, _changed
from IoTPy.helper_functions.recent_values import recent_values
from IoTPy.agent_types.op import signal_element, map_element
from IoTPy.agent_types.merge import weave_f
from IoTPy.agent_types.sink import sink
from IoTPy.agent_types.split import split_signal
def sort(lst):
    """
    Sorts lst in place, in increasing order, using one agent per
    adjacent pair of elements: each agent flips its pair whenever that
    pair is out of order, until no agent can make a change.

    Parameters
    ----------
    lst: list
        List of mutually comparable elements; modified in place.
    """
    #----------------------------------------------------------------
    # STEP 1. DEFINE FUNCTION TO BE ENCAPSULATED
    def flip(index):
        """
        Flips elements of list, lst, if they are out of order.

        Parameters
        ----------
        index: index into the array
        """
        # Flip elements if out of order and return _changed
        # to indicate a change to the corresponding index.
        # Return _unchanged if the elements are unchanged.
        if lst[index] > lst[index+1]:
            lst[index], lst[index+1] = lst[index+1], lst[index]
            # Since both lst[index] and lst[index+1] changed value, return
            # _changed for both outputs corresponding to index
            # and index + 1
            return [_changed,_changed]
        else:
            # Since neither lst[index] nor lst[index+1] changed value,
            # return _unchanged for both outputs
            return [_unchanged, _unchanged]
    #----------------------------------------------------------------
    # STEP 2. CREATE STREAMS
    # Create one stream for each index into the array.
    # The stream changed[i] gets a new value when the i-th element
    # of the array is changed.
    indices = range(len(lst))
    changed = [ Stream('changed_' + str(i)) for i in indices]
    #----------------------------------------------------------------
    # STEP 3. CREATE AGENTS
    # Create an agent for each of the elements 0, 1, ..., len(lst)-1,
    # The agent executes its action when it reads a new value on either
    # stream changed[i] or changed[i+1].
    # This agent sends _changed on stream, changed[i], when the agent
    # changes lst[i].
    # Likewise, the agent sends _changed on stream changed[i+1] only
    # when it changes lst[i+1].
    # Note: weave_f is used in split_signal below. weave_f returns a
    # stream consisting of the elements of its input streams in the
    # order in which they arrive. In this example, in_stream is a
    # stream of elements from changed[i] and changed[i+1].
    for i in range(len(lst) - 1):
        split_signal(
            func=flip,
            in_stream=weave_f([changed[i], changed[i+1]]),
            out_streams=[changed[i], changed[i+1]],
            name=i, index=i)
    #----------------------------------------------------------------
    #STEP 4. START COMPUTATION
    # Get the scheduler and execute a step.
    scheduler = Stream.scheduler
    # Start the computation by putting any value (1 in this case) in
    # each changed stream.
    for i in indices:
        changed[i].append(1)
    # Start the scheduler.
    scheduler.step()
def shortest_path(D):
    """
    Computes all-pairs shortest-path lengths by repeatedly applying the
    triangle inequality, one agent per (i, j, k) triple, until no entry
    of D can be reduced. D is modified in place and also returned.

    Parameters
    ----------
    D: matrix where D[j,k] is the length of the edge from vertex j to
       vertex k.

    Returns
    -------
    D: matrix where D[j,k] is the length of the shortest path from
       vertex j to vertex k.
    """
    #----------------------------------------------------------------
    # STEP 1. DEFINE FUNCTION TO BE ENCAPSULATED
    def triangle_inequality(triple):
        """
        Apply the triangle inequality. If this changes D then
        return any value (1 in our example). If D is unchanged
        then return no value.

        Parameters
        ----------
        triple: 3-element array or list
        """
        i, j, k = triple
        if D[i][j] + D[j][k] < D[i][k]:
            D[i][k] = D[i][j] + D[j][k]
            # Since D[i][k] changed value return _changed
            return(_changed)
        else:
            # Since D[i][k] was not changed by this action return _unchanged
            return (_unchanged)
    #----------------------------------------------------------------
    # STEP 2. CREATE STREAMS
    # Create an array, changed, of streams, where a value is appended
    # to changed[i][k] when D[i][k] is changed.
    indices = range(len(D))
    changed = [[ Stream('changed_'+ str(i)+"-" + str(j))
                 for i in indices] for j in indices]
    #----------------------------------------------------------------
    # STEP 3. CREATE AGENTS
    # Create an agent for each triple i,j,k. The agent executes its
    # action when it reads a new element of stream x. If it changes D
    # it then puts a new element on x.
    for i in indices:
        for j in indices:
            for k in indices:
                signal_element(func=triangle_inequality,
                               in_stream=weave_f([changed[i][j], changed[i][k]]),
                               out_stream=changed[i][k],
                               name='triple_'+ str(i)+"_"+str(j)+"_"+str(k),
                               triple=[i, j, k])
    #----------------------------------------------------------------
    #STEP 4. START COMPUTATION
    # Get the scheduler and execute a step.
    scheduler = Stream.scheduler
    # Start the computation by putting a value on changed[i,j].
    for i in indices:
        for j in indices:
            changed[i][j].append(1)
    scheduler.step()
    return D
def stop_agent_when_enough_elements(N):
    """
    Shows how shared variables can be used to stop agents.
    One agent generates a sequence until stopped by another agent.

    Parameters
    ----------
    N: int (positive)
       The generator is stopped once a value exceeding N has been read.

    Returns
    -------
    numbers: Stream
       Contains at least 0, 1, ..., N-1 and possibly a few extra values,
       because the generator may run past N before the stop flag is seen.
    """
    #----------------------------------------------------------------
    # STEP 1. DEFINE FUNCTIONS TO BE ENCAPSULATED
    def generate_numbers(v, state, stop):
        """
        This function generates the sequence 0, 1, 2, ... starting
        with the specified initial state. The function stops execution
        when stop becomes True.

        Parameters
        ----------
        v: The element in the sequence, 0,1,2,.. read from the input
           stream.
        state: The last element of the sequence
        stop: array of length 1. This is a shared variable of the agent.
        """
        if not stop[0]:
            return state, state+1
        else:
            return _no_value, state

    def call_halt(v, N, stop):
        # Set the shared stop flag once a value greater than N is read.
        if v > N:
            stop[0] = True
    #----------------------------------------------------------------
    # STEP 2. CREATE STREAMS AND SHARED VARIABLES
    # stop is a variable shared by both agents that are created
    # below. It is initially False and, once set to True, remains True.
    stop = [False]
    numbers = Stream('numbers')
    #----------------------------------------------------------------
    # STEP 3. CREATE AGENTS
    # Create an agent that reads and writes the same stream: numbers.
    # The agent executes its action when a new value appears on
    # numbers. The action puts the next value on numbers if stop is
    # False. The action has no effect (it is a skip operation) if stop
    # is True.
    map_element(
        func=generate_numbers, in_stream=numbers,
        out_stream=numbers, state=1, stop=stop)
    # Create an agent that sets stop to True after it reads more than
    # N values.
    # BUG FIX: the original code shadowed the N parameter with a
    # hard-coded "N = 3" here, so the caller's argument was ignored.
    sink(func=call_halt, in_stream=numbers, N=N, stop=stop)
    #----------------------------------------------------------------
    #STEP 4. START COMPUTATION
    # Get the scheduler and execute a step.
    scheduler = Stream.scheduler
    # Start the computation by putting a value into the numbers stream.
    numbers.append(0)
    scheduler.step()
    # Sanity check: the first N values must be exactly 0..N-1.
    # BUG FIX: this assert was previously placed after the return
    # statement and therefore never executed.
    assert list(range(N)) == recent_values(numbers)[:N]
    return numbers
class test_shared_variables(unittest.TestCase):
    def test_shared_variables(self):
        """Exercise the three shared-variable examples end to end."""
        # EXAMPLE 1: SORT (mutates the list in place).
        data = [10, 6, 8, 3, 20, 2, 23, 35]
        sort(data)
        assert data == [2, 3, 6, 8, 10, 20, 23, 35]
        # EXAMPLE 2: MATRIX OF LENGTHS OF SHORTEST PATHS (in place).
        distances = [[0, 20, 40, 60], [20, 0, 10, 1],
                     [40, 10, 0, 100], [60, 1, 100, 0]]
        shortest_path(distances)
        assert distances == [[0, 20, 30, 21], [20, 0, 10, 1],
                             [30, 10, 0, 11], [21, 1, 11, 0]]
        # EXAMPLE 3: STOP AGENT AFTER N ELEMENTS GENERATED.
        limit = 3
        stream = stop_agent_when_enough_elements(limit)
        assert recent_values(stream)[:limit] == list(range(limit))
        print ('TEST OF SHARED VARIABLES IS SUCCESSFUL!')
# Run the shared-variable examples as a unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "62564c4e7caa8835b87aa8ff7ae597b7",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 81,
"avg_line_length": 39.80882352941177,
"alnum_prop": 0.6214259327669006,
"repo_name": "AssembleSoftware/IoTPy",
"id": "719f449525c425c7ff9f4e13a5b3ea0744e9444c",
"size": "13535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/agent_types/test_shared_variables.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "373452"
},
{
"name": "Python",
"bytes": "786724"
}
],
"symlink_target": ""
} |
from django.shortcuts import get_object_or_404
from django import forms
from django.forms import Form, ModelForm, ModelChoiceField, ChoiceField
from django.contrib.auth.models import User, Group
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import BLANK_CHOICE_DASH
from multiupload.fields import MultiFileField
from api import models
class CreateUserForm(ModelForm):
    """Sign-up form: username, email and a masked password input."""
    # NOTE(review): ModelForm.save() would store the raw password; the view
    # presumably calls user.set_password() before saving — verify callers.
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserForm(ModelForm):
    """Edit form for an existing user's username and email."""
    class Meta:
        model = User
        fields = ('username', 'email')
class AddLinkForm(ModelForm):
    """Form to attach an external link (type/value/relevance) to a user."""
    class Meta:
        model = models.UserLink
        fields = ('type', 'value', 'relevance')
class ChangePasswordForm(Form):
    """Password-change form: old password plus new password typed twice.

    clean() adds a form-level error only when both new-password fields were
    actually provided and differ; previously a missing field also produced
    the misleading "did not match" error on top of the field-level
    "required" errors.
    """
    old_password = forms.CharField(widget=forms.PasswordInput(), label=_('Old Password'))
    new_password = forms.CharField(widget=forms.PasswordInput(), label=_('New Password'))
    new_password2 = forms.CharField(widget=forms.PasswordInput(), label=_('New Password Again'))

    def clean(self):
        new_password = self.cleaned_data.get('new_password')
        new_password2 = self.cleaned_data.get('new_password2')
        # Only compare when both fields validated; otherwise the field-level
        # errors already explain what is wrong.
        if (new_password is not None and new_password2 is not None
                and new_password != new_password2):
            raise forms.ValidationError(_("The two password fields did not match."))
        return self.cleaned_data
def getGirls():
    """Return (name, name) choice tuples for every idol, most-carded first,
    prefixed with a blank choice."""
    counts = (models.Card.objects.values('idol__name')
              .annotate(total=Count('idol__name'))
              .order_by('-total', 'idol__name'))
    choices = [('', '')]
    for entry in counts:
        name = entry['idol__name']
        choices.append((name, name))
    return choices
class UserPreferencesForm(ModelForm):
    """Public profile preferences form.

    Fixes:
    - ``getGirls()`` is now called per form instantiation instead of once at
      import time, so newly added cards appear and no database query runs
      when the module is imported.
    - Removed the duplicated ``'private'`` entry from Meta.fields.
    """
    best_girl = ChoiceField(label=_('Best Girl'), required=False)

    def __init__(self, *args, **kwargs):
        super(UserPreferencesForm, self).__init__(*args, **kwargs)
        # Refresh the idol choices on every instantiation.
        self.fields['best_girl'].choices = getGirls()

    class Meta:
        model = models.UserPreferences
        fields = ('color', 'best_girl', 'location', 'private', 'description')
class AccountForm(ModelForm):
    """Basic game-account edit form (nickname, language, OS, friend id, rank)."""
    class Meta:
        model = models.Account
        fields = ('nickname', 'language', 'os', 'friend_id', 'rank')
class OwnedCardModelChoiceField(ModelChoiceField):
    """ModelChoiceField whose option labels show the card plus an
    'idolized' marker when applicable."""
    def label_from_instance(self, obj):
        marker = 'idolized' if obj.idolized else ''
        return unicode(obj.card) + ' ' + marker
class FullAccountForm(ModelForm):
    """Full account edit form including the deck-center card selector."""
    center = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
    # Always override this queryset to set the current account only
    # form.fields['center'].queryset = models.OwnedCard.objects.filter(owner_account=owned_account, stored='Deck')
    class Meta:
        model = models.Account
        fields = ('nickname', 'center', 'rank', 'friend_id', 'language', 'os', 'device', 'play_with', 'accept_friend_requests')
class FullAccountNoFriendIDForm(FullAccountForm):
    """FullAccountForm variant that hides friend_id and language."""
    class Meta:
        model = models.Account
        fields = ('nickname', 'center', 'rank', 'os', 'device', 'play_with', 'accept_friend_requests')
class SimplePasswordForm(Form):
    """Single masked password field (autocomplete disabled)."""
    password = forms.CharField(widget=forms.PasswordInput(attrs={'autocomplete': 'off'}), label=_('Password'))
class TransferCodeForm(ModelForm):
    """Form to set an account's transfer code, gated by a password field."""
    password = forms.CharField(widget=forms.PasswordInput(), label=_('Password'))
    class Meta:
        model = models.Account
        fields = ('transfer_code',)
class QuickOwnedCardForm(ModelForm):
    """Minimal owned-card form taking the card as a raw integer id."""
    # NOTE(review): 'card' overrides the model FK with a bare IntegerField;
    # presumably the view resolves the id itself — verify callers.
    card = forms.IntegerField()
    class Meta:
        model = models.OwnedCard
        fields = ('card', 'owner_account', 'idolized')
class StaffAddCardForm(ModelForm):
    """Staff form to add an owned card by raw card id and account id.

    Both ids are resolved to model instances in save(); a missing card or
    account raises Http404 via get_object_or_404.
    """
    card = forms.IntegerField()
    owner_account = forms.IntegerField()
    def save(self, commit=True):
        # Resolve the integer ids to real instances before the model save.
        self.instance.card = get_object_or_404(models.Card, pk=self.cleaned_data['card'])
        self.instance.owner_account = get_object_or_404(models.Account, pk=self.cleaned_data['owner_account'])
        return super(StaffAddCardForm, self).save(commit)
    class Meta:
        model = models.OwnedCard
        # NOTE(review): 'card' and 'owner_account' appear in both fields and
        # exclude; exclude wins for the model fields while the integer form
        # fields declared above remain — confirm this is intentional.
        fields = ('card', 'owner_account', 'stored', 'idolized', 'max_level', 'max_bond', 'skill')
        exclude = ('card', 'owner_account')
class OwnedCardForm(ModelForm):
    """Standard owned-card edit form; see getOwnedCardForm for per-user setup."""
    class Meta:
        model = models.OwnedCard
        fields = ('card', 'owner_account', 'stored', 'idolized', 'max_level', 'max_bond', 'skill')
def getOwnedCardForm(form, accounts, owned_card=None):
    """Restrict the owner choices to *accounts* and, when editing a card
    without a skill, drop the skill field. Returns the tweaked form."""
    owner_field = form.fields['owner_account']
    owner_field.queryset = accounts
    owner_field.required = True
    owner_field.empty_label = None
    if owned_card is not None and not owned_card.card.skill:
        form.fields.pop('skill')
    return form
class EventParticipationForm(ModelForm):
    """Event participation form with account, ranking, points and song ranking."""
    class Meta:
        model = models.EventParticipation
        fields = ('account', 'ranking', 'points', 'song_ranking')
class EventParticipationNoSongForm(ModelForm):
    """EventParticipationForm variant without the song ranking field."""
    class Meta:
        model = models.EventParticipation
        fields = ('account', 'ranking', 'points')
class EventParticipationNoAccountForm(ModelForm):
    """EventParticipationForm variant without the account selector."""
    class Meta:
        model = models.EventParticipation
        fields = ('ranking', 'points', 'song_ranking')
class EventParticipationNoSongNoAccountForm(ModelForm):
    """EventParticipationForm variant with neither account nor song ranking."""
    class Meta:
        model = models.EventParticipation
        fields = ('ranking', 'points')
def getEventParticipationForm(form, accounts):
    """Restrict the account choices to *accounts* and make the field
    mandatory (no empty label). Returns the tweaked form."""
    account_field = form.fields['account']
    account_field.queryset = accounts
    account_field.required = True
    account_field.empty_label = None
    return form
class UserSearchForm(Form):
    """Player search form: free-text term plus a radio-selected ordering."""
    term = forms.CharField(required=False, label=_('Search'))
    # Each choice is an ORM ordering expression applied by the view.
    ordering = forms.ChoiceField(required=False, label='', widget=forms.RadioSelect, choices=[
        ('-accounts_set__rank', _('Ranking')),
        ('-accounts_set__verified', _('Verified')),
        ('-date_joined', _('New players')),
        ('username', _('Nickname')),
    ], initial='-accounts_set__rank')
class UserProfileStaffForm(ModelForm):
    """Staff-only editor for a user's profile preferences."""
    class Meta:
        model = models.UserPreferences
        fields = ('status', 'donation_link', 'donation_link_title', 'description', 'location', 'location_changed')
class AccountStaffForm(ModelForm):
    """Staff-only account editor with raw owner id and a mandatory center card."""
    # NOTE(review): owner_id is a bare IntegerField and the center queryset is
    # unrestricted here — presumably the view narrows both; verify callers.
    owner_id = forms.IntegerField(required=False)
    center = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=True)
    class Meta:
        model = models.Account
        fields = ('owner_id', 'friend_id', 'verified', 'rank', 'os', 'device', 'center')
class MultiImageField(MultiFileField, forms.ImageField):
    """Multi-upload field that also applies ImageField's image validation."""
    pass
class VerificationRequestForm(ModelForm):
    """Verification request form with proof screenshots.

    When an ``account`` kwarg is supplied, the selectable verification tiers
    are narrowed: accounts outside JP/EN may only request Silver, and JP/EN
    accounts below rank 195 may request Silver or Gold.
    """
    images = MultiImageField(min_num=0, max_num=10, required=False, help_text=_('If your files are too large, send them one by one. First upload one image, then edit your request with the second one, and so on. If even one image doesn\'t work, please resize your images.'))

    def __init__(self, *args, **kwargs):
        account = kwargs.pop('account', None)
        super(VerificationRequestForm, self).__init__(*args, **kwargs)
        if account is None:
            return
        if account.language not in ('JP', 'EN'):
            self.fields['verification'].choices = ((0, ''), (1, _('Silver Verified')))
        elif account.rank < 195:
            self.fields['verification'].choices = ((0, ''), (1, _('Silver Verified')), (2, _('Gold Verified')))

    class Meta:
        model = models.VerificationRequest
        fields = ('verification', 'comment', 'images')
class StaffVerificationRequestForm(ModelForm):
    """Staff decision form on a verification request: verify or reject."""
    images = MultiImageField(min_num=0, max_num=10, required=False)
    status = forms.ChoiceField(choices=((3, 'Verified'), (0, 'Rejected')), widget=forms.RadioSelect)
    class Meta:
        model = models.VerificationRequest
        fields = ('status', 'verification_comment', 'images')
class StaffFilterVerificationRequestForm(ModelForm):
    """Staff filter form over verification requests (status/verifier/tier/OS)."""
    # Extra optional OS filter on top of the model-backed fields.
    OS = forms.ChoiceField(choices=BLANK_CHOICE_DASH + list(models.OS_CHOICES), required=False)
    def __init__(self, *args, **kwargs):
        super(StaffFilterVerificationRequestForm, self).__init__(*args, **kwargs)
        # Only staff members can appear as the verifier.
        self.fields['verified_by'].queryset = User.objects.filter(is_staff=True)
        self.fields['verified_by'].required = False
        self.fields['status'].required = False
        # Prepend the blank dash so "any status" is selectable.
        self.fields['status'].choices = BLANK_CHOICE_DASH + self.fields['status'].choices
        self.fields['verification'].required = False
    class Meta:
        model = models.VerificationRequest
        fields = ('status', 'verified_by', 'verification')
# class TeamForm(ModelForm):
# card0 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card1 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card2 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card3 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card4 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card5 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card6 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card7 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# card8 = OwnedCardModelChoiceField(queryset=models.OwnedCard.objects.all(), required=False)
# class Meta:
# model = models.Team
# fields = ('name', 'card0', 'card1', 'card2', 'card3', 'card4', 'card5', 'card6', 'card7', 'card8')
# def getTeamForm(form, ownedcards):
# for i in range(9):
# print 'test'
# setattr(form, 'card' + str(i), OwnedCardModelChoiceField(queryset=ownedcards, required=False))
# return form
| {
"content_hash": "dc93227ccd9c562a4640e0c758a8eab7",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 273,
"avg_line_length": 43.64125560538117,
"alnum_prop": 0.677764077270859,
"repo_name": "laurenor/SchoolIdolAPI",
"id": "21004eaac8696ddec500ca4d658fd918fc23668e",
"size": "9732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42132"
},
{
"name": "HTML",
"bytes": "232069"
},
{
"name": "JavaScript",
"bytes": "31705"
},
{
"name": "Python",
"bytes": "297636"
}
],
"symlink_target": ""
} |
import lwmath.Vector
class Spring(object):
    """Positional spring joining two particles.

    Rather than accumulating forces, apply() moves the particle positions
    directly toward the rest length, weighted by each particle's inverse
    mass (fixed particles are never moved).
    """
    def __init__(self, p1, p2, rest_length=100, stiffness=1.0):
        self.p1 = p1
        self.p2 = p2
        self.rest_length = rest_length
        self.stiffness = stiffness
        # Scratch vector reused on every apply() call to avoid allocations.
        self._delta = lwmath.Vector.Vector()

    def apply(self):
        """F = -kx: relax both endpoints toward the rest length."""
        self._delta.copy(self.p2.pos).sub(self.p1.pos)
        # Small epsilon avoids division by zero for coincident particles.
        dist = self._delta.mag() + 0.000001
        force = (dist - self.rest_length) / (dist * (self.p1.massInv + self.p2.massInv)) * self.stiffness
        if not self.p1.fixed:
            # clone() keeps _delta intact for the second (in-place) update.
            self.p1.pos.add(self._delta.clone().scale(force * self.p1.massInv))
        if not self.p2.fixed:
            self.p2.pos.add(self._delta.scale(-force * self.p2.massInv))
"content_hash": "346048eebec7ab46b61ac3cc0780dd7d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 105,
"avg_line_length": 30.625,
"alnum_prop": 0.5795918367346938,
"repo_name": "gregroper/Pycipia",
"id": "602697ca4cb033e74112f53aec94d4983cf12de1",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/Spring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22354"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import SecurityCenterConfiguration
from .operations import LocationsOperations, Operations, TasksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SecurityCenter:  # pylint: disable=client-accepts-api-version-keyword
    """API spec for Microsoft.Security (Azure Security Center) resource provider.

    :ivar locations: LocationsOperations operations
    :vartype locations: azure.mgmt.security.v2015_06_01_preview.aio.operations.LocationsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.security.v2015_06_01_preview.aio.operations.Operations
    :ivar tasks: TasksOperations operations
    :vartype tasks: azure.mgmt.security.v2015_06_01_preview.aio.operations.TasksOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Azure subscription ID. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2015-06-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = SecurityCenterConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class with the (de)serializer.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is handled server-side for this generated client.
        self._serialize.client_side_validation = False
        self.locations = LocationsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.tasks = TasksOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is never mutated.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "SecurityCenter":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| {
"content_hash": "6387118eb90453935ccd1c1a404dbd39",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 116,
"avg_line_length": 47.67058823529412,
"alnum_prop": 0.709279368213228,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f2384c2be350def18c2c5c8c0daa8e6ae2432aca",
"size": "4520",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/security/azure-mgmt-security/azure/mgmt/security/v2015_06_01_preview/aio/_security_center.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
import platform
from collections import OrderedDict, defaultdict
from jinja2 import Environment, FileSystemLoader
from conan.tools.env.environment import ProfileEnvironment
from conans.errors import ConanException, ConanV2Exception
from conans.model.conf import ConfDefinition
from conans.model.env_info import EnvValues, unquote
from conans.model.options import OptionsValues
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference
from conans.util.config_parser import ConfigParser
from conans.util.files import load, mkdir
from conans.util.log import logger
class ProfileParser(object):
    def __init__(self, text):
        """ divides the text in 3 items:
            - self.vars: Dictionary with variable=value declarations
            - self.includes: List of other profiles to include
            - self.profile_text: the remaining, containing settings, options, env, etc
        """
        self.vars = OrderedDict()  # Order matters, if user declares F=1 and then FOO=12,
                                   # and in profile MYVAR=$FOO, it will
        self.includes = []
        self.profile_text = ""

        for counter, line in enumerate(text.splitlines()):
            if not line.strip() or line.strip().startswith("#"):
                continue
            elif line.strip().startswith("["):
                # First section header: everything from here on is the body.
                self.profile_text = "\n".join(text.splitlines()[counter:])
                break
            elif line.strip().startswith("include("):
                include = line.split("include(", 1)[1]
                if not include.endswith(")"):
                    raise ConanException("Invalid include statement")
                include = include[:-1]
                self.includes.append(include)
            else:
                try:
                    name, value = line.split("=", 1)
                except ValueError:
                    # NOTE(review): counter is 0-based, so the reported line
                    # number is one less than the human-visible line number.
                    raise ConanException("Error while parsing line %i: '%s'" % (counter, line))
                name = name.strip()
                if " " in name:
                    raise ConanException("The names of the variables cannot contain spaces")
                value = unquote(value)
                self.vars[name] = value

    def apply_vars(self):
        """Expand $VAR references, first inside the vars themselves and then
        in the remaining profile body text."""
        self._apply_in_vars()
        self._apply_in_profile_text()

    def get_includes(self):
        # Replace over includes seems insane and it is not documented. I am leaving it now
        # afraid of breaking, but should be removed Conan 2.0
        for include in self.includes:
            for repl_key, repl_value in self.vars.items():
                include = include.replace("$%s" % repl_key, repl_value)
            yield include

    def update_vars(self, included_vars):
        """ update the variables dict with new ones from included profiles,
            but keeping (higher priority) existing values"""
        included_vars.update(self.vars)
        self.vars = included_vars

    def _apply_in_vars(self):
        # Substitute $VAR references occurring in other variables' names
        # and values (single pass over the declaration order).
        tmp_vars = OrderedDict()
        for key, value in self.vars.items():
            for repl_key, repl_value in self.vars.items():
                key = key.replace("$%s" % repl_key, repl_value)
                value = value.replace("$%s" % repl_key, repl_value)
            tmp_vars[key] = value
        self.vars = tmp_vars

    def _apply_in_profile_text(self):
        # Substitute $VAR references in the profile body.
        for k, v in self.vars.items():
            self.profile_text = self.profile_text.replace("$%s" % k, v)
def get_profile_path(profile_name, default_folder, cwd, exists=True):
    """Resolve *profile_name* to a path.

    Absolute names are returned as-is; names starting with "./", ".\\" or
    ".." resolve against *cwd*; anything else is looked up first in
    *default_folder* and then, as a fallback, relative to *cwd*. When
    *exists* is True a missing file raises ConanException.
    """
    def _checked(path, shown_name=None):
        if exists and not os.path.isfile(path):
            raise ConanException("Profile not found: {}".format(shown_name or path))
        return path

    if os.path.isabs(profile_name):
        return _checked(profile_name)

    if profile_name[:2] in ("./", ".\\") or profile_name.startswith(".."):  # local
        return _checked(os.path.abspath(os.path.join(cwd, profile_name)), profile_name)

    if not os.path.exists(default_folder):
        mkdir(default_folder)
    candidate = os.path.join(default_folder, profile_name)
    if exists:
        if not os.path.isfile(candidate):
            # Fall back to a cwd-relative lookup before giving up.
            candidate = os.path.abspath(os.path.join(cwd, profile_name))
        if not os.path.isfile(candidate):
            raise ConanException("Profile not found: %s" % profile_name)
    return candidate
def read_profile(profile_name, cwd, default_folder):
    """ Will look for "profile_name" in disk if profile_name is absolute path,
        in current folder if path is relative or in the default folder otherwise.
        return: a (Profile, vars-dict) tuple, or (None, None) for an empty name
    """
    if not profile_name:
        return None, None

    profile_path = get_profile_path(profile_name, default_folder, cwd)
    logger.debug("PROFILE LOAD: %s" % profile_path)
    text = load(profile_path)

    if profile_name.endswith(".jinja"):
        # Render the profile as a jinja2 template before parsing; templates
        # can use the platform/os modules and their own directory.
        base_path = os.path.dirname(profile_path)
        context = {"platform": platform,
                   "os": os,
                   "profile_dir": base_path}
        rtemplate = Environment(loader=FileSystemLoader(base_path)).from_string(text)
        text = rtemplate.render(context)

    try:
        return _load_profile(text, profile_path, default_folder)
    except ConanV2Exception:
        raise
    except ConanException as exc:
        # Wrap parse errors with the profile name for context.
        raise ConanException("Error reading '%s' profile: %s" % (profile_name, exc))
def _load_profile(text, profile_path, default_folder):
    """ Parse and return a Profile object from a text config like representation.
        cwd is needed to be able to load the includes
    """
    try:
        inherited_profile = Profile()
        cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path else None
        profile_parser = ProfileParser(text)
        # Iterate the includes and call recursive to get the profile and variables
        # from parent profiles
        for include in profile_parser.get_includes():
            # Recursion !!
            profile, included_vars = read_profile(include, cwd, default_folder)
            inherited_profile.compose_profile(profile)
            profile_parser.update_vars(included_vars)

        # Apply the automatic PROFILE_DIR variable
        if cwd:
            # Forward slashes keep the value usable on Windows paths too.
            profile_parser.vars["PROFILE_DIR"] = os.path.abspath(cwd).replace('\\', '/')

        # Replace the variables from parents in the current profile
        profile_parser.apply_vars()

        # Current profile before update with parents (but parent variables already applied)
        doc = ConfigParser(profile_parser.profile_text,
                           allowed_fields=["build_requires", "tool_requires", "settings", "env",
                                           "options", "conf", "buildenv", "runenv"])

        # Merge the inherited profile with the readed from current profile
        _apply_inner_profile(doc, inherited_profile)

        return inherited_profile, profile_parser.vars
    except ConanException:
        raise
    except Exception as exc:
        raise ConanException("Error parsing the profile text file: %s" % str(exc))
def _load_single_build_require(profile, line):
    """Parse one "[pattern:]ref1, ref2, ..." line and append the references
    to profile.build_requires under the pattern ("*" when omitted)."""
    pattern, sep, req_list = line.partition(":")
    if not sep:
        pattern, req_list = "*", line
    refs = [ConanFileReference.loads(reference.strip())
            for reference in req_list.split(",")]
    profile.build_requires.setdefault(pattern, []).extend(refs)
def _apply_inner_profile(doc, base_profile):
    """
    :param doc: ConfigParser object from the current profile (excluding includes and vars,
    and with values already replaced)
    :param base_profile: Profile inherited, it's used as a base profile to modify it.
    :return: None
    """

    def get_package_name_value(item):
        """Parse items like package:name=value or name=value"""
        packagename = None
        if ":" in item:
            tmp = item.split(":", 1)
            packagename, item = tmp

        result_name, result_value = item.split("=", 1)
        result_name = result_name.strip()
        result_value = unquote(result_value)
        return packagename, result_name, result_value

    for setting in doc.settings.splitlines():
        setting = setting.strip()
        if setting and not setting.startswith("#"):
            if "=" not in setting:
                raise ConanException("Invalid setting line '%s'" % setting)
            package_name, name, value = get_package_name_value(setting)
            if package_name:
                base_profile.package_settings[package_name][name] = value
            else:
                base_profile.settings[name] = value

    if doc.build_requires:
        # FIXME CHECKS OF DUPLICATED?
        for req in doc.build_requires.splitlines():
            _load_single_build_require(base_profile, req)

    # tool_requires is the newer alias; both sections feed build_requires.
    if doc.tool_requires:
        for req in doc.tool_requires.splitlines():
            _load_single_build_require(base_profile, req)

    if doc.options:
        base_profile.options.update(OptionsValues.loads(doc.options))

    # The env vars from the current profile (read in doc)
    # are updated with the included profiles (base_profile)
    # the current env values has priority
    current_env_values = EnvValues.loads(doc.env)
    current_env_values.update(base_profile.env_values)
    base_profile.env_values = current_env_values

    if doc.conf:
        new_prof = ConfDefinition()
        new_prof.loads(doc.conf, profile=True)
        base_profile.conf.update_conf_definition(new_prof)

    if doc.buildenv:
        buildenv = ProfileEnvironment.loads(doc.buildenv)
        base_profile.buildenv.update_profile_env(buildenv)

    if doc.runenv:
        runenv = ProfileEnvironment.loads(doc.runenv)
        base_profile.runenv.update_profile_env(runenv)
def profile_from_args(profiles, settings, options, env, conf, cwd, cache, build_profile=False):
    """ Return a Profile object, as the result of merging a potentially existing Profile
    file and the args command-line arguments
    """
    # Ensures a default profile creating
    default_profile = cache.default_profile
    # A profile is created implicitly for the host context, or whenever any
    # command-line data was given.
    create_profile = profiles or settings or options or env or conf or not build_profile

    if profiles is None:
        # No -pr given: fall back to the configured default profile name.
        default_name = "core:default_build_profile" if build_profile else "core:default_profile"
        default_conf = cache.new_config[default_name]
        if default_conf is not None:
            default_profile_path = default_conf if os.path.isabs(default_conf) \
                else os.path.join(cache.profiles_path, default_conf)
            result, _ = read_profile(default_profile_path, os.getcwd(), cache.profiles_path)
        elif create_profile:
            result = default_profile
        else:
            result = None
    else:
        # Compose all requested profiles in order; later ones win.
        result = Profile()
        for p in profiles:
            tmp, _ = read_profile(p, cwd, cache.profiles_path)
            result.compose_profile(tmp)

    # Command-line -s/-o/-e/-c values override anything from the files.
    args_profile = _profile_parse_args(settings, options, env, conf)

    if result:
        result.compose_profile(args_profile)
    else:
        if create_profile:
            result = args_profile
    return result
def _profile_parse_args(settings, options, envs, conf):
    """ return a Profile object result of parsing raw data

    :param settings: iterable of "name=value" / "pkg:name=value" strings
    :param options: iterable of "name=value" / "pkg:name=value" strings
    :param envs: iterable of "NAME=value" / "pkg:NAME=value" strings
    :param conf: iterable of conf lines, or falsy for none
    """
    def _get_tuples_list_from_extender_arg(items):
        # Single pass: split each "name=value" once and fail fast on a
        # malformed entry (the original validated and then re-split).
        if not items:
            return []
        tuples = []
        for item in items:
            chunks = item.split("=", 1)
            if len(chunks) != 2:
                raise ConanException("Invalid input '%s', use 'name=value'" % item)
            tuples.append((chunks[0], chunks[1]))
        return tuples

    def _get_simple_and_package_tuples(items):
        """Parse items like "thing:item=value or item2=value2 and returns a tuple list for
        the simple items (name, value) and a dict for the package items
        {package: [(item, value)...)], ...}
        """
        simple_items = []
        package_items = defaultdict(list)
        tuples = _get_tuples_list_from_extender_arg(items)
        for name, value in tuples:
            if ":" in name:  # Scoped items
                ref_name, name = name.split(":", 1)
                package_items[ref_name].append((name, value))
            else:
                simple_items.append((name, value))
        return simple_items, package_items

    def _get_env_values(_env, _package_env):
        # Build an EnvValues with global entries first, then scoped ones.
        _env_values = EnvValues()
        for name, value in _env:
            _env_values.add(name, EnvValues.load_value(value))
        for package, data in _package_env.items():
            for name, value in data:
                _env_values.add(name, EnvValues.load_value(value), package)
        return _env_values

    options = _get_tuples_list_from_extender_arg(options)
    env, package_env = _get_simple_and_package_tuples(envs)
    env_values = _get_env_values(env, package_env)
    settings, package_settings = _get_simple_and_package_tuples(settings)

    result = Profile()
    result.options = OptionsValues(options)
    result.env_values = env_values
    result.settings = OrderedDict(settings)
    if conf:
        result.conf = ConfDefinition()
        result.conf.loads("\n".join(conf))
    for pkg, values in package_settings.items():
        result.package_settings[pkg] = OrderedDict(values)
    return result
| {
"content_hash": "cf25c80bfcd5502392b7271dbebaaa80",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 96,
"avg_line_length": 39.16860465116279,
"alnum_prop": 0.6239424075998219,
"repo_name": "conan-io/conan",
"id": "b484f68af833a55e5e3ce3a6b41690119d86a81f",
"size": "13474",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/client/profile_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
from info import __doc__
from numpy.version import version as __version__
import multiarray
import umath
import _internal # for freeze programs
import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
import _sort
from numeric import *
from fromnumeric import *
from defmatrix import *
import ma
import defchararray as char
import records as rec
from records import *
from memmap import *
from defchararray import *
import scalarmath
del nt
from fromnumeric import amax as max, amin as min, \
round_ as round
from numeric import absolute as abs
__all__ = ['char','rec','memmap','ma']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += defmatrix.__all__
__all__ += rec.__all__
__all__ += char.__all__
def test(level=1, verbosity=1):
    """Run the numpy self-test suite via the legacy NumpyTest runner."""
    from numpy.testing import NumpyTest
    return NumpyTest().test(level, verbosity)
| {
"content_hash": "c465ffba6d80841b298563432f210e33",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 51,
"avg_line_length": 23.10810810810811,
"alnum_prop": 0.7146198830409357,
"repo_name": "santisiri/popego",
"id": "4d22394d544f6c50a796826ee0f9218c566f8190",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/core/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.python.targets.python_binary import PythonBinary
from pants.build_graph.app_base import AppBase
class PythonApp(AppBase):
    """A deployable Python application.

    Running the ``bundle`` goal against a target of this type produces a
    self-contained artifact that can be shipped to another machine: the
    executable pex together with its dependencies and extra files such as
    config files and startup scripts.

    :API: public
    """

    @staticmethod
    def is_python_app(target):
        """Return True when *target* is a PythonApp instance."""
        return isinstance(target, PythonApp)

    @classmethod
    def alias(cls):
        """BUILD-file alias under which this target type is registered."""
        return 'python_app'

    @classmethod
    def binary_target_type(cls):
        """Target type of the binary this application bundles."""
        return PythonBinary
| {
"content_hash": "975311aee0b308798f7f0cb32f5c8105",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 27.25,
"alnum_prop": 0.7549148099606815,
"repo_name": "twitter/pants",
"id": "22e9a6bdd7c7ff599159cbf4a9c18badd1f21206",
"size": "910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/python/targets/python_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
# Fix: the author dunder was misspelled "_author__" (missing a leading
# underscore), and the comment's conversion factor disagreed with the code.
__author__ = "Dilipbobby"

# Collect input from the user, converting str to float.
kilometers = float(input('How many kilometers?: '))

# Conversion factor.
# NOTE: 1 kilometer equals 0.621371 miles; equivalently, kilometers = miles / 0.621371.
conv_fac = 0.621371

# Calculate miles and report to three decimal places.
miles = kilometers * conv_fac
print('%0.3f kilometers is equal to %0.3f miles' % (kilometers, miles))

# TODO: try out the reverse (miles to kilometers) conversion here.
| {
"content_hash": "0e1e2c100549dea1e223ccab9d831311",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 37.54545454545455,
"alnum_prop": 0.7046004842615012,
"repo_name": "dilipbobby/DataScience",
"id": "6ec2710243d54a6636933f6b4c69d1c3b94e2931",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python3/Level-1/KmtoM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "648920"
},
{
"name": "Python",
"bytes": "18592"
},
{
"name": "Roff",
"bytes": "37"
}
],
"symlink_target": ""
} |
# Average the X-DSPAM-Confidence values found in a mail file.
fname = input('Enter the file name: ')
try:
    fhand = open(fname)
except OSError:  # was a bare except: only catch actual open failures
    print('File cannot be opened:', fname)
    exit()

count = 0
total = 0
for line in fhand:
    # Only well-formed "X-DSPAM-Confidence: <float>" header lines count.
    words = line.split()
    if len(words) != 2:
        continue
    if words[0] != 'X-DSPAM-Confidence:':
        continue
    try:
        conf = float(words[1])
    except ValueError:  # was a bare except: skip only unparseable numbers
        continue
    count = count + 1
    total = total + conf
fhand.close()  # release the file handle once scanning is done

# Guard the division: the original crashed with ZeroDivisionError when the
# file contained no matching header lines.
if count == 0:
    print('No X-DSPAM-Confidence lines found in', fname)
else:
    average = total / count
    print('Average spam confidence:', average)
| {
"content_hash": "712c395bf0070cb8835750a2b1c5cb5f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 51,
"avg_line_length": 23.15,
"alnum_prop": 0.6025917926565875,
"repo_name": "mkhuthir/learnPython",
"id": "8cd9f32644196435cf8975c24ca034d84b73f910",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Book_pythonlearn_com/10_files/spamave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
} |
import sys
import subprocess
import shutil
# Names of libraries already copied into the bundle (guards against
# re-copying and unbounded recursion on shared dependencies).
copied = []
# System libraries that must be left untouched.
ignore = ["libSystem.B.dylib","libstdc++.6.dylib"]
# Bundle root: the directory two levels above the executable path given
# as the first command-line argument.
basefolder = sys.argv[1].rsplit("/",2)[0]
# Default behaviour: copy dependencies and process them recursively.
copy = True
recur = True
# Extra command-line arguments downgrade the behaviour:
#   one extra arg  -> rewrite install names only (no copying);
#   two extra args -> no copying and no recursion.
if len(sys.argv) == 3:
    copy = False
if len(sys.argv) == 4:
    copy = False
    recur = False
def update_libraries(executable):
    """Copy the dylib dependencies of `executable` into <basefolder>/lib and
    rewrite their install names to @executable_path/../lib/<name>.

    Uses `otool -L` to list dependencies and `install_name_tool -change`
    to redirect them; recurses into each newly copied library when `recur`
    is set.  (Python 2 / macOS only.)
    """
    # Find all the dylib files and recursively add dependencies
    print "Checking dependencies of " + executable
    otool_cmd = ["otool","-L",executable]
    execfolder = executable.rsplit("/",1)[0]
    # otool prints one dependency per line, each indented with "\n\t".
    otool_out = subprocess.check_output(otool_cmd).split("\n\t")
    execname = executable.rsplit("/",1)[1]
    for l in otool_out[1:]: # Skip the first line
        s = l.split(".dylib")
        if len(s) > 1:
            lib = s[0]+".dylib"
            libname = lib.rsplit("/",1)[1]
            # Skip system libs, self-references, and entries already
            # pointing at "@executable_path/...".
            if libname not in ignore and libname != execname and lib[:5] != "@exec":
                print "Requires: " + lib
                new_lib = execfolder+"/"+libname
                # Copy each library at most once (tracked via `copied`).
                if (lib != new_lib and libname not in copied):
                    if copy:
                        shutil.copyfile(lib, basefolder+"/lib/"+libname)
                    copied.append(libname)
                    # NOTE(review): new_library is assigned but never used.
                    new_library = execfolder+"/"+libname
                    if recur:
                        update_libraries(basefolder+"/lib/"+libname)
                # Always run the install tool
                install_name_tool = ["install_name_tool", "-change", lib, "@executable_path/../lib/"+libname, executable]
                print "Redirecting library for "+lib
                subprocess.call(install_name_tool)

# Update libraries on the default executable
update_libraries(sys.argv[1])
| {
"content_hash": "da802638e21fe1cdc552dbce665486bb",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 109,
"avg_line_length": 27.245283018867923,
"alnum_prop": 0.659972299168975,
"repo_name": "baubie/SpikeDB",
"id": "90f3280e0080115be2667dd6b5d95a7841e50c1d",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library_magic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "13989"
},
{
"name": "C++",
"bytes": "244151"
},
{
"name": "Erlang",
"bytes": "159"
},
{
"name": "Objective-C",
"bytes": "12167"
},
{
"name": "Python",
"bytes": "11068"
},
{
"name": "Shell",
"bytes": "2347"
},
{
"name": "TeX",
"bytes": "49078"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the OpenStack Configuration Reference.
import os
import sys

# Make the local extension package (ext/) importable.
sys.path.append(os.path.dirname(__file__))

import openstackdocstheme

# Avoid unactionable warnings
import requestsexceptions
requestsexceptions.squelch_warnings(requestsexceptions.InsecureRequestWarning)

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['ext.remote']

# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Configuration Reference'
bug_tag = u'config-reference'
copyright = u'2015-2016, OpenStack contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'

# A few variables have to be set for the log-a-bug feature.
#   giturl: The location of conf.py on Git. Must be set manually.
#   gitsha: The SHA checksum of the bug description. Automatically extracted from git log.
#   bug_tag: Tag for categorizing the bug. Must be set manually.
# These variables are passed to the logabug code via html_context.
giturl = u'http://git.openstack.org/cgit/openstack/openstack-manuals/tree/doc/config-reference/source'
git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '"
# NOTE(review): shells out via os.popen; gitsha silently becomes '' when
# git (or /usr/bin/git) is unavailable — confirm that is acceptable.
gitsha = os.popen(git_cmd).read().strip('\n')
html_context = {"gitsha": gitsha, "bug_tag": bug_tag,
                "giturl": giturl}

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['common/cli*', 'common/nova*', 'common/get-started-*',
                    'tables/*.rst', 'tables/manual/*.rst']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [openstackdocstheme.get_html_theme_path()]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# So that we can enable "log-a-bug" links from each output HTML page, this
# variable must be set to a format that includes year, month, day, hours and
# minutes.
html_last_updated_fmt = '%Y-%m-%d %H:%M'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'config-reference'

# If true, publish source files
html_copy_source = False

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ConfigRefRst.tex', u'Configuration Reference',
     u'OpenStack contributors', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ConfigRefRst', u'Configuration Reference',
     [u'OpenStack contributors'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the adjacent string literals in the description below
# concatenate without separating spaces ("look up" + "configuration"
# -> "look upconfiguration"); each fragment likely wants a trailing space.
texinfo_documents = [
    ('index', 'ConfigRefRst', u'Configuration Reference',
     u'OpenStack contributors', 'ConfigRefRst',
     'This document is for system administrators who want to look up'
     'configuration options. It contains lists of configuration options'
     'available with OpenStack and uses auto-generation to generate options'
     'and the descriptions from the code for each project. It includes sample'
     'configuration files.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| {
"content_hash": "08a4ebbbb15d2c368e860b5f8c5babaa",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 102,
"avg_line_length": 34.42960288808664,
"alnum_prop": 0.7042046765230157,
"repo_name": "AlekhyaMallina-Vedams/openstack-manuals",
"id": "815db31de991a82728bec3acc76d6d0a247dc086",
"size": "10365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/config-reference/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "26828"
},
{
"name": "CSS",
"bytes": "121997"
},
{
"name": "HTML",
"bytes": "111435"
},
{
"name": "JavaScript",
"bytes": "25447"
},
{
"name": "Python",
"bytes": "9775"
}
],
"symlink_target": ""
} |
from numpy import ndarray, asarray, prod, concatenate, expand_dims
from numpy import any as npany
from collections import Iterable
def tupleize(arg):
    """
    Coerce *arg* to a tuple.

    ``None`` passes through, strings are deliberately returned untouched,
    any other iterable is copied into a new tuple, and a bare singleton is
    wrapped as a one-element tuple.

    Parameters
    ----------
    arg : tuple, list, ndarray, or singleton
        Item to coerce
    """
    if arg is None:
        return None
    if isinstance(arg, (list, ndarray)):
        return tuple(arg)
    if isinstance(arg, Iterable):
        # strings are iterable but are intentionally left as-is
        return arg if isinstance(arg, str) else tuple(arg)
    return (arg,)
def argpack(args):
    """
    Normalize a *args tuple.

    Unwraps a single packed sequence, converting ((,...),) or (,) -> (,).

    Parameters
    ----------
    args : tuple or nested tuple
        Arguments to pack into a flat tuple
    """
    first = args[0]
    if isinstance(first, (tuple, list, ndarray)):
        return tupleize(first)
    if isinstance(first, Iterable) and not isinstance(first, str):
        # coerce any iterable into a list first (Python 3 compatibility)
        return tupleize(list(first))
    return tuple(args)
def inshape(shape, axes):
    """
    Raise ValueError unless every axis in *axes* indexes into *shape*.

    Parameters
    ----------
    shape : tuple[int]
        the shape of a BoltArray
    axes : tuple[int]
        the axes to check against shape
    """
    ndim = len(shape)
    for axis in axes:
        if axis < 0 or axis >= ndim:
            raise ValueError("axes not valid for an ndarray of shape: %s" % str(shape))
def allclose(a, b):
    """
    Return True only when *a* and *b* match in shape and are elementwise close.

    Parameters
    ----------
    a : ndarray
        First array to check
    b : ndarray
        Second array to check
    """
    from numpy import allclose as np_allclose
    if a.shape != b.shape:
        return False
    return np_allclose(a, b)
def tuplesort(seq):
    """
    Return the indices that would sort *seq* (a stable argsort).

    Parameters
    ----------
    seq : tuple
        Sequence to sort by
    """
    indices = list(range(len(seq)))
    indices.sort(key=lambda i: seq[i])
    return indices
def listify(lst, dim):
    """
    Validate an array of integer indices against a known dim and flatten it.

    NOTE(review): despite the name and original docstring, *lst* must be a
    numpy ndarray — it is flattened via ``.flatten()`` and its elements
    must expose ``.dtype``.

    Parameters
    ----------
    lst : ndarray
        Array of integer indices
    dim : int
        Bound for indices
    """
    for item in lst:
        if item.dtype != int:
            raise ValueError("indices must be integers")
    if npany(asarray(lst) >= dim):
        raise ValueError("indices out of bounds for axis with size %s" % dim)
    return lst.flatten()
def slicify(slc, dim):
    """
    Normalize *slc* into a slice with explicit start, stop, and step.

    An int becomes the corresponding one-element slice.  NOTE(review): a
    slice that already carries an explicit step is returned unchanged, so
    its start/stop may still be None — this mirrors the historical
    behaviour and callers may rely on the pass-through.

    Parameters
    ----------
    slc : slice or int
        The slice to modify, or int to convert to a slice
    dim : int
        Bound for the slice
    """
    if isinstance(slc, int):
        return slice(slc, slc + 1, 1)
    if not isinstance(slc, slice):
        raise ValueError("Type for slice %s not recongized" % type(slc))
    if slc.step is not None:
        # explicit step: returned as-is (see NOTE above)
        return slc
    start = 0 if slc.start is None else slc.start
    stop = dim if slc.stop is None else slc.stop
    return slice(start, stop, 1)
def istransposeable(new, old):
    """
    Validate that *new* is a legal permutation of the axes in *old*.

    Checks length, axis repetition, and bounds, raising ValueError on the
    first violation.

    Parameters
    ----------
    new : tuple
        tuple of proposed axes
    old : tuple
        tuple of old axes
    """
    new, old = tupleize(new), tupleize(old)
    if len(new) != len(old):
        raise ValueError("Axes do not match axes of keys")
    if len(set(new)) != len(set(old)):
        raise ValueError("Repeated axes")
    out_of_range = any(n < 0 for n in new) or max(new) > len(old) - 1
    if out_of_range:
        raise ValueError("Invalid axes")
def isreshapeable(new, old):
    """
    Validate that reshaping *old* to *new* preserves the element count.

    Parameters
    ----------
    new : tuple
        tuple of proposed axes
    old : tuple
        tuple of old axes
    """
    new, old = tupleize(new), tupleize(old)
    if prod(new) != prod(old):
        raise ValueError("Total size of new keys must remain unchanged")
def allstack(vals, depth=0):
    """
    Rebuild an ndarray from nested lists of chunks.

    Each nesting level of the lists corresponds to one axis along which
    the original array was split; chunks are concatenated back together
    axis by axis.

    Parameters
    ----------
    vals : nested lists of ndarrays
        each level of nesting of the lists representing a dimension of
        the original array.
    depth : int
        current axis (used internally during recursion)
    """
    if type(vals[0]) is not ndarray:
        # not yet at the leaves: rebuild each sub-structure one axis deeper
        vals = [allstack(sub, depth + 1) for sub in vals]
    return concatenate(vals, axis=depth)
def iterexpand(arry, extra):
    """
    Append *extra* trailing singleton axes to *arry*.

    Parameters
    ----------
    arry : ndarray
        The original array
    extra : int
        The number of empty axes to append
    """
    target = arry.ndim + extra
    axis = arry.ndim
    while axis < target:
        arry = expand_dims(arry, axis=axis)
        axis += 1
    return arry
| {
"content_hash": "d7df221eda72453872d31da203c7cb81",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 90,
"avg_line_length": 25.401869158878505,
"alnum_prop": 0.5965783664459161,
"repo_name": "andrewosh/bolt",
"id": "77f59c7fc5227bbf17e13a3771be678fb0036dc7",
"size": "5436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bolt/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122232"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.