blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
207a167347f95a1410cc0b3d18fbe9d8facf0f7c | ed0f9eb0c1cb4858d91ef7e2d435db307f23a5a5 | /dist/manage/django/contrib/admin/filters.py | 260a7ef60a5f27faa21bf595ab4174d15e45e595 | [] | no_license | hjlhehehe123/ATC_Data | 81b4622e7279aa9cc2013db8cc5a71d33561e768 | ad35e61afb8e87d8bab2d2b3aeea08e9409d56c0 | refs/heads/master | 2023-07-13T16:23:45.951584 | 2021-08-20T12:37:34 | 2021-08-20T12:37:34 | 256,994,694 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,541 | py | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_model_from_relation, prepare_lookup_value, reverse_field_path,
)
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class ListFilter:
    """
    Abstract base for admin changelist filters.

    Subclasses must set ``title`` and implement has_output(),
    expected_parameters(), choices() and queryset().
    """

    # Human-readable title to appear in the right sidebar.
    title = None
    template = 'admin/filter.html'

    def __init__(self, request, params, model, model_admin):
        # This dictionary will eventually contain the request's query string
        # parameters actually used by this filter.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify a 'title'." % self.__class__.__name__
            )

    def has_output(self):
        """Return True if some choices would be output for this filter."""
        raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')

    def choices(self, changelist):
        """
        Return choices ready to be output in the template.

        `changelist` is the ChangeList to be displayed.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a choices() method')

    def queryset(self, request, queryset):
        """Return the filtered queryset."""
        raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')

    def expected_parameters(self):
        """
        Return the list of parameter names that are expected from the
        request's query string and that will be used by this filter.
        """
        raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
    """
    A non-field filter: subclasses define ``parameter_name`` and override
    lookups() to supply the (value, verbose value) pairs.
    """

    # The parameter that should be used in the query string for that filter.
    parameter_name = None

    def __init__(self, request, params, model, model_admin):
        super().__init__(request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify a 'parameter_name'."
                % self.__class__.__name__
            )
        try:
            # Claim this filter's parameter from the query string, if present.
            self.used_parameters[self.parameter_name] = params.pop(self.parameter_name)
        except KeyError:
            pass
        raw_choices = self.lookups(request, model_admin)
        self.lookup_choices = [] if raw_choices is None else list(raw_choices)

    def has_output(self):
        return bool(self.lookup_choices)

    def value(self):
        """
        Return the value (in string format) provided in the request's
        query string for this filter, if any, or None if the value wasn't
        provided.
        """
        return self.used_parameters.get(self.parameter_name)

    def lookups(self, request, model_admin):
        """Must be overridden to return a list of tuples (value, verbose value)."""
        raise NotImplementedError(
            'The SimpleListFilter.lookups() method must be overridden to '
            'return a list of tuples (value, verbose value).'
        )

    def expected_parameters(self):
        return [self.parameter_name]

    def choices(self, changelist):
        current = self.value()
        # The "All" entry first, then one entry per declared lookup.
        yield {
            'selected': current is None,
            'query_string': changelist.get_query_string(remove=[self.parameter_name]),
            'display': _('All'),
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': current == str(lookup),
                'query_string': changelist.get_query_string({self.parameter_name: lookup}),
                'display': title,
            }
class FieldListFilter(ListFilter):
    """
    Base class for filters tied to a concrete model field.

    Subclasses register themselves via register() together with a predicate;
    create() walks the registry in order and instantiates the first subclass
    whose predicate accepts the field.
    """

    # Registry of (test, list_filter_class) pairs, scanned in order by create().
    _field_list_filters = []
    # Insertion point for filters registered with take_priority=True.
    _take_priority_index = 0

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        # Fall back to the raw field path when the field has no verbose_name.
        self.title = getattr(field, 'verbose_name', field_path)
        super().__init__(request, params, model, model_admin)
        # Consume every query-string parameter this filter understands,
        # coercing the raw string values into proper lookup values.
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)

    def has_output(self):
        # A field-based filter always renders its choices.
        return True

    def queryset(self, request, queryset):
        """Apply the collected lookups, translating conversion errors."""
        try:
            return queryset.filter(**self.used_parameters)
        except (ValueError, ValidationError) as e:
            # Fields may raise a ValueError or ValidationError when converting
            # the parameters to the correct type.
            raise IncorrectLookupParameters(e)

    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            cls._field_list_filters.insert(
                cls._take_priority_index, (test, list_filter_class))
            cls._take_priority_index += 1
        else:
            cls._field_list_filters.append((test, list_filter_class))

    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # Return an instance of the first registered filter whose test
        # accepts the field; None if no predicate matches.
        for test, list_filter_class in cls._field_list_filters:
            if test(field):
                return list_filter_class(field, request, params, model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
    """Filter on a relation (ForeignKey/ManyToMany) by related object."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        self.lookup_kwarg = '%s__%s__exact' % (field_path, field.target_field.name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        # Read the raw values before super().__init__() pops them from params.
        self.lookup_val = params.get(self.lookup_kwarg)
        self.lookup_val_isnull = params.get(self.lookup_kwarg_isnull)
        super().__init__(field, request, params, model, model_admin, field_path)
        self.lookup_choices = self.field_choices(field, request, model_admin)
        # Prefer the field's verbose_name; fall back to the related model's.
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
        self.empty_value_display = model_admin.get_empty_value_display()

    @property
    def include_empty_choice(self):
        """
        Return True if a "(None)" choice should be included, which filters
        out everything except empty relationships.
        """
        return self.field.null or (self.field.is_relation and self.field.many_to_many)

    def has_output(self):
        # Only show the filter when there is more than one option overall
        # (counting the optional "empty" entry).
        if self.include_empty_choice:
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def field_admin_ordering(self, field, request, model_admin):
        """
        Return the model admin's ordering for related field, if provided.
        """
        related_admin = model_admin.admin_site._registry.get(field.remote_field.model)
        if related_admin is not None:
            return related_admin.get_ordering(request)
        return ()

    def field_choices(self, field, request, model_admin):
        # (pk, label) pairs for every related object, ordered per the
        # related ModelAdmin when one is registered.
        ordering = self.field_admin_ordering(field, request, model_admin)
        return field.get_choices(include_blank=False, ordering=ordering)

    def choices(self, changelist):
        # "All", then one entry per related object, then the optional
        # "(None)" entry mapped to the isnull lookup.
        yield {
            'selected': self.lookup_val is None and not self.lookup_val_isnull,
            'query_string': changelist.get_query_string(remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_val == str(pk_val),
                'query_string': changelist.get_query_string({self.lookup_kwarg: pk_val}, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if self.include_empty_choice:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': changelist.get_query_string({self.lookup_kwarg_isnull: 'True'}, [self.lookup_kwarg]),
                'display': self.empty_value_display,
            }
# Relational fields expose remote_field, so this predicate matches relations.
FieldListFilter.register(lambda f: f.remote_field, RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
    """All / Yes / No (and Unknown, for nullable fields) boolean filter."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg2 = '%s__isnull' % field_path
        # Read raw values before super().__init__() pops them from params.
        self.lookup_val = params.get(self.lookup_kwarg)
        self.lookup_val2 = params.get(self.lookup_kwarg2)
        super().__init__(field, request, params, model, model_admin, field_path)
        # Normalize the '1'/'0' query-string values to real booleans so the
        # ORM lookup receives True/False.
        raw = self.used_parameters.get(self.lookup_kwarg)
        if raw in ('1', '0'):
            self.used_parameters[self.lookup_kwarg] = bool(int(raw))

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]

    def choices(self, changelist):
        options = (
            (None, _('All')),
            ('1', _('Yes')),
            ('0', _('No')),
        )
        for lookup, title in options:
            yield {
                'selected': self.lookup_val == lookup and not self.lookup_val2,
                'query_string': changelist.get_query_string({self.lookup_kwarg: lookup}, [self.lookup_kwarg2]),
                'display': title,
            }
        if self.field.null:
            # Nullable booleans get an extra "Unknown" option via isnull.
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': changelist.get_query_string({self.lookup_kwarg2: 'True'}, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }
# Matches BooleanField and any of its subclasses.
FieldListFilter.register(lambda f: isinstance(f, models.BooleanField), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
    """Filter for fields declared with an explicit ``choices`` list."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        # Read raw values before super().__init__() pops them from params.
        self.lookup_val = params.get(self.lookup_kwarg)
        self.lookup_val_isnull = params.get(self.lookup_kwarg_isnull)
        super().__init__(field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def choices(self, changelist):
        yield {
            'selected': self.lookup_val is None,
            'query_string': changelist.get_query_string(remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All')
        }
        empty_label = ''
        for choice_value, choice_label in self.field.flatchoices:
            if choice_value is None:
                # A None choice maps to the isnull lookup; emit it last.
                empty_label = choice_label
                continue
            yield {
                'selected': self.lookup_val == str(choice_value),
                'query_string': changelist.get_query_string({self.lookup_kwarg: choice_value}, [self.lookup_kwarg_isnull]),
                'display': choice_label,
            }
        if empty_label:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': changelist.get_query_string({self.lookup_kwarg_isnull: 'True'}, [self.lookup_kwarg]),
                'display': empty_label,
            }
# Any field declared with explicit choices gets the choices-based filter.
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    """Filter a DateField/DateTimeField by fixed ranges (today, week, ...)."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = '%s__' % field_path
        # Keep a copy of every parameter targeting this field, used later by
        # choices() to decide which link is currently selected.
        self.date_params = {k: v for k, v in params.items() if k.startswith(self.field_generic)}
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if timezone.is_aware(now):
            now = timezone.localtime(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:  # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        # Month rollover: December wraps to January of the next year.
        if today.month == 12:
            next_month = today.replace(year=today.year + 1, month=1, day=1)
        else:
            next_month = today.replace(month=today.month + 1, day=1)
        next_year = today.replace(year=today.year + 1, month=1, day=1)
        self.lookup_kwarg_since = '%s__gte' % field_path
        self.lookup_kwarg_until = '%s__lt' % field_path
        # Each link is (label, half-open [since, until) lookup dict).
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {
                self.lookup_kwarg_since: str(today),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_kwarg_since: str(today.replace(day=1)),
                self.lookup_kwarg_until: str(next_month),
            }),
            (_('This year'), {
                self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                self.lookup_kwarg_until: str(next_year),
            }),
        )
        if field.null:
            # NOTE: lookup_kwarg_isnull only exists for nullable fields;
            # expected_parameters() guards its use with the same condition.
            self.lookup_kwarg_isnull = '%s__isnull' % field_path
            self.links += (
                (_('No date'), {self.field_generic + 'isnull': 'True'}),
                (_('Has date'), {self.field_generic + 'isnull': 'False'}),
            )
        super().__init__(field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        params = [self.lookup_kwarg_since, self.lookup_kwarg_until]
        if self.field.null:
            params.append(self.lookup_kwarg_isnull)
        return params

    def choices(self, changelist):
        for title, param_dict in self.links:
            yield {
                # Selected when the request's date params match this link's
                # lookups exactly.
                'selected': self.date_params == param_dict,
                'query_string': changelist.get_query_string(param_dict, [self.field_generic]),
                'display': title,
            }
# DateTimeField is a DateField subclass, so this matches both.
FieldListFilter.register(
    lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    """Fallback filter: one choice per distinct value in the database."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        # Read raw values before super().__init__() pops them from params.
        self.lookup_val = params.get(self.lookup_kwarg)
        self.lookup_val_isnull = params.get(self.lookup_kwarg_isnull)
        self.empty_value_display = model_admin.get_empty_value_display()
        parent_model, reverse_path = reverse_field_path(model, field_path)
        # Obey parent ModelAdmin queryset when deciding which options to show
        if model == parent_model:
            queryset = model_admin.get_queryset(request)
        else:
            queryset = parent_model._default_manager.all()
        # Distinct values of the target field; evaluated lazily.
        self.lookup_choices = queryset.distinct().order_by(field.name).values_list(field.name, flat=True)
        super().__init__(field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def choices(self, changelist):
        yield {
            'selected': self.lookup_val is None and self.lookup_val_isnull is None,
            'query_string': changelist.get_query_string(remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # Defer the NULL entry; it is rendered last via isnull.
                include_none = True
                continue
            val = str(val)
            yield {
                'selected': self.lookup_val == val,
                'query_string': changelist.get_query_string({self.lookup_kwarg: val}, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': changelist.get_query_string({self.lookup_kwarg_isnull: 'True'}, [self.lookup_kwarg]),
                'display': self.empty_value_display,
            }
# Catch-all fallback; registered last so more specific filters win.
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
    """Like RelatedFieldListFilter, but only lists related objects that are
    actually referenced by the admin's visible queryset."""

    def field_choices(self, field, request, model_admin):
        ordering = self.field_admin_ordering(field, request, model_admin)
        # Restrict choices to the pks reachable from the changelist queryset.
        visible_pks = model_admin.get_queryset(request).distinct().values_list('%s__pk' % self.field_path, flat=True)
        return field.get_choices(include_blank=False, limit_choices_to={'pk__in': visible_pks}, ordering=ordering)
class EmptyFieldListFilter(FieldListFilter):
    """Filter on whether a field is "empty" (NULL and/or empty string)."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        # A field that can be neither NULL nor '' has no notion of "empty".
        if not (field.empty_strings_allowed or field.null):
            raise ImproperlyConfigured(
                "The list filter '%s' cannot be used with field '%s' which "
                "doesn't allow empty strings and nulls." % (
                    self.__class__.__name__,
                    field.name,
                )
            )
        self.lookup_kwarg = '%s__isempty' % field_path
        self.lookup_val = params.get(self.lookup_kwarg)
        super().__init__(field, request, params, model, model_admin, field_path)

    def queryset(self, request, queryset):
        if self.lookup_kwarg not in self.used_parameters:
            # Filter not active for this request.
            return queryset
        if self.lookup_val not in ('0', '1'):
            raise IncorrectLookupParameters
        # OR together every way this field can be "empty".
        empty_q = models.Q()
        if self.field.empty_strings_allowed:
            empty_q |= models.Q(**{self.field_path: ''})
        if self.field.null:
            empty_q |= models.Q(**{'%s__isnull' % self.field_path: True})
        return queryset.filter(empty_q) if self.lookup_val == '1' else queryset.exclude(empty_q)

    def expected_parameters(self):
        return [self.lookup_kwarg]

    def choices(self, changelist):
        options = (
            (None, _('All')),
            ('1', _('Empty')),
            ('0', _('Not empty')),
        )
        for lookup, title in options:
            yield {
                'selected': self.lookup_val == lookup,
                'query_string': changelist.get_query_string({self.lookup_kwarg: lookup}),
                'display': title,
            }
| [
"1598214715@qq.com"
] | 1598214715@qq.com |
01f52cd21189eb15dd960b408e13c860f7fb4c07 | 06604399c457d6ec05fa5d5ae458632e2606ec98 | /test/dynamo/test_misc.py | ecc6b58ed15e60bacf404993edec092d1aec999d | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | yncxcw/pytorch | 6f262f7613caef4c2ce18c85662db9adc6a2a81a | a3b72ee354031004edd9b951d0efcdd4508fd578 | refs/heads/master | 2023-07-20T21:38:00.718093 | 2023-07-13T03:54:17 | 2023-07-13T03:54:17 | 234,432,318 | 0 | 0 | NOASSERTION | 2020-01-16T23:34:42 | 2020-01-16T23:34:41 | null | UTF-8 | Python | false | false | 195,459 | py | # Owner(s): ["module: dynamo"]
import abc
import collections
import copy
import dataclasses
import dis
import enum
import logging
import math
import operator
import os
import sys
import traceback
import typing
import unittest
import unittest.mock as mock
import warnings
import weakref
from unittest.mock import patch
import numpy as np
import sympy
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.onnx.operators
from torch._C import FileCheck
from torch._dynamo import allow_in_graph, bytecode_analysis, bytecode_transformation
from torch._dynamo.exc import Unsupported
from torch._dynamo.source import GetItemSource, LocalSource
from torch._dynamo.testing import (
CompileCounter,
expectedFailureDynamic,
requires_numpy_pytorch_interop,
same,
skipIfNotPy311,
unsupported,
)
from torch._dynamo.utils import CompileProfiler, ifdynstaticdefault
from torch.ao.quantization import MinMaxObserver
from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization.quantize_fx import prepare_qat_fx
from torch.autograd.profiler import _enable_dynamo_cache_lookup_profiler
from torch.fx.experimental.symbolic_shapes import ConstraintViolationError, FloorDiv
from torch.fx.experimental.validator import SympyToZ3, TranslationValidator
from torch.nn import functional as F
from torch.testing._internal.common_cuda import (
PLATFORM_SUPPORTS_FUSED_SDPA,
SM80OrLater,
TEST_CUDA,
TEST_MULTIGPU,
)
from torch.testing._internal.common_utils import freeze_rng_state, IS_FBCODE
from torch.testing._internal.jit_utils import JitTestCase
# Simple three-field namedtuple used as a test fixture in this module.
mytuple = collections.namedtuple("mytuple", ["a", "b", "ab"])
class MyPickledModule(torch.nn.Module):
    """Tiny module computing x**3 + y + z, used in pickling tests."""

    def __init__(self, z):
        super().__init__()
        self.z = z

    def forward(self, x, y):
        cubed = x * x * x
        return cubed + y + self.z
# These are used for test_{cond/map}_with_quantization
default_symmetric_fake_quant = FakeQuantize.with_args(
    observer=MinMaxObserver, qscheme=torch.per_tensor_symmetric, dtype=torch.quint8
)
default_weight_symmetric_fake_quant = FakeQuantize.with_args(
    observer=MinMaxObserver, qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
)
# Symmetric 8-bit qconfig: quint8 activations, qint8 weights.
uniform_qconfig_8bit = QConfig(
    activation=default_symmetric_fake_quant,
    weight=default_weight_symmetric_fake_quant.with_args,
)
# Apply the qconfig to every torch.nn.Linear when preparing with FX.
qconfig_dict = {"object_type": [(torch.nn.Linear, uniform_qconfig_8bit)]}
class MiscTests(torch._dynamo.test_case.TestCase):
    def test_boolarg(self):
        """Each distinct constant flag value (True/False/None) compiles its
        own frame; a repeated value hits the cache (frame_count stays 3)."""
        def boolarg(aa, bb, flag):
            if flag:
                return aa - bb
            else:
                return bb - aa

        a = torch.randn(10, 10)
        b = torch.randn(10, 10)
        correct1 = boolarg(a, b, True)
        correct2 = boolarg(a, b, False)
        correct3 = boolarg(a, b, None)
        counter = CompileCounter()
        opt_boolarg = torch._dynamo.optimize_assert(counter)(boolarg)
        val1 = opt_boolarg(a, b, True)
        val2 = opt_boolarg(a, b, False)
        val3 = opt_boolarg(a, b, None)
        val4 = opt_boolarg(a, b, True)  # cache hit; no new frame
        self.assertTrue(same(val1, correct1))
        self.assertTrue(same(val2, correct2))
        self.assertTrue(same(val3, correct3))
        self.assertTrue(same(val4, correct1))
        self.assertEqual(counter.frame_count, 3)

    def test_callpacked(self):
        """Unpacking a list vs. a tuple argument recompiles once per type."""
        def call_packed(args):
            a, b, c = args
            return a - b * c

        counter = CompileCounter()
        a = torch.randn(10, 10)
        b = torch.randn(10, 10)
        c = torch.randn(10, 10)
        correct = call_packed([a, b, c])
        opt_call_packed = torch._dynamo.optimize_assert(counter)(call_packed)
        val1 = opt_call_packed([a, b, c])
        val2 = opt_call_packed((a, b, c))
        val3 = opt_call_packed([a, b, c])
        val4 = opt_call_packed((a, b, c))
        self.assertTrue(same(val1, correct))
        self.assertTrue(same(val2, correct))
        self.assertTrue(same(val3, correct))
        self.assertTrue(same(val4, correct))
        self.assertEqual(counter.frame_count, 2)

    def test_raises(self):
        """The tensor ops before a raise are still compiled into one frame."""
        def fn(a, b, c, cls):
            x = a + b - c * 10
            raise cls(str(x))

        counter = CompileCounter()
        a = torch.randn(10, 10)
        b = torch.randn(10, 10)
        c = torch.randn(10, 10)
        opt_fn = torch._dynamo.optimize(counter)(fn)
        self.assertRaises(AssertionError, lambda: opt_fn(a, b, c, AssertionError))
        self.assertEqual(counter.frame_count, 1)
        self.assertEqual(counter.op_count, 3)
    def test_inplace(self):
        """In-place copy_ and -= are captured as graph ops."""
        def inplace1(a, b):
            o = torch.empty((10, 10))
            o.copy_(a)
            o -= b
            return o

        torch._dynamo.testing.standard_test(self, inplace1, 2, expected_ops=3)

    def test_unpack4(self):
        """Unpacking .size() into locals used to build a new tensor."""
        def unpack4(a, b):
            a = a[:5, :]
            b = b[:5, :]
            x, y = a.size()
            o = torch.empty((x, y))
            o.copy_(a / b)
            return o

        torch._dynamo.testing.standard_test(
            self,
            unpack4,
            2,
            expected_ops=5,
            expected_ops_dynamic=ifdynstaticdefault(5, 7),
        )

    def test_unpack5(self):
        """Same as test_unpack4 but via the .shape attribute."""
        def unpack5(a, b):
            a = a[:5, :]
            b = b[:5, :]
            x, y = a.shape
            o = torch.empty((x, y))
            o.copy_(a / b)
            return o

        torch._dynamo.testing.standard_test(
            self,
            unpack5,
            2,
            expected_ops=5,
            expected_ops_dynamic=ifdynstaticdefault(5, 7),
        )

    def test_matmul1(self):
        """The @ operator lowers to a single op."""
        def matmul_op1(a, b):
            return a @ b

        # TODO(jansel): FX doesn't support this, should add upstream support
        torch._dynamo.testing.standard_test(self, matmul_op1, 2, expected_ops=1)
    def test_int_shape_binops(self):
        """Binops with a constant int on the left of a shape-derived int."""
        def fn(x):
            # Test reversal by putting int arg first.
            y = 15 - x.shape[0]
            y = 4 + y
            y = 5 * y
            y = 2 % y
            y = 3**y
            y = 10 // y
            y = pow(2, y)
            y = 10 / y
            return x + y

        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=1, expected_ops_dynamic=ifdynstaticdefault(1, 11)
        )

    def test_shape_int_inplace_binops(self):
        """In-place binops applied to a shape-derived int."""
        def fn(x):
            p = x.shape[0]
            p += 2
            p -= 2
            p **= 2
            p /= 2
            p *= 2
            p //= 2
            p %= 2
            return x + p

        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=1, expected_ops_dynamic=ifdynstaticdefault(1, 10)
        )

    def test_int_shape_inplace_binops(self):
        """In-place binops with the constant on the left-hand side."""
        def fn(x):
            p = x.shape[0]
            # Test reversal by putting constant first
            y = 2
            y += p
            y = 2
            y -= p
            y = 2
            y **= p
            y = 2
            y /= p
            y = 2
            y *= p
            y = 2
            y //= p
            y = 2
            y %= p
            return x + y

        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=1, expected_ops_dynamic=ifdynstaticdefault(1, 10)
        )

    def test_int_int_comparisons(self):
        """Constant-vs-constant comparisons fold away at compile time."""
        def fn(x):
            if 2 != 2:
                out = 1
            elif 2 < 1:
                out = 1
            elif 1 > 2:
                out = 1
            elif 1 >= 2:
                out = 1
            elif 2 <= 1:
                out = 1
            elif 2 == 2:
                out = 2
            else:
                out = 1
            return x + out

        torch._dynamo.testing.standard_test(self, fn, 1, expected_ops=1)

    def test_shape_int_comparisons(self):
        """Shape-derived int compared against constants on the right."""
        def fn(x):
            a = x.shape[0]
            # Ensure support for constant on right side
            if a != 10:
                out = 1
            elif a < 2:
                out = 1
            elif a > 12:
                out = 1
            elif a >= 12:
                out = 1
            elif a <= 2:
                out = 1
            elif a == 10:
                out = 2
            else:
                out = 1
            return x + out

        # expect for dynamic: size, index, 6 comparison ops, add
        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=1, expected_ops_dynamic=ifdynstaticdefault(1, 9)
        )

    def test_int_shape_comparisons(self):
        """Shape-derived int compared against constants on the left."""
        def fn(x):
            a = x.shape[0]
            # Ensure support for constant on left side
            if 10 != a:
                out = 1
            elif 12 < a:
                out = 1
            elif 2 > a:
                out = 1
            elif 2 >= a:
                out = 1
            elif 12 <= a:
                out = 1
            elif 10 == a:
                out = 2
            else:
                out = 1
            return x + out

        # expect for dynamic: size, index, 6 comparison ops, add
        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=1, expected_ops_dynamic=ifdynstaticdefault(1, 9)
        )
    def test_param_shape_binops(self):
        """Binops mixing a Parameter's shape with an input's shape."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.randn(15))

            def forward(self, x):
                # Test reversal by putting param shape arg first.
                p = self.param.shape[0]
                y = p - x.shape[0]
                y = p + y
                y = p * y
                y = p % y
                y = p**y
                y = p // y
                y = pow(p, y)
                y = p / y
                return x + y

        counts = torch._dynamo.testing.CompileCounter()
        mod = MyModule()
        optimized_mod = torch._dynamo.optimize(counts, nopython=True)(mod)
        x = torch.randn(3)
        ref = mod(x)
        res = optimized_mod(x)
        self.assertTrue(same(ref, res))
        self.assertEqual(counts.frame_count, 1)
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(counts.op_count, """1""")
        else:
            self.assertExpectedInline(counts.op_count, """11""")

    def test_user_defined_binop(self):
        """A user class implementing __radd__ participates in shape math."""
        class MyClass:
            def __init__(self, value):
                self.value = value

            def __radd__(self, other):
                return self.value + other

        def fn(x, c):
            y = x.shape[0] + c
            return x + y

        counts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(counts)(fn)
        x = torch.randn(3)
        c = MyClass(4)
        ref = fn(x, c)
        res = opt_fn(x, c)
        self.assertTrue(same(ref, res))
        self.assertEqual(counts.frame_count, 1)
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(counts.op_count, """1""")
        else:
            self.assertExpectedInline(counts.op_count, """4""")
    def test_compare_shapes_eq(self):
        """Shape equality drives branching, both as list and Size."""
        def compare_shapes(a, b, to_list):
            x = list(a.unsqueeze(-1).shape) if to_list else a.shape
            y = list(b.unsqueeze(-1).shape) if to_list else b.shape
            if x == y:
                return a + 1
            else:
                return a + 2

        # Test both ListVariable and ShapeVariable
        torch._dynamo.testing.standard_test(
            self, lambda a, b: compare_shapes(a, b, to_list=True), 2
        )
        torch._dynamo.testing.standard_test(
            self, lambda a, b: compare_shapes(a, b, to_list=False), 2
        )

    def test_compare_shapes_tuple_eq(self):
        """Shape equality after converting to a tuple."""
        def compare_shapes(a, b):
            x = tuple(a.unsqueeze(-1).shape)
            y = tuple(b.unsqueeze(-1).shape)
            if x == y:
                return a + 1
            else:
                return a + 2

        torch._dynamo.testing.standard_test(self, lambda a, b: compare_shapes(a, b), 2)

    def test_compare_shapes_tuple_neq(self):
        """Shape inequality after converting to a tuple."""
        def compare_shapes(a, b):
            x = tuple(a.unsqueeze(-1).shape)
            y = tuple(b.unsqueeze(-1).shape)
            if x != y:
                return a + 1
            else:
                return a + 2

        torch._dynamo.testing.standard_test(self, lambda a, b: compare_shapes(a, b), 2)

    def test_compare_shapes_neq(self):
        """Shape inequality drives branching, both as list and Size."""
        def compare_shapes(a, b, to_list):
            x = list(a.unsqueeze(-1).shape) if to_list else a.shape
            y = list(b.unsqueeze(-1).shape) if to_list else b.shape
            if x != y:
                return a + 1
            else:
                return a + 2

        # Test both ListVariable and ShapeVariable
        torch._dynamo.testing.standard_test(
            self, lambda a, b: compare_shapes(a, b, to_list=True), 2
        )
        torch._dynamo.testing.standard_test(
            self, lambda a, b: compare_shapes(a, b, to_list=False), 2
        )

    def test_compare_shapes_with_constant(self):
        """A shape/constant comparison installs a size guard whose failure
        message names the tensor and mismatched dimension."""
        def compare_shapes(a):
            x = a.shape
            if x[0] != 3:
                return a * 4
            return a * 3

        guard_failure = None

        def guard_failures(failure):
            nonlocal guard_failure
            guard_failure = failure

        opt_fn = torch._dynamo.optimize(
            "eager", nopython=True, guard_fail_fn=guard_failures
        )(compare_shapes)
        opt_fn(torch.randn([3, 4]))
        opt_fn(torch.randn([4, 3]))
        self.assertExpectedInline(
            guard_failure.reason,
            """tensor 'L['a']' size mismatch at index 0. expected 3, actual 4""",
        )
    def test_builtin_isinstance(self):
        """isinstance() on tensors and builtin types folds to constants."""
        def fn(x):
            t = torch.arange(1, 3)
            a = isinstance(x, torch.Tensor)
            b = isinstance(t, torch.Tensor)
            c = isinstance(x, int)
            d = isinstance(3, int)
            e = isinstance([1, 2, 3], list)
            f = isinstance({"foo": 1, "bar": 2}, dict)
            res = [a, b, c, d, e, f]
            # Can't run yet due to other unimplemented instructions
            # res += [isinstance(torch.nn.LazyLinear(2, 3), torch.nn.Linear)]
            return res

        torch._dynamo.testing.standard_test(self, fn, 1, expected_ops=1)

    def test_fold(self):
        """math.sqrt of a constant is folded at compile time."""
        def fn(a):
            return a + math.sqrt(63)

        torch._dynamo.testing.standard_test(self, fn, 1, expected_ops=1)

    def test_shape_unpack(self):
        """Unpacked size components can be used in tensor math."""
        def fn(x):
            a, b = x.size()
            return x * b

        i = torch.randn(5, 10)
        r1 = fn(i)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        r2 = opt_fn(i)
        self.assertTrue(same(r1, r2))

    def test_tensor_iter(self):
        """Iterating over a tensor's rows with in-place mutation."""
        def fn(x):
            for y in x:
                y.add_(1.0)
            return y

        # expect extra size node for dynamic
        torch._dynamo.testing.standard_test(
            self,
            fn,
            1,
            expected_ops=20,
            expected_ops_dynamic=ifdynstaticdefault(20, 21),
        )
    def test_empty_list(self):
        """Empty list/tuple arguments are handled uniformly."""
        def fn(x, ll):
            if len(ll) == 0 and not ll and ll is not None:
                return x + 1

        i = torch.randn(5, 10)
        r1 = fn(i, [])
        opt_fn = torch._dynamo.optimize("eager")(fn)
        r2 = opt_fn(i, [])
        r3 = opt_fn(i, tuple())
        self.assertTrue(same(r1, r2))
        self.assertTrue(same(r1, r3))

    def test_min_max_over_iterable(self):
        """min()/max() over list, iterator, tuple and varargs forms."""
        def get_test_fn(func):
            def _fn(a, b, func=func):
                # try all of list, iterator, tuple, vararg.
                lst = [a.shape[0] + 1, 8, a.shape[0]]
                x = func(lst)
                y = func(iter(lst))
                z = func(tuple(lst))
                w = func(*lst)
                return a + (x + y + z + w)

            return _fn

        torch._dynamo.testing.standard_test(
            self,
            get_test_fn(func=min),
            2,
            expected_ops=1,
            expected_ops_dynamic=ifdynstaticdefault(1, 14),
        )
        torch._dynamo.testing.standard_test(
            self,
            get_test_fn(func=max),
            2,
            expected_ops=1,
            expected_ops_dynamic=ifdynstaticdefault(1, 17),
        )
    def test_config_obj(self):
        """Mutating attributes of a plain config object triggers recompiles
        guarded on the attribute values."""
        class Cfg:
            def __init__(self):
                self.val = 0.5
                self.count = 3

        def fn(x, cfg):
            for i in range(cfg.count):
                x = x + cfg.val
            return x

        cfg1 = Cfg()
        cfg1.val = 1.0
        cfg2 = Cfg()
        v = torch.zeros(1)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        v = opt_fn(v, cfg1)  # 3
        v = opt_fn(v, cfg2)  # 4.5
        cfg2.count = 1
        v = opt_fn(v, cfg2)  # 5
        cfg2.val = 2.0
        v = opt_fn(v, cfg2)  # 7
        self.assertEqual(v[0], 7)
        self.assertEqual(cnts.op_count, 8)

    def test_config_getattr_default(self):
        """getattr() with a default on a config object guards correctly as
        the attribute appears, changes, and is toggled back."""
        class Cfg:
            def __init__(self):
                self.val = 0.5
                self.count = 10

        def fn(x, cfg):
            if getattr(cfg, "just_add_7", False):
                return x + 7
            for i in range(cfg.count):
                x = x + cfg.val
            return x

        cfg1 = Cfg()
        v = torch.zeros(1)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(v, cfg1)[0], 5)
        self.assertEqual(opt_fn(v, cfg1)[0], 5)
        cfg1.just_add_7 = True
        self.assertEqual(opt_fn(v, cfg1)[0], 7)
        self.assertEqual(opt_fn(v, cfg1)[0], 7)
        cfg1.just_add_7 = False
        self.assertEqual(opt_fn(v, cfg1)[0], 5)
        self.assertEqual(opt_fn(v, cfg1)[0], 5)
        self.assertEqual(cnts.frame_count, 3)

    def test_size_input(self):
        """torch.Size, tuple and list second arguments each recompile once."""
        def fn(x, s):
            a, b = s
            return x + (a - b)

        v = torch.zeros(10, 20)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(v, v.size())[0, 0], -10)
        self.assertEqual(opt_fn(v, (10, 20))[0, 0], -10)
        self.assertEqual(opt_fn(v, [10, 20])[0, 0], -10)
        # One recompile per differing input type
        self.assertEqual(cnts.frame_count, 3)
    def test_cell_output1(self):
        """A function whose only output is written through a nonlocal cell
        still compiles and produces the side effect."""
        out = None

        def fn(a, b):
            nonlocal out
            out = a + b * 10

        v = torch.Tensor([100])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertIsNone(opt_fn(v, v))
        self.assertEqual(out[0], 1100)
        self.assertEqual(cnts.op_count, 2)
    def test_cell_output2(self):
        """Nonlocal cell write still works when the function contains an
        unsupported call that forces a graph break."""
        out = None

        def fn(a, b):
            nonlocal out
            c = unsupported(a, b)  # forces a graph break mid-function
            out = a + b * 10 + c

        v = torch.Tensor([100])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertIsNone(opt_fn(v, v))
        self.assertEqual(out[0], 1200)
        self.assertEqual(cnts.op_count, 3)
    def test_return_nested_function(self):
        """Returning a closure from a compiled function works; the returned
        closure can itself be compiled and captures outer tensors correctly."""
        out = None

        def fn(a, b):
            nonlocal out
            c = a + b
            d = a + 1.0

            def fn2(f: int = 7, g: float = 9.0):
                nonlocal out
                out = a + b * 10
                return c * f - d * g

            return fn2

        v1 = torch.Tensor([100])
        v2 = torch.Tensor([200])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        opt_fn_ret = torch._dynamo.optimize(cnts)(opt_fn(v1, v2))
        self.assertEqual(opt_fn_ret(1.5)[0], -459)
        self.assertEqual(out[0], 2100)
        self.assertEqual(cnts.frame_count, 2)
        self.assertEqual(cnts.op_count, 7)
    def test_tensor_dict1(self):
        """Dict-of-tensors input: key lookups are traced into a single graph."""
        def fn(inputs):
            return inputs["a"] - inputs["b"] * 1.5

        v1 = torch.Tensor([100])
        v2 = torch.Tensor([200])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn({"a": v1, "b": v2})[0], -200)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_tensor_dict2(self):
        """Iterating a tensor dict via .items(), .values(), and .keys() all
        trace into single graphs with identical results."""
        def fn1(inputs):
            total = torch.zeros(1)
            for k, v in inputs.items():
                total += v
            return total

        def fn2(inputs):
            total = torch.zeros(1)
            for v in inputs.values():
                total += v
            return total

        def fn3(inputs):
            total = torch.zeros(1)
            for k in inputs.keys():
                total += inputs[k]
            return total

        v1 = torch.Tensor([100])
        v2 = torch.Tensor([200])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn1 = torch._dynamo.optimize(cnts)(fn1)
        opt_fn2 = torch._dynamo.optimize(cnts)(fn2)
        opt_fn3 = torch._dynamo.optimize(cnts)(fn3)
        self.assertEqual(opt_fn1({"a": v1, "b": v2})[0], 300)
        self.assertEqual(opt_fn2({"a": v1, "b": v2})[0], 300)
        self.assertEqual(opt_fn3({"a": v1, "b": v2})[0], 300)
        self.assertEqual(cnts.frame_count, 3)
        self.assertEqual(cnts.op_count, 9)
    def test_dictcomp(self):
        """Dict comprehensions over tensor dicts trace into one graph."""
        def fn1(inputs):
            return {k: v + 1 for k, v in inputs.items()}

        v1 = torch.Tensor([100])
        v2 = torch.Tensor([200])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn1 = torch._dynamo.optimize(cnts)(fn1)
        self.assertEqual(opt_fn1({"a": v1, "b": v2})["a"], 101)
        self.assertEqual(opt_fn1({"a": v1, "b": v2})["b"], 201)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_listcomp(self):
        """List comprehension feeding torch.cat traces into one graph."""
        def fn2(inputs):
            return torch.sum(torch.cat([v + 1 for k, v in inputs.items()], 0))

        v1 = torch.Tensor([100])
        v2 = torch.Tensor([200])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn2 = torch._dynamo.optimize(cnts)(fn2)
        self.assertEqual(opt_fn2({"a": v1, "b": v2}), 302)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 4)
    def test_is_floating_point(self):
        """torch.is_floating_point(tensor) is constant-folded during tracing."""
        def fn(a, b):
            x = a + 1.0
            if torch.is_floating_point(b):
                x = x + b
            return x + 2.0

        return torch._dynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
    def test_is_floating_point2(self):
        """Tensor.is_floating_point() method form is also constant-folded."""
        def fn(a, b):
            x = a + 1.0
            if b.is_floating_point():
                x = x + b
            return x + 2.0

        return torch._dynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
    def test_is_tensor(self):
        """torch.is_tensor on a tensor argument is resolved at trace time."""
        def fn(a, b):
            x = a + 1.0
            if torch.is_tensor(b):
                x = x + b
            return x + 2.0

        return torch._dynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
    def test_is_tensor2(self):
        """torch.is_tensor branches correctly for tensor vs non-tensor input."""
        def fn(x):
            if torch.is_tensor(x):
                return x + 1
            else:
                return torch.ones([2, 3])

        x1 = {"input": torch.rand(2, 3)}  # dict: not a tensor
        x2 = torch.rand(2, 3)
        ref1 = fn(x1)
        ref2 = fn(x2)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res1 = opt_fn(x1)
        res2 = opt_fn(x2)
        self.assertEqual(ref1, res1)
        self.assertEqual(ref2, res2)
    def test_numel(self):
        """numel()/torch.numel()/nelement() are traced; extra ops appear only
        under dynamic shapes (size computation is no longer a constant)."""
        def fn(a):
            return (a + a.numel() + torch.numel(a), a + a.nelement())

        return torch._dynamo.testing.standard_test(
            self,
            fn=fn,
            nargs=1,
            expected_ops=3,
            expected_ops_dynamic=ifdynstaticdefault(3, 6),
        )
    def test_pair(self):
        """torch.nn.modules.utils._pair/_ntuple helpers are traceable."""
        def fn(a):
            return (
                torch.zeros(torch.nn.modules.utils._pair(a.size()))
                + a
                + torch.ones(torch.nn.modules.utils._ntuple(3)(3)).sum()
            )

        return torch._dynamo.testing.standard_test(
            self,
            fn=fn,
            nargs=1,
            expected_ops=5,
            expected_ops_dynamic=ifdynstaticdefault(5, 8),
        )
    @patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
    def test_tensor_item_capture(self):
        """With capture_scalar_outputs=True, Tensor.item() is captured in the
        graph (3 ops) instead of causing a graph break."""
        def fn(a, b):
            return (a + b).sum().item()

        v1 = torch.randn((10, 10))
        v2 = torch.randn((10, 10))
        correct = fn(v1, v2)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize((cnts))(fn)
        self.assertEqual(opt_fn(v1, v2), correct)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 3)
    @patch.object(torch._dynamo.config, "capture_scalar_outputs", False)
    def test_tensor_item_no_capture(self):
        """With capture_scalar_outputs=False, .item() ends the graph, so only
        the 2 tensor ops before it are captured."""
        def fn(a, b):
            return (a + b).sum().item()

        v1 = torch.randn((10, 10))
        v2 = torch.randn((10, 10))
        correct = fn(v1, v2)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize((cnts))(fn)
        self.assertEqual(opt_fn(v1, v2), correct)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_namedtuple1(self):
        """Constructing and re-constructing a namedtuple (field and index
        access) traces into one graph."""
        def fn(a, b):
            tmp = mytuple(a, b, a + b)
            return mytuple(tmp.a, tmp[1], tmp.ab + b)

        v1 = torch.Tensor([10])
        v2 = torch.Tensor([20])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(v1, v2).ab, 50)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_namedtuple2(self):
        """Unpacking, hasattr, and indexing on a namedtuple argument all
        trace correctly."""
        def fn(packed):
            a, b, c = packed
            if hasattr(packed, "b"):
                b = packed.b + 1
            c = packed[2]
            return a + b + c

        v1 = torch.Tensor([1])
        v2 = torch.Tensor([2])
        v3 = torch.Tensor([3])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(mytuple(v1, v2, v3))[0], 7)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 3)
    def test_namedtuple3(self):
        """isinstance checks against a namedtuple class work under dynamo."""
        def fn(x, packed):
            if isinstance(packed, mytuple):
                return x + 1
            else:
                return x - 1

        x = torch.rand([2, 3])
        packed = mytuple(1, 2, 3)
        ref = fn(x, packed)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn(x, packed)
        self.assertTrue(same(ref, res))
    def test_range_input(self):
        """A range object passed as a keyword argument is unrolled at trace
        time (3 iterations -> 3 adds)."""
        def fn(a, rng):
            x = a
            for i in rng:
                x = x + i
            return x

        def fn1(a):
            return fn(a, rng=range(3))

        return torch._dynamo.testing.standard_test(
            self, fn=fn1, nargs=1, expected_ops=3
        )
    def test_range_with_shape(self):
        """range() bounded by a tensor dimension unrolls; under dynamic shapes
        one extra op (the size call) is expected."""
        def fn(a):
            for i in range(1, a.shape[0]):
                a += 1
            return a

        # expect 1 more op (size call) for dynamic
        return torch._dynamo.testing.standard_test(
            self,
            fn=fn,
            nargs=1,
            expected_ops=9,
            expected_ops_dynamic=ifdynstaticdefault(9, 10),
        )
    def test_build_tuple_unpack(self):
        """Tuple splatting (BUILD_TUPLE_UNPACK / *args forwarding) is traced
        through without graph breaks."""
        def fn1(a, b, c):
            return a - b / c

        def fn2(a, b, c):
            tmp1 = (a,)
            tmp2 = (b, c)
            args = (*tmp1, *tmp2)
            return fn1(*args)

        def fn3(a, *args):
            return fn1(a, *args)

        torch._dynamo.testing.standard_test(self, fn=fn2, nargs=3, expected_ops=2)
        torch._dynamo.testing.standard_test(self, fn=fn3, nargs=3, expected_ops=2)
    def test_list_mul(self):
        """int * list * int multiplication of a constant list is evaluated
        correctly; frame counts differ by dynamic-shape config."""
        def fn(count):
            head_mask = count * [None] * count
            return head_mask

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(2), [None] * 4)

        # TODO: the captured frame here is a bit goofy, because we don't
        # output anything and none of the traced operations have side
        # effects.  Probably need better heuristic for bailing on
        # dynamo if there are no outputs
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(cnts.frame_count, """0""")
            self.assertExpectedInline(cnts.op_count, """0""")
        else:
            self.assertExpectedInline(cnts.frame_count, """1""")
            self.assertExpectedInline(cnts.op_count, """2""")
    def test_list_slice_mul(self):
        """Multiplying a list slice by ints is evaluated correctly; frame
        counts differ by dynamic-shape config (see test_list_mul TODO)."""
        def fn(count):
            a = [1, 2, 3]
            head_mask = count * a[1:] * count
            return head_mask

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(2), [2, 3] * 4)
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(cnts.frame_count, """0""")
            self.assertExpectedInline(cnts.op_count, """0""")
        else:
            self.assertExpectedInline(cnts.frame_count, """1""")
            self.assertExpectedInline(cnts.op_count, """2""")
    def test_tuple_mul(self):
        """int * tuple * int multiplication of a constant tuple is evaluated
        correctly; frame counts differ by dynamic-shape config."""
        def fn(count):
            head_mask = count * (2, 3) * count
            return head_mask

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertEqual(opt_fn(2), (2, 3) * 4)
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(cnts.frame_count, """0""")
            self.assertExpectedInline(cnts.op_count, """0""")
        else:
            self.assertExpectedInline(cnts.frame_count, """1""")
            self.assertExpectedInline(cnts.op_count, """2""")
    def test_tuple_mul_with_shape(self):
        """Tuple repetition involving a tensor dim constant-folds when static;
        dynamic shapes add size/index ops."""
        def fn(a):
            x = a.shape[0]
            y = 2 * (x, 3) * 2
            return a + y[4]

        # expect 3 ops post folding for dynamic case: size, index, add
        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=1, expected_ops_dynamic=ifdynstaticdefault(1, 3)
        )
    def test_tuple_iadd_with_shape(self):
        """In-place tuple concatenation (+=) with shape-derived tensors traces
        correctly for both tensor and constant operands."""
        def fn(a):
            output = (a + a.shape[0], a - a.shape[0])
            # tuple += tuple
            output += (a - a.shape[0], a + a.shape[0])
            # tuple += constant tuple
            output += (2, 3)
            return output

        # expect 4 add / subs for static, 4 * 3 (size, index, math op) for dynamic
        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=4, expected_ops_dynamic=ifdynstaticdefault(4, 12)
        )
    def test_list_iadd_with_shape(self):
        """In-place list concatenation (+=) with lists and tuples of
        shape-derived tensors traces correctly."""
        def fn(a):
            output = [a + a.shape[0], a - a.shape[0]]
            # list += list
            output += [a - a.shape[0], a + a.shape[0]]
            # list += tuple
            output += (a + a.shape[0], a - a.shape[0])
            return output

        # expect 6 add / subs for static, 6 * 3 (size, index, math op) for dynamic
        torch._dynamo.testing.standard_test(
            self, fn, 1, expected_ops=6, expected_ops_dynamic=ifdynstaticdefault(6, 18)
        )
    def test_user_getattr1(self):
        """__getattr__ on a dict subclass is inlined during tracing."""
        class MyConfig(dict):
            def __getattr__(self, name):
                return self[name]

        def fn(cfg, x, y):
            return x + y + cfg.offset

        x = torch.randn(10)
        cfg = MyConfig(offset=5)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(cfg, x, x), 2 * x + 5))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_user_getattr2(self):
        """Attribute resolution order (class attr, instance attr, then
        __getattr__ fallback) is respected during tracing."""
        class MyConfig:
            defined_on_class = 1

            def __init__(self):
                self.defined_on_object = 2

            def __getattr__(self, name):
                return 3

        def fn(cfg, x):
            return x + cfg.defined_on_class - cfg.defined_on_object + cfg.not_defined

        x = torch.randn(10)
        cfg = MyConfig()
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(cfg, x), x + 1 - 2 + 3))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 3)
    def test_user_getattribute(self):
        """A user-defined __getattribute__ (intercepting every attribute
        access) produces the same results under dynamo as in eager."""
        class MyObject:
            def __init__(self):
                self.custom_dict = {"a": torch.rand((2, 2))}
                self.my_number = 42

            def __getattribute__(self, name):
                # Attributes found in custom_dict shadow real attributes.
                custom_dict = super().__getattribute__("custom_dict")
                if name in custom_dict:
                    return custom_dict[name]
                return super().__getattribute__(name)

            def run(self, x):
                return self.my_number * x + self.a * x

        def fn(obj, x):
            return obj.run(x)

        obj = MyObject()
        x = torch.rand((2, 2))
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(obj, x), fn(obj, x)))
    def test_nn_module_getattr(self):
        """An nn.Module overriding __getattr__ (falling back to the Module
        implementation) traces correctly."""
        class MyMod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.custom_dict = {"queue": [torch.rand((2, 2)) for _ in range(3)]}
                self.other_attr = torch.rand((2, 2))

            def __getattr__(self, name):
                custom_dict = self.custom_dict
                if name in custom_dict:
                    return custom_dict[name]
                # Defer to nn.Module's parameter/buffer/submodule lookup.
                return super().__getattr__(name)

            def forward(self, x):
                return x @ self.other_attr + self.queue[-1]

        x = torch.rand((2, 2))
        mod = MyMod()
        cnts = torch._dynamo.testing.CompileCounter()
        opt_mod = torch._dynamo.optimize(cnts)(mod)
        self.assertTrue(same(opt_mod(x), mod(x)))
        # NOTE(review): these use assertTrue, not assertEqual, so the counts
        # are only truthiness-checked — possibly unintended.
        self.assertTrue(cnts.frame_count, 1)
        self.assertTrue(cnts.op_count, 2)
    def test_nn_module_getattribute(self):
        """An nn.Module overriding __getattribute__ produces the same output
        under dynamo as in eager."""
        class MyMod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.my_number = 42

            def __getattribute__(self, name):
                # Synthesizes an attribute that exists nowhere on the object.
                if name == "special_attr":
                    return torch.tensor([[1, 2], [3, 4]])
                return super().__getattribute__(name)

            def forward(self, x):
                return self.my_number * x + self.special_attr * x

        def fn(mod, x):
            return mod(x)

        mod = MyMod()
        x = torch.rand((2, 2))
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(mod, x), fn(mod, x)))
    def test_constant_getattr(self):
        """getattr(None, name, default) returns the default under dynamo."""
        # https://github.com/pytorch/pytorch/issues/97480
        def fn():
            return getattr(None, "arg", 3)

        cnt = torch._dynamo.testing.CompileCounter()
        optimized_fn = torch._dynamo.optimize(cnt)(fn)
        res = optimized_fn()
        self.assertTrue(same(res, 3))
    def test_user_property(self):
        """Accessing a user-defined @property is inlined during tracing."""
        class MyConfig:
            @property
            def prop5(self):
                return 5

        def fn(cfg, x, y):
            return x + y + cfg.prop5

        x = torch.randn(10)
        cfg = MyConfig()
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(cfg, x, x), 2 * x + 5))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_dataclass_fields(self):
        """dataclasses.fields() introspection inside a compiled function works;
        op count depends on how many fields are non-None."""
        @dataclasses.dataclass
        class MyDataClass:
            a: torch.Tensor
            b: torch.Tensor = None
            c: torch.Tensor = None
            d: torch.Tensor = None
            e: torch.Tensor = None

        def fn(obj):
            class_fields = dataclasses.fields(obj)
            assert len(class_fields)
            assert all(field.default is None for field in class_fields[1:])
            other_fields_are_none = all(
                getattr(obj, field.name) is None for field in class_fields[1:]
            )
            assert not other_fields_are_none

            # Sum the first field plus every populated optional field.
            total = getattr(obj, class_fields[0].name)
            for field in class_fields[1:]:
                v = getattr(obj, field.name)
                if v is not None:
                    total += v

            return total

        obj1 = MyDataClass(torch.randn(10), torch.randn(10), torch.randn(10))
        obj2 = MyDataClass(torch.randn(10), e=torch.randn(10))
        correct1 = fn(obj1)
        correct2 = fn(obj2)

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(obj1), correct1))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)

        # Reset so the second object (different populated fields) compiles fresh.
        torch._dynamo.reset()
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(obj2), correct2))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 1)
    def test_tensor_build_list_unpack(self):
        """Iterating a tensor with * unpacking (torch.cat([*x])) traces."""
        def fn(x):
            # seen in fastNLP_Bert
            return torch.cat([*x], dim=-1)

        val = torch.randn([1, 1, 473, 768])
        correct = fn(val)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(val), correct))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    def test_numpy_int_constant(self):
        """A np.int64 scalar argument is treated as a constant: both calls
        with identical args hit the same compiled frame."""
        def fn(x, a, b):
            return x + (a % b)

        args = [torch.randn(10), 4096, np.int64(8)]
        correct = fn(*args)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(*args), correct))
        self.assertTrue(same(opt_fn(*args), correct))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 2)
    @requires_numpy_pytorch_interop
    def test_numpy_ndarray_graph_break(self):
        """ndarray values survive an explicit graph break: the function
        compiles into exactly two frames and matches eager results."""
        def fn(x):
            a = x.numpy()
            b = a.real
            torch._dynamo.graph_break()
            c = np.multiply(b, 2.0)
            return c

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        for _ in range(10):
            x = torch.randn(3)
            ref = fn(x)
            res = opt_fn(x)
            self.assertEqual(ref, res)
        self.assertEqual(cnts.frame_count, 2)
    @requires_numpy_pytorch_interop
    def test_numpy_ndarray_graph_break_with_multiple_outputs(self):
        """Multiple ndarrays crossing a graph break are all reconstructed
        correctly in the second frame."""
        def fn(x, y):
            a = x.numpy()
            b = y.numpy()
            torch._dynamo.graph_break()
            return np.add(a, 1), np.add(b, 1)

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        for _ in range(10):
            x = torch.randn([1, 3])
            y = torch.randn([1, 3])
            ref = fn(x, y)
            res = opt_fn(x, y)
            self.assertTrue(same(ref, res))
        self.assertEqual(cnts.frame_count, 2)
    @requires_numpy_pytorch_interop
    def test_tensor_interacts_with_numpy_ndarray(self):
        """ndarrays created via np.ones_like from tensor-derived arrays cross
        a graph break and combine correctly afterwards."""
        def fn(x, y):
            a = x.numpy()
            b = y.numpy()
            c = np.ones_like(a)
            d = np.ones_like(b)
            torch._dynamo.graph_break()
            return np.add(a, c), np.add(b, d)

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        for _ in range(10):
            x = torch.randn([1, 3])
            y = torch.randn([1, 3])
            ref = fn(x, y)
            res = opt_fn(x, y)
            self.assertTrue(same(ref, res))
        self.assertEqual(cnts.frame_count, 2)
    # TODO(review): this test has an empty name suffix ("test_") — it should
    # be given a descriptive name (e.g. test_detach_with_graph_break).
    def test_(self):
        """A detached tensor and a Python int both survive a graph break."""
        def fn(x: torch.Tensor, y: int):
            z = x.detach()
            w = y + 1
            torch._dynamo.graph_break()
            return z + w

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        for _ in range(10):
            x = torch.randn([1, 3])
            y = 5
            ref = fn(x, y)
            res = opt_fn(x, y)
            self.assertTrue(same(ref, res))
        self.assertEqual(cnts.frame_count, 2)
    def test_inplace_view_on_graph_input(self):
        """Methods tagged inplace_view called on a graph input cause a graph
        break, leaving only the following mul_ in the compiled frame."""
        # graph break when calling methods with inplace_view tag on graph input
        func_args_map = {
            lambda x: x.resize_(6).mul_(2): torch.ones(4),
            lambda x: x.t_().mul_(2): torch.rand(2, 3),
            lambda x: x.transpose_(0, 1).mul_(2): torch.rand(2, 3),
            lambda x: x.squeeze_().mul_(2): torch.rand(1, 2, 3),
            lambda x: x.unsqueeze_(0).mul_(2): torch.rand(2, 3),
            lambda x: x.resize_as_(torch.rand(200, 300)): torch.rand(2, 3),
            lambda x: x.swapaxes_(0, 1).mul_(2): torch.rand(2, 3),
            lambda x: x.swapdims_(0, 1).mul_(2): torch.rand(2, 3),
            lambda x: x.rename_("N", "C").mul_(2): torch.zeros(2, 3),
            lambda x: x.as_strided_((3, 2), (2, 1)).mul_(2): torch.zeros(2, 3),
            lambda x: x.detach_().mul_(2): torch.zeros(2, 3),
        }
        for func, args in func_args_map.items():
            # Clone so the eager run doesn't corrupt the compiled run's input.
            args_clone = args.clone()
            cnts = torch._dynamo.testing.CompileCounter()
            opt_f = torch._dynamo.optimize(cnts)(func)
            self.assertTrue(same(func(args).shape, opt_f(args_clone).shape))
            self.assertEqual(cnts.frame_count, 1)
            self.assertEqual(cnts.op_count, 1)  # mul_
    def test_dict_mutation_side_effect(self):
        """In-place dict mutation (assignment and pop) is replayed on the
        caller's dict object, and the same object is returned."""
        def fn(d):
            d["c"] = d["a"] + d.pop("b")
            return d

        args1 = {"a": torch.randn(10), "b": torch.randn(10)}
        args2 = dict(args1)
        assert fn(args1) is args1
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertIs(opt_fn(args2), args2)
        self.assertTrue(same(args1, args2))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 1)
    def test_module_deepcopy(self):
        """copy.deepcopy of an nn.Module inside a compiled function works,
        and both module instances share one compiled frame."""
        m1 = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
        )
        m2 = torch.nn.Sequential(
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
        )

        def fn(m, x):
            m_copy = copy.deepcopy(m)
            return m_copy(x)

        v = torch.randn(10)
        correct1 = fn(m1, v)
        correct2 = fn(m2, v)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        for _ in range(10):
            self.assertTrue(same(opt_fn(m1, v), correct1))
        for _ in range(10):
            self.assertTrue(same(opt_fn(m2, v), correct2))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 4)
    def test_type_copy(self):
        """type(seq)(...) reconstructs the argument's concrete container type
        (list vs tuple), recompiling once per type."""
        def fn(seq):
            a, b = seq
            return type(seq)([a + 1, b + 2, a + b])

        args1 = [torch.randn(10), torch.randn(10)]
        args2 = (torch.randn(10), torch.randn(10))
        correct1 = fn(args1)
        correct2 = fn(args2)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertTrue(same(opt_fn(args1), correct1))
        self.assertTrue(same(opt_fn(args2), correct2))
        self.assertIsInstance(opt_fn(args1), list)
        self.assertIsInstance(opt_fn(args2), tuple)
        self.assertEqual(cnts.frame_count, 2)
        self.assertEqual(cnts.op_count, 6)
    def test_setattr_mutation1(self):
        """Repeated attribute mutation on a pre-existing object is replayed on
        the original instance, matching the eager result."""
        class MyObj:  # noqa: B903
            def __init__(self, a, b):
                self.a = a
                self.b = b

        def fn(obj):
            obj.c = obj.a * obj.b + 1
            obj.b = obj.a * obj.c + 2
            obj.a = obj.b * obj.c + 3
            obj.c = obj.a * obj.b + 4
            obj.b = obj.a * obj.c + 5
            obj.a = obj.b * obj.c + 6
            return obj

        x1 = torch.randn(10)
        x2 = torch.randn(10)
        obj1 = MyObj(x1, x2)
        obj2 = MyObj(x1, x2)
        fn(obj2)  # eager reference run
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        self.assertIs(opt_fn(obj1), obj1)
        self.assertTrue(same(obj1.a, obj2.a))
        self.assertTrue(same(obj1.b, obj2.b))
        self.assertTrue(same(obj1.c, obj2.c))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 12)
    def test_setattr_mutation2(self):
        """Attribute mutation on an object constructed inside the compiled
        function is reflected on the returned object."""
        class MyObj:
            def __init__(self, x):
                self.a = x + 1
                self.b = x + 2

        def fn(x):
            x = x / 3.0
            obj = MyObj(x)
            obj.c = obj.a * obj.b + 1
            obj.b = obj.a * obj.c + 2
            obj.a = obj.b * obj.c + 3
            return obj

        x1 = torch.randn(10)
        obj2 = fn(x1)  # eager reference
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        obj1 = opt_fn(x1)
        self.assertTrue(same(obj1.a, obj2.a))
        self.assertTrue(same(obj1.b, obj2.b))
        self.assertTrue(same(obj1.c, obj2.c))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 9)
    def test_setattr_mutation3(self):
        """Same as test_setattr_mutation2 but only the attribute values are
        returned, so the object itself is dead after the call."""
        # TODO(jansel): dead code eliminate the object creation
        class MyObj:
            def __init__(self, x):
                super().__init__()
                self.a = x + 1
                self.b = x + 2

        def fn(x):
            x = x / 3.0
            obj = MyObj(x)
            obj.c = obj.a * obj.b + 1
            obj.b = obj.a * obj.c + 2
            obj.a = obj.b * obj.c + 3
            return obj.a, obj.b, obj.c

        x1 = torch.randn(10)
        obj2 = fn(x1)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(fn)
        obj1 = opt_fn(x1)
        self.assertTrue(same(obj1, obj2))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 9)
    def test_user_defined_class_name(self):
        """__class__.__name__ on a user-defined instance is resolved at
        trace time and the branch is taken correctly."""
        class MyClassFoo:
            pass

        def fn1(a, b, c):
            tmp = MyClassFoo()
            if tmp.__class__.__name__ == "MyClassFoo":
                return a - b / c

        torch._dynamo.testing.standard_test(self, fn=fn1, nargs=3)
    def test_user_defined_class_python_type(self):
        """isinstance against user classes (including one with a custom
        metaclass) dispatches correctly when the class object is the arg."""
        class MyClass1:
            pass

        class ExampleMeta(type):
            pass

        class MyClass2(metaclass=ExampleMeta):
            pass

        def fn(x, c):
            if isinstance(c, MyClass1):
                return x + 1
            elif isinstance(c, MyClass2):
                return x + 2
            else:
                return x + 3

        x = torch.rand(3)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        # Note: c is the class object itself, not an instance, so neither
        # isinstance check is true and the else branch is exercised.
        for c in [MyClass1, MyClass2]:
            ref = fn(x, c)
            res = opt_fn(x, c)
            self.assertTrue(same(ref, res))
    def test_super_calling_with_metaclass(self):
        """super() inside a classmethod of a metaclass-using hierarchy works,
        even across an explicit graph break."""
        class ExampleMeta(type):
            pass

        class MyClass1(metaclass=ExampleMeta):
            @classmethod
            def add(cls, x):
                return x + 1

        class MyClass2(MyClass1):
            @classmethod
            def add(cls, x):
                torch._dynamo.graph_break()
                return x + super().add(x)

        def fn(x, obj):
            return x + obj.add(x)

        x = torch.rand(3)
        obj = MyClass2()
        opt_fn = torch._dynamo.optimize("eager")(fn)
        ref = fn(x, obj)
        res = opt_fn(x, obj)
        self.assertTrue(same(ref, res))
    def test_manual_seed(self):
        """torch.manual_seed mid-function is traced without a graph break."""
        def fn(a, b):
            x = a + b
            torch.manual_seed(9000)
            return x + 1

        torch._dynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=3)
    def test_usr_cls_staticmethod(self):
        """Calling a user-defined @staticmethod is inlined during tracing."""
        class Foo:
            @staticmethod
            def bar(a, b):
                return a + b

        def fn(a, b):
            return Foo.bar(a, b) - 1

        torch._dynamo.testing.standard_test(self, fn=fn, nargs=2)
    def test_usr_cls_classmethod(self):
        """Calling a user-defined @classmethod is inlined during tracing."""
        class Foo:
            @classmethod
            def bar(cls, a, b):
                return a + b

        def fn(a, b):
            return Foo.bar(a, b) - 1

        torch._dynamo.testing.standard_test(self, fn=fn, nargs=2)
    def test_dunder_methods(self):
        """User-defined arithmetic dunders (__add__/__mul__/__truediv__/
        __sub__) are inlined, yielding exactly 4 traced ops."""
        class Foo:
            def __init__(self, val):
                super().__init__()
                self.val = val

            def __add__(self, other):
                return Foo(self.val + other.val)

            def __mul__(self, other):
                return Foo(self.val * other.val)

            def __truediv__(self, other):
                return Foo(self.val / other.val)

            def __sub__(self, other):
                return Foo(self.val - other.val)

        def fn(a, b, c):
            return Foo(a) + Foo(b) * Foo(c) / Foo(a) - Foo(b)

        torch._dynamo.testing.standard_test(self, fn=fn, nargs=3, expected_ops=4)
    def test_function_annotation(self):
        """A nested function with a typing annotation referencing a local
        class compiles and its closure behaves correctly."""
        class Variable:
            pass

        def fn(x):
            x = x / 3.0

            def inner(y: typing.List[Variable]):
                return x + 1

            return inner

        x1 = torch.randn(10)
        obj2 = fn(x1)([])
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize_assert(cnts)(fn)
        opt_fn_inner = torch._dynamo.optimize_assert(cnts)(opt_fn(x1))
        obj1 = opt_fn_inner([])
        self.assertTrue(same(obj1, obj2))
        self.assertEqual(cnts.frame_count, 2)
        self.assertEqual(cnts.op_count, 2)
    def test_nested_closure(self):
        """Deeply nested closures capturing outer tensors (including a
        mutable default argument) compile and evaluate correctly."""
        v0 = torch.randn(10)

        def fn1():
            v1 = torch.randn(10)

            def fn2(*args, **kwargs):
                assert len(args) == 1
                assert len(kwargs) == 1
                v2 = torch.randn(10) + args[0] + kwargs["b"]

                def fn3(v3=torch.randn(10)):
                    def fn4():
                        return v0 + v1 + v2 + v3 + 1

                    return fn4

                return fn3

            return fn2(1, b=2)()

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn1 = torch._dynamo.optimize_assert(cnts)(fn1)
        # Two separate outer calls create closures over different random v1/v2.
        tmp1 = torch._dynamo.optimize_assert(cnts)(opt_fn1())
        tmp2 = torch._dynamo.optimize_assert(cnts)(opt_fn1())
        self.assertTrue(tmp1().shape, (10,))
        self.assertTrue(same(tmp1(), tmp1()))
        self.assertFalse(same(tmp1(), tmp2()))
        self.assertEqual(cnts.frame_count, 2)
        self.assertEqual(cnts.op_count, 9)
    def test_nested_closure_mutation(self):
        """Nonlocal mutation of cells at two nesting levels matches eager
        semantics across repeated calls (seeded for determinism)."""
        def fn1():
            v1 = torch.randn(10)

            def fn2():
                v2 = torch.randn(10)

                def fn3():
                    nonlocal v1, v2
                    v1 += 1
                    v2 += 2
                    return v1 + v2

                return fn3

            rv = fn2()
            rv()
            rv()
            return rv

        torch.manual_seed(9000)
        counter1 = fn1()
        result1 = [counter1(), counter1(), counter1()]

        torch.manual_seed(9000)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn1 = torch._dynamo.optimize_assert(cnts)(fn1)
        counter2 = torch._dynamo.optimize_assert(cnts)(opt_fn1())
        result2 = [counter2(), counter2(), counter2()]
        result1.append(counter1())
        result2.append(counter2())
        self.assertTrue(same(result1, result2))
        self.assertEqual(cnts.frame_count, 2)
        self.assertEqual(cnts.op_count, 11)
    def test_write_to_closures_in_inlining(self):
        """An inlined callee writing to its own closure cell produces the
        same state progression as plain eager execution."""
        out = []
        for use_dynamo in [False, True]:

            def make_counter():
                x = torch.randn(10)

                def counter():
                    nonlocal x
                    x = x + 1
                    return x

                return counter

            torch.manual_seed(0)
            counter = make_counter()
            if not use_dynamo:
                out.append(counter() + counter())
            else:
                cnts = torch._dynamo.testing.CompileCounter()

                @torch._dynamo.optimize(cnts, nopython=True)
                def fn(counter):
                    return counter() + counter()

                out.append(fn(counter))
                self.assertEqual(cnts.frame_count, 1)
                self.assertEqual(cnts.op_count, 3)
                # The cell was really mutated: further calls give new values.
                self.assertFalse(same(counter() + counter(), out[-1]))

        self.assertTrue(same(out[0], out[1]))
    def test_closure_out_of_scope_cell(self):
        """Reading free variables (a float and a tensor) from a cell outside
        the compiled frame's scope works through two call levels."""
        cell1 = torch.rand(1).item()
        cell2 = torch.rand(3, 3)

        def indirect():
            return direct()

        def direct():
            def inner():
                return cell1 + 1, cell2 + 3

            return inner()

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts)(indirect)
        result1, result2 = opt_fn()
        self.assertAlmostEqual(cell1 + 1, result1)
        self.assertTrue(torch.allclose(cell2 + 3, result2))
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 1)
    def test_closure_out_of_scope_cell_with_mutation(self):
        """Mutating out-of-scope cells (scalar += and tensor +=) is replayed
        correctly across repeated compiled calls."""
        cell1 = torch.rand(1).item()
        orig1 = cell1
        cell2 = torch.rand(3, 3)
        orig2 = cell2.clone()

        def indirect():
            return direct()

        def direct():
            def inner():
                nonlocal cell1, cell2
                x = cell2 + 1
                cell1 += 1
                cell2 += 10
                x = x + cell2
                return cell1, cell2, x

            return inner()

        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts, nopython=True)(indirect)
        for i in range(1, 4):
            result1, result2, _ = opt_fn()
            # Each call bumps cell1 by 1 and cell2 by 10.
            self.assertAlmostEqual(orig1 + 1 * i, result1)
            self.assertTrue(torch.allclose(orig2 + 10 * i, result2))
            self.assertEqual(cnts.frame_count, 1)
            self.assertEqual(cnts.op_count, 3)
            cnts.clear()
    def test_closure_out_of_scope_cell_with_cond(self):
        """cond() branches that close over different out-of-scope variables
        (including a nested cond) match eager results for both predicates."""
        # Test closure with out-of-scope cell variable, used in a cond
        # where the two branches read different closure variables
        from functorch.experimental.control_flow import cond

        def g(x):
            return x

        class ModuleCondDeep(torch.nn.Module):
            def forward(self, pred, x):
                return self._indirection(pred, x)

            def _indirection(self, pred, x):
                return self.indirection(pred, x)

            def indirection(self, pred, x):
                def true_fn(y):
                    return y + 2

                def false_fn(y):
                    return y - 2

                def shallow(x):
                    return x * 2

                def deep(x):
                    # y = g(x)
                    y = x
                    return cond(
                        x[0][0] > 0,
                        true_fn,
                        false_fn,
                        [y],
                    )

                return cond(pred, shallow, deep, [x])

        mod = ModuleCondDeep()
        opt_mod = torch._dynamo.optimize("eager")(mod)
        inp = torch.randn(3, 3)
        exp1 = mod(torch.tensor(False), inp)
        actual1 = opt_mod(torch.tensor(False), inp)
        exp2 = mod(torch.tensor(True), inp)
        actual2 = opt_mod(torch.tensor(True), inp)
        self.assertTrue(torch.allclose(exp1, actual1))
        self.assertTrue(torch.allclose(exp2, actual2))
    def test_top_package_import(self):
        """An import statement inside the compiled function is handled
        without falling back."""
        def fn(x):
            import torch.fx

            assert not isinstance(x, torch.fx.Proxy)
            return torch.sin(x)

        x = torch.randn(4, 5)
        ref = fn(x)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize_assert(cnts)(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_typing_union_and_optional(self):
        """torch.jit.annotate with Optional/Union typing annotations is traced
        without error."""
        def fn(x):
            a = torch.jit.annotate(typing.Dict[str, typing.Optional[torch.Tensor]], {})
            b = torch.jit.annotate(
                typing.Dict[str, typing.Union[torch.Tensor, None]], {}
            )
            return a, b, x + 1

        x = torch.randn(3)
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_optimize_on_module(self):
        """optimize() applied directly to an nn.Module returns a wrapper that
        forwards both __call__ and arbitrary custom methods."""
        class MockModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.relu = torch.nn.ReLU()

            def custom_member(self):
                # Just for checking that Dynamo returned mod object can redirect
                # to this method
                pass

            def forward(self, x):
                return self.relu(x)

        cnts1 = torch._dynamo.testing.CompileCounter()
        mod = MockModule()
        optimized_mod = torch._dynamo.optimize(cnts1, nopython=True)(mod)

        a = torch.randn(10)
        ref = mod(a)
        res = optimized_mod(a)

        optimized_mod.custom_member()

        self.assertTrue(same(ref, res))
    def test_nested_optimize_decorator(self):
        """When optimize decorators nest, only the outermost compiles; inner
        ones (and @run) contribute no frames of their own."""
        cnts2 = torch._dynamo.testing.CompileCounter()
        cnts3 = torch._dynamo.testing.CompileCounter()

        @torch._dynamo.run()
        def fn1(x):
            return torch.sin(x) * 10

        @torch._dynamo.optimize(cnts2, nopython=True)
        def fn2(x):
            return fn1(x) + 1

        @torch._dynamo.optimize(cnts3, nopython=True)
        def fn3(x):
            return torch.relu(fn2(x))

        fn3(torch.randn(4, 5))
        self.assertEqual(cnts2.frame_count, 0)
        self.assertEqual(cnts3.frame_count, 1)
        self.assertEqual(cnts3.op_count, 4)
    def test_nested_optimize_run(self):
        """torch._dynamo.run() on an already-optimized fn reuses existing
        compiled code and never triggers new compiles."""
        cnts = torch._dynamo.testing.CompileCounter()

        @torch._dynamo.optimize(cnts, nopython=True)
        def fn(x):
            return torch.relu(torch.cos(x) + torch.sin(x))

        fn(torch.randn(4))
        self.assertEqual(cnts.frame_count, 1)

        fn(torch.randn(4, 4))
        self.assertEqual(cnts.frame_count, 2)

        # Test that run works on a decorated fn
        fn = torch._dynamo.run(fn)
        fn(torch.randn(4, 4, 4))
        self.assertEqual(cnts.frame_count, 2)
    def test_nested_optimize(self):
        """When two optimize wrappers stack, the outermost one wins; once a
        code object is compiled, inner wrappers are bypassed."""
        cnts1 = torch._dynamo.testing.CompileCounter()
        cnts2 = torch._dynamo.testing.CompileCounter()

        def fn(x):
            return torch.relu(torch.cos(x) + torch.sin(x))

        fn1 = torch._dynamo.optimize(cnts1, nopython=True)(fn)
        fn2 = torch._dynamo.optimize(cnts2, nopython=True)(fn1)

        # The first optimize in the nesting should be ignored
        fn2(torch.randn(4))
        self.assertEqual(cnts2.frame_count, 1)
        self.assertEqual(cnts1.frame_count, 0)

        # Since the fn code object is already compiled, calling fn1 should
        # directly call the compiled_fn callable.
        torch._dynamo.run()(fn1)(torch.randn(4))
        self.assertEqual(cnts1.frame_count, 0)

        # Test same behavior by reversing the calls
        torch._dynamo.reset()
        cnts1 = torch._dynamo.testing.CompileCounter()
        cnts2 = torch._dynamo.testing.CompileCounter()
        fn1 = torch._dynamo.optimize(cnts1, nopython=True)(fn)
        fn2 = torch._dynamo.optimize(cnts2, nopython=True)(fn1)
        fn1(torch.randn(4))
        self.assertEqual(cnts1.frame_count, 1)
        torch._dynamo.run()(fn2)(torch.randn(4))
        self.assertEqual(cnts2.frame_count, 0)
    def test_torch_size(self):
        """Constructing a torch.Size and splatting it into Tensor.view
        compiles without falling back."""
        cnts = torch._dynamo.testing.CompileCounter()

        def fn(x):
            output_size = torch.Size([10, 10])
            x = x.view(*output_size)
            return (x,)

        x = torch.randn(100, requires_grad=True)
        x_clone = x.clone()
        ref = fn(x)

        opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
        res = opt_fn(x_clone)

        self.assertTrue(same(ref, res))
    def test_size_dim(self):
        """Tensor.size(dim=...) with positive and negative dims compiles."""
        cnts = torch._dynamo.testing.CompileCounter()

        def fn(x, dim):
            return x.size(dim=dim)

        opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
        x = torch.empty([4, 9, 8])
        self.assertEqual(opt_fn(x, 1), 9)
        self.assertEqual(opt_fn(x, -2), 9)
    def test_stride_dim(self):
        """Tensor.stride(dim=...) with positive and negative dims compiles."""
        cnts = torch._dynamo.testing.CompileCounter()

        def fn(x, dim):
            return x.stride(dim=dim)

        opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
        x = torch.empty([4, 9, 8])
        self.assertEqual(opt_fn(x, 0), 72)
        self.assertEqual(opt_fn(x, -2), 8)
def test_torch_seed(self):
cnts = torch._dynamo.testing.CompileCounter()
def fn(x):
attention_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(attention_seed)
return (x,)
x = torch.randn(100, requires_grad=True)
ref = fn(x)
opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
res = opt_fn(x)
self.assertTrue(same(ref, res))
def test_is_tensor_like(self):
    """torch.overrides.is_tensor_like branches correctly for Tensor vs int."""
    cnts = torch._dynamo.testing.CompileCounter()

    def f(x):
        if torch.overrides.is_tensor_like(x):
            return (x * 2,)
        return (torch.ones(10) + x,)

    x = torch.randn(10)
    ref0 = f(x)
    ref1 = f(4)
    opt_f = torch._dynamo.optimize(cnts, nopython=True)(f)
    res0 = opt_f(x)
    res1 = opt_f(4)
    self.assertTrue(same(ref0, res0))
    self.assertTrue(same(ref1, res1))

def test_is_tensor_like2(self):
    """is_tensor_like honors a duck-typed class implementing __torch_function__."""

    class MyTensor:
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            if kwargs is None:
                kwargs = {}
            # intercept torch.max only; forward everything else
            if func is torch.max:
                return torch.tensor(123)
            return func(*args, **kwargs)

    def fn(x):
        if torch.overrides.is_tensor_like(x):
            return torch.max(x)
        else:
            return torch.zeros(1)

    x = MyTensor()
    ref0 = fn(x)
    ref1 = fn(4)
    opt_fn = torch._dynamo.optimize("eager")(fn)
    res0 = opt_fn(x)
    res1 = opt_fn(4)
    self.assertTrue(same(ref0, res0))
    self.assertTrue(same(ref1, res1))

def test_tensor_data(self):
    """Indexing with y.data (the detached alias) compiles under nopython."""

    def fn(x, y):
        return x[y.data]

    x = torch.rand(8)
    y = torch.ones(8).to(torch.int)
    ref = fn(x, y)
    opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
    res = opt_fn(x, y)
    self.assertTrue(same(ref, res))
def test_tensor_layout(self):
    """Reading x.dtype/x.layout/x.device as factory kwargs traces correctly."""

    def fn(x):
        return torch.zeros(
            [x.size()[0], x.size()[1]],
            dtype=x.dtype,
            layout=x.layout,
            device=x.device,
        )

    x = torch.rand(2, 3)
    ref = fn(x)
    opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
    res = opt_fn(x)
    self.assertTrue(same(ref, res))

def test_version_ci(self):
    # temporary test to check that the ci torch version is set correctly
    self.assertTrue(hasattr(torch, "_subclasses"))

@unittest.skipIf(not TEST_CUDA, "requires cuda")
def test_rand(self):
    """Seeded CUDA randn must produce the same values eager vs compiled."""
    cnts = torch._dynamo.testing.CompileCounter()
    device = "cuda"

    def fn():
        return torch.randn(10, device=device)

    torch.manual_seed(10)
    ref_run1 = fn()

    torch.manual_seed(10)
    ref_run2 = fn()
    self.assertTrue(same(ref_run1, ref_run2))

    torch.manual_seed(10)
    opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
    res = opt_fn()

    self.assertTrue(same(res, ref_run1))
def test_slice_input(self):
    """List indexing with slice objects vs ints under nopython compilation."""
    cnts = torch._dynamo.testing.CompileCounter()

    def getitem(a, idx):
        if isinstance(idx, slice):
            return (
                torch.zeros(1),
                a[idx]
                + [
                    100,
                ],
            )
        else:
            return (torch.zeros(1), a[idx])

    layers = list(range(10))
    ref0 = getitem(layers, slice(0, 2, 1))
    ref1 = getitem(layers, 2)
    ref2 = getitem(layers, slice(3, 8, 2))
    opt_getitem = torch._dynamo.optimize(cnts, nopython=True)(getitem)
    res0 = opt_getitem(layers, slice(0, 2, 1))
    res1 = opt_getitem(layers, 2)
    res2 = opt_getitem(layers, slice(3, 8, 2))
    self.assertTrue(ref0 == res0)
    self.assertTrue(ref1 == res1)
    self.assertTrue(ref2 == res2)

def test_grad(self):
    """backward() inside a compiled frame; .grad is then read for the result.
    Grads are zeroed between runs so eager and compiled start identically."""
    cnts = torch._dynamo.testing.CompileCounter()

    def fn(a, b):
        out = a * b
        out.sum().backward()
        real_out = torch.sigmoid(a.grad + b)
        return real_out

    inps = [torch.randn(4, requires_grad=True) for _ in range(2)]
    for inp in inps:
        inp.grad = None
    ref = fn(*inps)

    for inp in inps:
        inp.grad = None
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    res = opt_fn(*inps)

    self.assertTrue(same(ref, res))
@skipIfNotPy311
def test_linetable_311_writer1(self):
    """3.11 linetable writer: reassembled code keeps source start line numbers."""

    def fn():
        a = 10
        b = 20
        c = a + b
        f = "linetable_writer"
        return f"Test if {f} generates correct co_linetable: {c}"

    # Dynamo doesn't deal with column locations or end line numbers,
    # so we only check that start line numbers in the linetables match.
    keys = bytecode_transformation.get_code_keys()
    code_options = {k: getattr(fn.__code__, k) for k in keys}
    result = bytecode_transformation.clean_and_assemble_instructions(
        bytecode_transformation.cleaned_instructions(fn.__code__),
        keys,
        code_options,
    )
    l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions())
    self.assertEqual(len(l1), len(l2))
    for p1, p2 in zip(l1, l2):
        # check that start line numbers match
        self.assertEqual(p1[0], p2[0])
    self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)

@skipIfNotPy311
def test_linetable_311_writer2(self):
    """
    test large ops (LOAD_METHOD) and EXTENDED_ARGS
    fn_str is in the form:
    def fn():
        ...
        x0 = 1
        x1 = 1
        ...
        l = [x0, x1, ...]
    """
    # 1 << 9 assignments forces EXTENDED_ARG-prefixed instructions.
    fn_str = f"""\
def fn():
    foo.bar(1, 2, 3)
{str(chr(10)).join(' ' * 4 + 'x' + str(i) + ' = 1' for i in range(1 << 9))}
    l = [{str(' ').join('x' + str(i) + ',' for i in range(1 << 9))}]
"""
    locals = {}
    exec(fn_str, {}, locals)
    fn = locals["fn"]
    orig_inst_str = "\n".join(list(map(str, dis.get_instructions(fn))))
    self.assertIn("EXTENDED_ARG", orig_inst_str)
    self.assertIn("LOAD_METHOD", orig_inst_str)
    keys = bytecode_transformation.get_code_keys()
    code_options = {k: getattr(fn.__code__, k) for k in keys}
    result = bytecode_transformation.clean_and_assemble_instructions(
        bytecode_transformation.cleaned_instructions(fn.__code__),
        keys,
        code_options,
    )
    new_inst_str = "\n".join(list(map(str, result[0])))
    self.assertIn("EXTENDED_ARG", new_inst_str)
    self.assertIn("LOAD_METHOD", new_inst_str)
    l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions())
    self.assertEqual(len(l1), len(l2))
    for p1, p2 in zip(l1, l2):
        # check that start line numbers match
        self.assertEqual(p1[0], p2[0])
    self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)

@unittest.skipIf(
    sys.version_info < (3, 10) or sys.version_info >= (3, 11),
    "linetable test for Python 3.10",
)
def test_linetable_310_writer(self):
    """3.10 assembler must reproduce co_linetable exactly."""

    def fn():
        a = 10
        b = 20
        c = a + b
        f = "linetable_writer"
        return f"Test if {f} generates correct co_linetable: {c}"

    inst = dis.get_instructions(fn)
    result = bytecode_transformation.assemble(inst, fn.__code__.co_firstlineno)
    self.assertTrue(result[1] == fn.__code__.co_linetable)

@unittest.skipIf(sys.version_info >= (3, 10), "use lnotab when python < 3.10")
def test_lnotab_writer(self):
    """Pre-3.10 assembler must reproduce co_lnotab exactly."""

    def fn():
        a = 10
        b = 20
        c = a + b
        f = "lnotab_writer"
        return f"Test if {f} generates correct co_lnotab: {c}"

    inst = dis.get_instructions(fn)
    result = bytecode_transformation.assemble(inst, fn.__code__.co_firstlineno)
    self.assertTrue(result[1] == fn.__code__.co_lnotab)
def test_profiler_cache_lookup(self):
    """Exactly one 'TorchDynamo Cache Lookup' profiler event per compiled call,
    and zero once the lookup profiler hook is disabled."""

    def fn(x):
        y = x**2
        y = y + 2
        z = y**3
        return z

    # exercise both the legacy autograd profiler and the torch.profiler API
    for profiler, get_events in (
        (torch.autograd.profiler.profile, lambda prof: prof.function_events),
        (torch.profiler.profiler.profile, lambda prof: prof.events()),
    ):
        x = torch.randn((2, 2), requires_grad=True)
        ref = fn(x)
        opt_fn = torch.compile(fn, backend="aot_eager")

        # warmup
        opt_fn(x)

        # whenver we enter the profiler context, hooks are automatically registered
        with profiler() as prof:
            res = opt_fn(x)
        events = list(
            filter(
                lambda event: event.name == "TorchDynamo Cache Lookup",
                get_events(prof),
            )
        )

        self.assertTrue(same(ref, res))
        self.assertTrue(
            len(events) == 1,
            "Expected one lookup profiler event for one opt_fn run",
        )

        with profiler() as prof:
            # just make sure the disable functionality works
            _enable_dynamo_cache_lookup_profiler(False)
            res = opt_fn(x)
        events = list(
            filter(
                lambda event: event.name == "TorchDynamo Cache Lookup",
                get_events(prof),
            )
        )

        self.assertTrue(same(ref, res))
        self.assertTrue(len(events) == 0, "Expected disabled profiling")

def test_tensor_is_contiguous(self):
    """is_contiguous(memory_format=...) returns the right bool when the
    memory format itself is a traced input."""

    def fn(x):
        input = torch.randn((1, 16, 1, 1))
        weight = torch.randn((8, 16, 3, 3))
        weight = weight.to(memory_format=x)
        output = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1)
        return output.is_contiguous(memory_format=x)

    opt_fn = torch._dynamo.optimize("eager")(fn)
    for x in [torch.contiguous_format, torch.channels_last]:
        self.assertEqual(fn(x), opt_fn(x))
def test_python_slice(self):
    """enumerate() over a sliced list and over a sliced tensor shape."""

    def f1(input):
        y = 0
        for i, x in enumerate(input[2:], 1):
            y = y + x
        return y

    def f2(input):
        y = 0
        for i, x in enumerate(input.shape[2:], 1):
            y = y + x
        return y

    cnts = torch._dynamo.testing.CompileCounter()
    opt_f1 = torch._dynamo.optimize(cnts)(f1)
    opt_f2 = torch._dynamo.optimize(cnts)(f2)
    res1 = opt_f1([1, 2, 3, 5])
    res2 = opt_f2(torch.rand([2, 3, 4, 5]))

    self.assertEqual(res1, 8)
    self.assertEqual(res2, 9)

def test_enum_as_dict_key(self):
    """Enum members as dict keys survive a graph break (2 frames expected)."""

    class MyEnum(enum.Enum):
        FOO = 10
        BAR = 20

    def fn(x):
        y = x + 2
        z = {
            MyEnum.FOO: torch.tensor(1),
            MyEnum.BAR: 10,
            "MyEnum.BAR": torch.tensor(8),
            5: torch.rand(3),
        }
        torch._dynamo.graph_break()
        a = z[MyEnum.FOO] + z["MyEnum.BAR"]
        b = y * 2
        return a, b

    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    for _ in range(10):
        x = torch.rand(3)
        ref = fn(x)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    # explicit graph_break splits fn into exactly two compiled frames
    self.assertEqual(cnts.frame_count, 2)

def test_const_dict_variable_python_type(self):
    """ConstDictVariable.python_type() reflects dict vs OrderedDict."""
    from torch._dynamo.variables import ConstantVariable, ConstDictVariable

    d1 = {"a": ConstantVariable(10), "b": ConstantVariable(20)}
    d2 = collections.OrderedDict(
        [("x", ConstantVariable(12)), ("y", ConstantVariable(22))]
    )
    self.assertEqual(ConstDictVariable(d1, dict).python_type(), dict)
    self.assertEqual(
        ConstDictVariable(d2, collections.OrderedDict).python_type(),
        collections.OrderedDict,
    )
def test_builtin_subclasses_as_method_on_class_type(self):
    """Compiled Foo.__subclasses__() must equal the eager result.

    Bug fix: ``Foo.__init__`` assigned ``self.ame_ = name`` (typo) while
    ``get_name`` reads ``self.name_`` — calling ``Foo(...).get_name()``
    would have raised AttributeError. The sibling classes all use
    ``name_``, so the attribute is corrected to match. The assertions
    below are unaffected: they only exercise ``__subclasses__()``.
    """

    class Foo:
        def __init__(self, name):
            # was: self.ame_ = name (typo) — get_name reads self.name_
            self.name_ = name

        def get_name(self):
            return "Foo " + self.name_

    class Bar(Foo):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Bar " + self.name_

    class Baz(Foo):
        def __init__(self, name):  # noqa: B903
            self.name_ = name

        def get_name(self):
            return "Baz " + self.name_

    subs_of_foo_reg = Foo.__subclasses__()

    counter = CompileCounter()

    @torch._dynamo.optimize_assert(counter)
    def fn():
        return Foo.__subclasses__()

    subs_of_foo_optim = fn()

    # Bar and Baz are the only direct subclasses
    self.assertEqual(len(subs_of_foo_reg), 2)
    self.assertEqual(subs_of_foo_reg, subs_of_foo_optim)
def test_builtin_subclasses_as_method_on_var(self):
    """__subclasses__() called on a traced variable (list element), not on
    the class object directly."""

    class Foo:
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Foo " + self.name_

    class Bar(Foo):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Bar " + self.name_

    class Baz(Bar):
        def __init__(self, name):
            self.name_ = name

        def get_name(self):
            return "Baz " + self.name_

    subs_of_foo_reg = Foo.__subclasses__()
    sub_of_foo_subclass_var_reg = subs_of_foo_reg[0].__subclasses__()

    sub_of_foo_subclass_var_optim = list()
    counter = CompileCounter()

    @torch._dynamo.optimize_assert(counter)
    def fn():
        return Foo.__subclasses__()

    @torch._dynamo.optimize_assert(counter)
    def fn_single(subs_of_foo_optim):
        return subs_of_foo_optim[0].__subclasses__()

    subs_of_foo_optim = fn()
    sub_of_foo_subclass_var_optim = fn_single(subs_of_foo_optim)

    # Baz is the only subclass of Bar
    self.assertEqual(len(sub_of_foo_subclass_var_optim), 1)
    self.assertEqual(sub_of_foo_subclass_var_optim, sub_of_foo_subclass_var_reg)

def test_enum_no_graphbreaks(self):
    """`foo is Foo.FOO` is resolved at trace time: the untaken branch is
    pruned, so op_count differs (2 vs 1) without any graph break."""

    class Foo(enum.Enum):
        FOO = 0
        BAR = 1

    def fn(x, foo):
        if foo is Foo.FOO:
            x = torch.add(x, 1.0)
        x = torch.mul(x, 1.0)
        return x

    x = torch.randn(1)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
    opt_fn(x, Foo.FOO)
    self.assertEqual(cnts.op_count, 2)

    torch._dynamo.reset()
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
    opt_fn(x, Foo.BAR)
    self.assertEqual(cnts.op_count, 1)

def test_repeat_interleave_graphbreaks(self):
    """repeat_interleave with int repeats compiles in one frame; with a
    Tensor repeats (data-dependent shape) it graph-breaks into two."""

    def fn_no_breaks(x):
        # no breaks on self_int
        x += 1
        x = torch.repeat_interleave(x, 2, 3)
        x += 1
        return x

    def fn_has_breaks(x):
        # breaks on self_Tensor
        x += 1
        x = torch.repeat_interleave(x, torch.tensor(2), 3)
        x += 1
        return x

    x = torch.randn([4, 16, 1, 64])

    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn_no_breaks)
    opt_fn(x)
    self.assertEqual(cnts.frame_count, 1)

    torch._dynamo.reset()
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn_has_breaks)
    opt_fn(x)
    self.assertEqual(cnts.frame_count, 2)
def test_id_of_nn_module(self):
    """id(self) comparison inside forward is specialized at trace time."""

    class M(torch.nn.Module):
        def forward(self, x, ref_id):
            self_id = id(self)
            if self_id == ref_id:
                x = torch.mul(x, 1.0)
            x = torch.add(x, 1.0)
            return x

    m = M().eval()
    data = torch.randn(1)
    cnts = torch._dynamo.testing.CompileCounter()
    correct_ref_id = id(m)
    opt_m = torch._dynamo.optimize(cnts, nopython=True)(m)
    opt_m(data, correct_ref_id)
    # Extra op is the recorded equality test (although once
    # the trace is flattened this is dead!)
    if torch._dynamo.config.assume_static_by_default:
        self.assertExpectedInline(cnts.op_count, """2""")
    else:
        self.assertExpectedInline(cnts.op_count, """3""")

    torch._dynamo.reset()
    cnts = torch._dynamo.testing.CompileCounter()
    incorrect_ref_id = id(m) + 1
    opt_m = torch._dynamo.optimize(cnts, nopython=True)(m)
    opt_m(data, incorrect_ref_id)
    if torch._dynamo.config.assume_static_by_default:
        self.assertExpectedInline(cnts.op_count, """1""")
    else:
        self.assertExpectedInline(cnts.op_count, """2""")

def test_inline_func_jump_on_tensor_condition(self):
    """Inlined callee branching on a tensor comparison still returns the
    right value for both branches."""

    def f1(input):
        if input == 0:
            return input + 1
        else:
            return input + 2

    def f2(input):
        return f1(input)

    cnts = torch._dynamo.testing.CompileCounter()
    opt_f2 = torch._dynamo.optimize(cnts)(f2)
    res1 = opt_f2(torch.tensor([1.0]))
    res2 = opt_f2(torch.tensor([0.0]))

    self.assertEqual(res1, 3)
    self.assertEqual(res2, 1)

def test_frozenset_torch_func_contains(self):
    """`func in frozenset` over torch functions is resolved at trace time,
    pruning the untaken branch (op_count 2 vs 1)."""
    funcs = frozenset([torch.add])

    def fn(x, func):
        if func in funcs:
            x = torch.add(x, 1.0)
        x = torch.mul(x, 1.0)
        return x

    x = torch.randn(1)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
    opt_fn(x, torch.add)
    self.assertEqual(cnts.op_count, 2)

    torch._dynamo.reset()
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
    opt_fn(x, torch.mul)
    self.assertEqual(cnts.op_count, 1)
def test_inline_list_mutation(self):
    """A list mutated by an inlined callee is visible to the caller."""

    def f1(x):
        x.append(torch.ones(8))
        return x

    def f2():
        x = [torch.ones(6)]
        f1(x)
        return x

    res1 = f2()
    cnts = torch._dynamo.testing.CompileCounter()
    opt_f2 = torch._dynamo.optimize(cnts)(f2)
    res2 = opt_f2()
    self.assertTrue(same(res1, res2))

def test_inline_dict_mutation(self):
    """A dict mutated (setitem + pop) by an inlined callee is visible to
    the caller."""

    def f1(d):
        d["c"] = d["a"] + d.pop("b")
        return d

    def f2():
        d = {"a": torch.ones(5), "b": torch.ones(5)}
        f1(d)
        return d

    res1 = f2()
    cnts = torch._dynamo.testing.CompileCounter()
    opt_f2 = torch._dynamo.optimize(cnts)(f2)
    res2 = opt_f2()
    self.assertTrue(same(res1, res2))

def test_recursive_inline_list_mutation(self):
    """List mutations accumulate correctly across a three-deep inline chain."""

    def f1(x, y):
        x.append(torch.tensor([1.1]))
        y.append(torch.tensor([1.2]))
        return x, y

    def f2(x, y):
        x.append(torch.tensor([2.1]))
        y.append(torch.tensor([2.2]))
        f1(x, y)
        return x, y

    def f3(x):
        x.append(torch.tensor([3.1]))
        y = [torch.tensor([3.2])]
        f2(x, y)
        return x, y

    def f4():
        x = [torch.tensor([4.1])]
        return f3(x)

    res1 = f4()
    cnts = torch._dynamo.testing.CompileCounter()
    opt_f4 = torch._dynamo.optimize(cnts)(f4)
    res2 = opt_f4()
    self.assertTrue(same(res1, res2))
def test_sample_input(self):
    """isinstance() check on a SampleInput attribute traces correctly."""
    from torch.testing._internal.common_methods_invocations import SampleInput

    def fn(sample):
        if isinstance(sample.input, torch.Tensor):
            return sample.input * 2
        return torch.zeros(())

    sample = SampleInput(torch.ones(2))
    ref = fn(sample)

    opt_fn = torch._dynamo.optimize("eager")(fn)
    res = opt_fn(sample)

    self.assertTrue(same(ref, res))

def test_release_input_memory(self):
    """Compiling must not keep the input tensor alive: after `del x` the
    weakref must be dead."""
    x = torch.rand([4])
    x_ref = weakref.ref(x)

    cnts = torch._dynamo.testing.CompileCounter()

    @torch._dynamo.optimize(cnts)
    def foo(x):
        return x + x

    out = foo(x)
    self.assertTrue(same(out, x + x))
    del x
    self.assertIs(x_ref(), None)

def test_release_module_memory(self):
    mod = torch.nn.Linear(10, 10)
    x = torch.rand([10, 10])
    mod_weight_ref = weakref.ref(mod.weight)
    mod_ref = weakref.ref(mod)

    # Modules that are passed into torch._dynamo optimized functions
    # will normally be held onto through the generated GraphModule,
    # which contains the modules. remove the reference in this backend
    # and test that no additional references are being held.
    class NoLeakBackend:
        def __call__(self, gm: torch.fx.GraphModule, example_inputs):
            gm.mod = None

            def foo(*args, **kwargs):
                return (1,)

            return foo

    no_leak_backend = NoLeakBackend()

    @torch._dynamo.optimize(no_leak_backend)
    def foo(mod, x):
        return mod(x)

    foo(mod, x)
    del mod
    del x
    # NOTE(review): the second argument to assertIsNone is the failure
    # message; passing None here is redundant (msg defaults to None).
    self.assertIsNone(mod_ref(), None)
    self.assertIsNone(mod_weight_ref(), None)
def test_update_locals_and_stack_uses_shared_cache(self):
    """list.extend with a generator that reads the list being extended."""

    def fn(x):
        perm = [0, 3, 5]
        perm = list(range(min(perm))) + perm
        perm.extend(i for i in range(x.dim()) if i not in perm)
        return perm

    x = torch.rand([2, 2, 2, 2, 2, 2])
    res1 = fn(x)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    res2 = opt_fn(x)
    self.assertTrue(same(res1, res2))

def test_dict_reconstruct_keeps_original_order(self):
    """Reconstructed OrderedDict / ModuleDict preserve insertion order."""

    def fn():
        modules = collections.OrderedDict([("act", torch.nn.ReLU())])
        module_dict = torch.nn.ModuleDict(modules)

        next_modules = {"fc4": torch.nn.Linear(5, 6), "act3": torch.nn.Sigmoid()}
        modules.update(next_modules.items())
        module_dict.update(next_modules)
        return modules, module_dict

    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    modules, module_dict = opt_fn()

    self.assertEqual(len(module_dict), len(modules))
    for k1, m2 in zip(modules, module_dict.children()):
        self.assertTrue(modules[k1] is m2)

def test_side_effects_codegen_update_mutated(self):
    # codegen to update mutated variables with side effect
    # should after stack value's codegen
    def f1(x):
        alist = [x]
        alist.append(x + 1)
        alist[0].sum().item()  # graph break
        res = alist.pop()
        res.sum().item()  # graph break
        return res

    def f2(a, b):
        d = {"a": a + 1, "b": b + 2}
        x = d.pop("b")
        x.sum().item()  # graph break
        y = d["a"] + x
        y.sum().item()  # graph break
        d["c"] = y
        return d

    x = torch.rand([2, 3])
    a = torch.rand([5, 6])
    b = torch.rand([5, 6])
    res11 = f1(x)
    res21 = f2(a, b)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_f1 = torch._dynamo.optimize(cnts)(f1)
    opt_f2 = torch._dynamo.optimize(cnts)(f2)
    res12 = opt_f1(x)
    res22 = opt_f2(a, b)
    self.assertTrue(same(res11, res12))
    self.assertTrue(same(res21, res22))
def test_list_append_return_none(self):
    """list.append returns None; the traced result must agree."""

    def fn(x):
        alist = []
        blist = alist.append(x + 1)
        return alist, blist

    x = torch.tensor([2.3])
    res = fn(x)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    res2 = opt_fn(x)
    self.assertEqual(res, res2)

def test_tensor_types(self):
    """isinstance against legacy tensor-type aliases (FloatTensor, ...)."""

    def fn(dtype, tensor_type):
        x = torch.empty(4, dtype=dtype)
        assert isinstance(x, tensor_type)

    opt_fn = torch._dynamo.optimize("eager")(fn)
    opt_fn(torch.float32, torch.FloatTensor)
    opt_fn(torch.float64, torch.DoubleTensor)
    opt_fn(torch.float16, torch.HalfTensor)
    opt_fn(torch.bfloat16, torch.BFloat16Tensor)
    opt_fn(torch.uint8, torch.ByteTensor)
    opt_fn(torch.int8, torch.CharTensor)
    opt_fn(torch.int64, torch.LongTensor)
    opt_fn(torch.int, torch.IntTensor)
    opt_fn(torch.int16, torch.ShortTensor)
    opt_fn(torch.bool, torch.BoolTensor)

def test_nan(self):
    """A float('nan') scalar argument must not defeat guard caching:
    both calls hit the same compiled frame (nan != nan notwithstanding)."""

    def f(x, n):
        return x * 2 + n

    x = torch.randn(4)
    n = float("nan")

    cnts = torch._dynamo.testing.CompileCounter()
    opt_f = torch._dynamo.optimize(cnts)(f)
    opt_f(x, n)
    opt_f(x, n)
    self.assertEqual(cnts.frame_count, 1)
@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
def test_item(self):
    """.item() is captured (not a graph break) with capture_scalar_outputs."""

    class MyMod(torch.nn.Module):
        def forward(self, x):
            z = torch.max(x)
            return z.int().item()

    x = torch.tensor([[10.6763, 11.7445, -2.2369]])
    model = MyMod()
    y = torch._dynamo.optimize("eager", nopython=True)(model)(x)

    self.assertEqual(y, 11)

@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
def test_item_changes(self):
    """Captured .item() result is correct when input values change."""

    class MyMod(torch.nn.Module):
        def forward(self, x):
            z = torch.max(x)
            return z.int().item()

    x = torch.tensor([[10.6763, 11.7445, -2.2369]])
    model = MyMod()
    opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
    y = opt_model(x)
    z = opt_model(torch.tensor([[y - 5, y + 10, y + 50]]))

    self.assertEqual(y, 11)
    self.assertEqual(z, 61)

@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
def test_item_changes_new_shape(self):
    """Captured .item() result is correct when the input shape changes too."""

    class MyMod(torch.nn.Module):
        def forward(self, x):
            z = torch.max(x)
            return z.int().item()

    x = torch.tensor([[10.6763, 11.7445, -2.2369]])
    model = MyMod()
    opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
    y = opt_model(x)
    z = opt_model(torch.tensor([[y - 5, y + 50], [y + 5, y - 50]]))

    self.assertEqual(y, 11)
    self.assertEqual(z, 61)
@unittest.skip("https://github.com/pytorch/pytorch/issues/99726")
def test_cross_entropy_loss_fancy_ctor1(self):
    """CrossEntropyLoss with weight + reduce=False + label_smoothing."""
    rand_5 = torch.randn(5)
    rand_3_5 = torch.randn(3, 5)
    target = torch.empty(3, dtype=torch.long).random_(5)

    loss = torch.nn.CrossEntropyLoss(
        weight=rand_5, reduce=False, label_smoothing=0.5
    )
    opt_loss = torch._dynamo.optimize("eager", nopython=True)(loss)
    input = rand_3_5
    dynamo_output = opt_loss(input, target)

    loss = torch.nn.CrossEntropyLoss(
        weight=rand_5, reduce=False, label_smoothing=0.5
    )
    input = rand_3_5
    output = loss(input, target)

    self.assertTrue(torch.allclose(dynamo_output, output))

def test_cross_entropy_loss_fancy_ctor2(self):
    """CrossEntropyLoss with reduce=False + label_smoothing (no weight)."""
    rand_3_5 = torch.randn(3, 5)
    target = torch.empty(3, dtype=torch.long).random_(5)

    loss = torch.nn.CrossEntropyLoss(reduce=False, label_smoothing=0.5)
    opt_loss = torch._dynamo.optimize("eager", nopython=True)(loss)
    input = rand_3_5
    dynamo_output = opt_loss(input, target)

    loss = torch.nn.CrossEntropyLoss(reduce=False, label_smoothing=0.5)
    input = rand_3_5
    output = loss(input, target)

    self.assertTrue(torch.allclose(dynamo_output, output))

def test_cross_entropy_loss_simple_ctor(self):
    """Default-constructed CrossEntropyLoss compiles under nopython."""
    output = None
    rand_3_5 = torch.randn(3, 5)
    target = torch.empty(3, dtype=torch.long).random_(5)

    loss = torch.nn.CrossEntropyLoss()
    opt_loss = torch._dynamo.optimize("eager", nopython=True)(loss)
    input = rand_3_5
    dynamo_output = opt_loss(input, target)

    loss = torch.nn.CrossEntropyLoss()
    input = rand_3_5
    output = loss(input, target)

    self.assertTrue(torch.allclose(dynamo_output, output))
def test_nn_functional_reduction(self):
    """F._Reduction.get_enum dispatch (none/mean/sum) under nopython."""

    def fn(loss, reduction):
        reduction_enum = F._Reduction.get_enum(reduction)
        if reduction_enum == 0:
            return loss
        elif reduction_enum == 1:
            return loss.mean()
        elif reduction_enum == 2:
            return loss.sum()

    x = torch.rand([3, 5])
    y = "mean"
    ref = fn(x, y)
    opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
    res = opt_fn(x, y)
    self.assertTrue(torch.allclose(ref, res))

def test_large_reduction_list(self):
    """tensor.sum() agrees with Python sum() over a 200k-element tolist()."""
    dtype = torch.float32
    device = "cpu"

    def check_sum_all(tensor: torch.Tensor) -> None:
        pylist = tensor.reshape(-1).tolist()
        self.assertTrue(same(tensor.sum(), torch.tensor(sum(pylist))))

    check_sum_all(torch.randn(200000, dtype=dtype, device=device))

def test_raise_on_backend_error(self):
    """A raising backend surfaces as BackendCompilerFailed to the caller."""

    def my_compiler(gm, _):
        raise RuntimeError("duck!")

    @torch._dynamo.optimize(my_compiler)
    def fn(a, b):
        return a + b / (a - b)

    self.assertRaises(
        torch._dynamo.exc.BackendCompilerFailed,
        lambda: fn(torch.randn(10), torch.randn(10)),
    )
def test_named_parameters(self):
    """named_parameters() (with and without a prefix) traced inside a
    compiled frame matches eager, including nested-submodule params."""
    n_embd = 768
    block_size = 128
    vocab_size = 65
    embd_pdrop = 0.1

    class MyModel2(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.tok_emb = torch.nn.Embedding(vocab_size, n_embd)
            self.pos_emb = torch.nn.Parameter(torch.zeros(1, block_size, n_embd))
            self.drop = torch.nn.Dropout(embd_pdrop)

        def forward(self, x):
            return x

    class MyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.tok_emb = torch.nn.Embedding(vocab_size, n_embd)
            self.pos_emb = torch.nn.Parameter(torch.zeros(1, block_size, n_embd))
            self.drop = torch.nn.Dropout(embd_pdrop)
            self.submod2 = MyModel2()

        def forward(self, x):
            return x

    # Regular
    params = []
    mod = MyModel()
    actual_params = list(mod.named_parameters())

    @torch._dynamo.optimize("eager", nopython=True)
    def fn():
        return list(mod.named_parameters())

    params = fn()

    self.assertEqual(len(actual_params), len(params))
    for idx in range(len(params)):
        k_a, v_a = actual_params[idx]
        k, v = params[idx]
        self.assertEqual(k_a, k)
        self.assertTrue(torch.allclose(v_a, v))

    # Prefix
    params = []
    mod = MyModel()
    actual_params = list(mod.named_parameters(prefix="foo"))

    @torch._dynamo.optimize("eager", nopython=True)
    def fn1():
        return list(mod.named_parameters(prefix="foo"))

    params = fn1()

    self.assertEqual(len(actual_params), len(params))
    for idx in range(len(params)):
        k_a, v_a = actual_params[idx]
        k, v = params[idx]
        self.assertEqual(k_a, k)
        self.assertTrue(torch.allclose(v_a, v))
def test_module_complex_iter(self):
    """Nested named_modules()/named_parameters() iteration (foo) matches
    eager, with and without a prefix. Only foo() is exercised here."""
    n_embd = 768
    block_size = 128
    vocab_size = 65
    embd_pdrop = 0.1

    class FakeGPT(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.tok_emb = torch.nn.Embedding(vocab_size, n_embd)
            self.pos_emb = torch.nn.Parameter(torch.zeros(1, block_size, n_embd))
            self.drop = torch.nn.Dropout(embd_pdrop)
            self.ln_f = torch.nn.LayerNorm(n_embd)
            self.head = torch.nn.Linear(n_embd, vocab_size, bias=False)

            self.block_size = block_size
            self.names = []

        def forward(self, idx, targets=None):
            # NOTE(review): self.blocks is never defined on FakeGPT, so
            # forward() would raise AttributeError if called; the test only
            # exercises foo() below.
            b, t = idx.size()
            assert (
                t <= self.block_size
            ), "Cannot forward, model block size is exhausted."

            # forward the GPT model
            token_embeddings = self.tok_emb(
                idx
            )  # each index maps to a (learnable) vector
            position_embeddings = self.pos_emb[
                :, :t, :
            ]  # each position maps to a (learnable) vector
            x = self.drop(token_embeddings + position_embeddings)
            x = self.blocks(x)
            x = self.ln_f(x)
            logits = self.head(x)

            # if we are given some desired targets also calculate the loss
            loss = None
            if targets is not None:
                loss = F.cross_entropy(
                    logits.view(-1, logits.size(-1)), targets.view(-1)
                )
            return logits, loss

        def foo(self, memo=None, prefix="", remove_duplicate=False):
            # records every "module.param" qualified name into self.names
            for mn, m in self.named_modules(
                memo=memo, prefix=prefix, remove_duplicate=remove_duplicate
            ):
                for pn, p in self.named_parameters():
                    fpn = "%s.%s" % (mn, pn) if mn else pn
                    self.names.append(fpn)

    # Test plain recurse
    model_a = FakeGPT()
    model_a.foo()
    a_names = model_a.names

    model_b = FakeGPT()
    opt_model_b = torch._dynamo.optimize("eager", nopython=True)(model_b)
    opt_model_b.foo()

    self.assertEqual(a_names, model_b.names)

    # Test with prefix
    model_a = FakeGPT()
    model_a.foo(prefix="abc")
    a_names = model_a.names

    model_b = FakeGPT()
    opt_model_b = torch._dynamo.optimize("eager", nopython=True)(model_b)
    opt_model_b.foo(prefix="abc")

    self.assertEqual(a_names, model_b.names)
def test_numpy_variable_isinstance(self):
    """isinstance(m, np.ndarray) on a numpy argument traces correctly."""

    def fn(x, m):
        if isinstance(m, np.ndarray):
            return x + 1
        else:
            return x - 1

    x = torch.tensor([2.3])
    m = np.array([1, 2, 3])
    ref = fn(x, m)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    res = opt_fn(x, m)
    self.assertEqual(ref, res)

def test_tensor_dot_grad_no_graph_break(self):
    """backward(gradient=...) plus .grad.zero_() compiles into 2 frames."""

    def fn(a, b):
        y = 3 * a**3 - b**2
        y.backward(gradient=torch.tensor([1.0, 1.0]))
        b.grad.zero_()
        return a.grad, b.grad

    a = torch.tensor([2.0, 3.0], requires_grad=True)
    b = torch.tensor([6.0, 4.0], requires_grad=True)
    cnts = torch._dynamo.testing.CompileCounter()
    opt_fn = torch._dynamo.optimize(cnts)(fn)
    _, b_grad = opt_fn(a, b)
    self.assertTrue(same(b_grad, torch.tensor([0.0, 0.0])))
    self.assertEqual(cnts.frame_count, 2)

def test_torch_nn_parameter_isinstance(self):
    """torch.nn.Parameter is an isinstance-of-Tensor inside a traced frame."""

    def fn(x):
        a = torch.nn.Parameter(torch.rand(2, 3))
        if isinstance(a, torch.Tensor):
            return x + 1
        else:
            return x - 1

    x = torch.tensor([2.5])
    ref = fn(x)
    opt_fn = torch._dynamo.optimize("eager")(fn)
    res = opt_fn(x)
    self.assertEqual(ref, res)
# '__torch__.torch.SymInt (of Python compilation unit at: 0x4c9c0e0)'
# object has no attribute or method '__ne__'
# NB: I don't think this ever can actually work, cuz TorchScript
# can't deal with SymInt inputs
@expectedFailureDynamic
@torch._dynamo.config.patch(raise_on_backend_change=True)
def test_change_backends(self):
    """With raise_on_backend_change, switching backends without a reset
    raises ResetRequired; same-backend calls (fn1/fn3 are both eager) don't."""

    @torch._dynamo.optimize("eager", nopython=True)
    def fn1():
        return x + 1

    @torch._dynamo.optimize("ts")
    def fn2():
        return x + 2

    @torch._dynamo.optimize("eager", nopython=False)
    def fn3():
        return x + 1

    x = torch.tensor([3, 5])

    fn1()
    fn1()
    fn3()
    self.assertRaises(torch._dynamo.exc.ResetRequired, fn2)
    fn1()
    torch._dynamo.reset()
    fn2()
    fn2()
    self.assertRaises(torch._dynamo.exc.ResetRequired, fn1)
    self.assertRaises(torch._dynamo.exc.ResetRequired, fn3)
    fn2()

def test_dynamo_min_operator_with_shape(self):
    """min() over a shape element and a Python int under nopython."""

    @torch._dynamo.optimize("eager", nopython=True)
    def f(x, a):
        return min(x.shape[0], a)

    result = f(torch.ones(6), 3)
    self.assertEqual(result, 3)

def test_onnx_shape_as_tensor(self):
    """torch._shape_as_tensor and the onnx operators alias behave the same
    across differing input ranks."""

    @torch._dynamo.optimize("eager", nopython=True)
    def f(x):
        return 1 + torch._shape_as_tensor(x)[0]

    # export is exercised for coverage; its result is intentionally unused
    gm, _ = torch._dynamo.export(f, torch.ones(6))

    input_one_dim = torch.ones(6)
    input_two_dims = torch.ones(7, 4)
    self.assertEqual(f(input_one_dim), 7)
    self.assertEqual(f(input_two_dims), 8)
    self.assertEqual(f(input_two_dims), 8)

    @torch._dynamo.optimize("eager", nopython=True)
    def f_onnx(x):
        return 1 + torch.onnx.operators.shape_as_tensor(x)[0]

    self.assertEqual(f_onnx(input_one_dim), 7)
    self.assertEqual(f_onnx(input_two_dims), 8)
    self.assertEqual(f_onnx(input_two_dims), 8)
def test_cond(self):
    """control_flow.cond dispatches to true_fn/false_fn by the predicate."""
    from functorch.experimental.control_flow import cond

    def true_fn(x):
        return x.sin()

    def false_fn(x):
        return x.cos()

    def f(pred, x):
        return cond(pred, true_fn, false_fn, [x])

    opt_fn = torch._dynamo.optimize("eager")(f)
    a = opt_fn(torch.tensor(False), torch.tensor([0.25, 0.25]))
    self.assertTrue(same(torch.cos(torch.tensor([0.25, 0.25])), a))
    b = opt_fn(torch.tensor(True), torch.tensor([0.25, 0.25]))
    self.assertTrue(same(torch.sin(torch.tensor([0.25, 0.25])), b))
def test_nonzero_static(self):
    """torch.nonzero_static: error paths (negative size, dtype mismatch),
    out-tensor resizing, 0-rank/0-size inputs, and fill_value padding for
    1D/2D/3D inputs."""
    # invalid size
    with self.assertRaisesRegex(
        RuntimeError, "nonzero_static: 'size' must be an non-negative integer"
    ):
        torch.nonzero_static(torch.tensor([8]), size=-2)

    with self.assertRaisesRegex(
        RuntimeError, "nonzero_static: 'size' must be an non-negative integer"
    ):
        torch.nonzero_static(torch.tensor([8]), size=-2, out=torch.tensor(0))

    # nonzero_static.out: out dtype mismatch
    input_tensor = torch.tensor([8])
    static_size = 1
    out_tensor = torch.empty((static_size, input_tensor.dim()), dtype=torch.float)
    with self.assertRaisesRegex(
        RuntimeError, "nonzero_static: Expected out tensor to have scalar type Long"
    ):
        torch.nonzero_static(input_tensor, size=static_size, out=out_tensor)

    # nonzero_static.out: out resize (shrink)
    input_tensor = torch.tensor([8])
    static_size = 1
    out_tensor = torch.empty((10, 10, 10, 10), dtype=torch.long)
    self.assertTrue(
        same(
            torch.nonzero_static(input_tensor, size=static_size, out=out_tensor),
            torch.tensor([0]),
        )
    )
    self.assertTrue(
        same(
            out_tensor,
            torch.tensor([0]),
        )
    )

    # nonzero_static.out: out resize (enlarge)
    input_tensor = torch.tensor([8])
    static_size = 1
    out_tensor = torch.empty((0), dtype=torch.long)
    self.assertTrue(
        same(
            torch.nonzero_static(input_tensor, size=static_size, out=out_tensor),
            torch.tensor([0]),
        )
    )
    self.assertTrue(
        same(
            out_tensor,
            torch.tensor([0]),
        )
    )

    # 0 rank
    input_tensor = torch.tensor(6)
    static_size = 2
    self.assertTrue(
        same(
            torch.nonzero_static(input_tensor, size=static_size),
            torch.empty((static_size, input_tensor.dim()), dtype=torch.long),
        )
    )

    # 0 size
    input_tensor = torch.tensor([[[1]]])
    static_size = 0
    self.assertTrue(
        same(
            torch.nonzero_static(input_tensor, size=static_size),
            torch.empty((static_size, input_tensor.dim()), dtype=torch.long),
        )
    )

    # 1D input
    input_tensor = torch.tensor([0, 8])
    static_size = 1
    self.assertTrue(
        same(
            torch.nonzero_static(input_tensor, size=static_size),
            torch.tensor([1]),
        )
    )
    input_tensor = torch.tensor([8, 0])
    static_size = 2
    self.assertTrue(
        same(
            torch.nonzero_static(input_tensor, size=static_size),
            torch.tensor([[0], [-1]]),  # padded with default fill_value "-1"
        )
    )

    # 2D input
    input_tensor = torch.tensor([[1.2, 0], [3.4, 5.6]])
    static_size = 5
    fill_value = -100
    self.assertTrue(
        torch._dynamo.utils.same(
            torch.nonzero_static(
                input_tensor, size=static_size, fill_value=fill_value
            ),
            torch.tensor(
                [
                    [0, 0],
                    [1, 0],
                    [1, 1],
                    [fill_value, fill_value],
                    [fill_value, fill_value],
                ]
            ),
        )
    )
    input_tensor = torch.tensor([[1.2, 0], [3.4, 5.6]])
    static_size = 2
    fill_value = -100
    self.assertTrue(
        torch._dynamo.utils.same(
            torch.nonzero_static(
                input_tensor, size=static_size, fill_value=fill_value
            ),
            torch.tensor([[0, 0], [1, 0]]),
        )
    )

    # 3D input
    input_tensor = torch.tensor([[[0, 0], [0, -3]], [[0, 0], [5, 0]]])
    static_size = 4
    fill_value = -999
    self.assertTrue(
        torch._dynamo.utils.same(
            torch.nonzero_static(
                input_tensor,
                size=static_size,
                fill_value=fill_value,
            ),
            torch.tensor(
                [
                    [0, 1, 1],
                    [1, 1, 0],
                    [fill_value, fill_value, fill_value],
                    [fill_value, fill_value, fill_value],
                ]
            ),
        )
    )
    def test_cond_with_quantization(self):
        """cond() must trace through a module that owns a prepare_qat_fx-quantized submodule."""
        from functorch.experimental.control_flow import cond
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                example_inputs = (torch.randn(5, 5),)
                self.model = torch.nn.Linear(5, 5)
                # qconfig_dict comes from module scope (not visible in this chunk) — TODO confirm
                self.quantized_model = prepare_qat_fx(
                    self.model, qconfig_dict, example_inputs=example_inputs
                )
            def forward(self, pred, x):
                def true_fn(x):
                    return x.sin() + self.quantized_model(x)
                def false_fn(x):
                    return x.cos() + self.model(x)
                return cond(pred, true_fn, false_fn, [x])
        module = MyModule()
        opt_m = torch._dynamo.optimize("eager", nopython=True)(module)
        x = torch.rand((5, 5))
        pred = torch.tensor(True)
        self.assertTrue(same(module(pred, x), opt_m(pred, x)))
        pred = torch.tensor(False)
        self.assertTrue(same(module(pred, x), opt_m(pred, x)))
    def test_map_with_quantization(self):
        """map() must trace through a module that owns a prepare_qat_fx-quantized submodule."""
        from functorch.experimental.control_flow import map
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                example_inputs = (torch.randn(5, 5),)
                self.model = torch.nn.Linear(5, 5)
                # qconfig_dict comes from module scope (not visible in this chunk) — TODO confirm
                self.quantized_model = prepare_qat_fx(
                    self.model, qconfig_dict, example_inputs=example_inputs
                )
            def forward(self, x):
                def body(x):
                    return x.sin() + self.quantized_model(x)
                return map(body, x)
        module = MyModule()
        opt_m = torch._dynamo.optimize("eager", nopython=True)(module)
        x = torch.rand((5, 5))
        self.assertTrue(same(module(x), opt_m(x)))
    def test_cond_side_effects(self):
        """cond() branches closing over a nonlocal must observe the value written before the call (c == 1)."""
        from functorch.experimental.control_flow import cond
        c = 0
        def true_fn(x):
            return x - c
        def false_fn(x):
            return x + c
        def f(pred, x):
            nonlocal c
            c = 1  # side effect before cond(): branches must see c == 1, not 0
            return cond(pred, true_fn, false_fn, [x])
        opt_fn = torch._dynamo.optimize("eager")(f)
        c = 0
        a = opt_fn(torch.tensor(False), torch.tensor([0.25, 0.25]))
        self.assertTrue(same(torch.tensor([1.25, 1.25]), a))
    def test_map_side_effects(self):
        """map() body mutating a module buffer must raise Unsupported under nopython mode."""
        from functorch.experimental.control_flow import map
        class Module(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = torch.tensor(1)
            def forward(self, xs):
                def body(x):
                    self.w += 1  # in-place mutation of module state inside map's body
                    return x
                return map(body, xs)
        mod = Module()
        with self.assertRaisesRegex(
            Unsupported, "Can't inplace modify module params/buffers"
        ):
            opt_fn = torch._dynamo.optimize("eager", nopython=True)(mod)
            opt_fn(torch.randn(3, 2))
def test_cond_nested(self):
from functorch.experimental.control_flow import cond
def true_fn_nested(x):
return x * 10
def false_fn_nested(x):
return x * -1
def true_fn(pred2, x):
return x.sin()
def false_fn(pred2, x):
return x + cond(pred2, true_fn_nested, false_fn_nested, [x])
def f(pred, pred2, x):
return cond(pred, true_fn, false_fn, [pred2, x])
cc = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cc)(f)
true_true_sin = opt_fn(
torch.tensor(True), torch.tensor(True), torch.tensor([0.25, 0.25])
)
self.assertTrue(same(torch.sin(torch.tensor([0.25, 0.25])), true_true_sin))
true_false_sin = opt_fn(
torch.tensor(True), torch.tensor(False), torch.tensor([0.25, 0.25])
)
self.assertTrue(same(torch.sin(torch.tensor([0.25, 0.25])), true_false_sin))
false_true_sum_mult = opt_fn(
torch.tensor(False), torch.tensor(True), torch.tensor([0.25, 0.25])
)
self.assertTrue(
same(torch.tensor([2.75, 2.75]), false_true_sum_mult)
) # * 10 then add x
false_false_sum_neg = opt_fn(
torch.tensor(False), torch.tensor(False), torch.tensor([0.25, 0.25])
)
self.assertTrue(
same(torch.tensor([0.0, 0.0]), false_false_sum_neg)
) # * -1 then add x
self.assertTrue(cc.frame_count, 2)
    def test_cond_export(self):
        """torch._dynamo.export of a nested cond() must yield a graph covering all four branch combinations."""
        from functorch.experimental.control_flow import cond
        def true_fn_nested(x):
            return x * 10
        def false_fn_nested(x):
            return x * -1
        def true_fn(pred2, x):
            return x.sin()
        def false_fn(pred2, x):
            return x + cond(pred2, true_fn_nested, false_fn_nested, [x])
        def f(pred, pred2, x):
            return cond(pred, true_fn, false_fn, [pred2, x])
        graph, guard = torch._dynamo.export(
            f, torch.tensor(False), torch.tensor(True), torch.tensor([0.25, 0.25])
        )
        true_true_sin = graph(
            torch.tensor(True), torch.tensor(True), torch.tensor([0.25, 0.25])
        )
        self.assertTrue(same(torch.sin(torch.tensor([0.25, 0.25])), true_true_sin))
        true_false_sin = graph(
            torch.tensor(True), torch.tensor(False), torch.tensor([0.25, 0.25])
        )
        self.assertTrue(same(torch.sin(torch.tensor([0.25, 0.25])), true_false_sin))
        false_true_sum_mult = graph(
            torch.tensor(False), torch.tensor(True), torch.tensor([0.25, 0.25])
        )
        self.assertTrue(
            same(torch.tensor([2.75, 2.75]), false_true_sum_mult)
        )  # * 10 then add x
        false_false_sum_neg = graph(
            torch.tensor(False), torch.tensor(False), torch.tensor([0.25, 0.25])
        )
        self.assertTrue(
            same(torch.tensor([0.0, 0.0]), false_false_sum_neg)
        )  # * -1 then add x
    def test_cond_export_single_arg(self):
        """Exported cond() graph with a single operand works for both branches and new input shapes."""
        from functorch.experimental.control_flow import cond
        def true_fn(x):
            return x
        def false_fn(x):
            return x.sin()
        def f(pred, x):
            return cond(pred, true_fn, false_fn, [x])
        graph, guard = torch._dynamo.export(
            f, torch.tensor(False), torch.tensor([0.25, 0.25])
        )
        true_mirror = graph(torch.tensor(True), torch.tensor([0.25, 0.25]))
        self.assertTrue(same(torch.tensor([0.25, 0.25]), true_mirror))
        true_mirror_2 = graph(torch.tensor(True), torch.tensor([0.33, 0.33, 0.33]))
        self.assertTrue(same(torch.tensor([0.33, 0.33, 0.33]), true_mirror_2))
        false_sin = graph(torch.tensor(False), torch.tensor([0.5, 0.5]))
        self.assertTrue(same(torch.sin(torch.tensor([0.5, 0.5])), false_sin))
    def test_enum_guards(self):
        """Branching on equality with an enum member must compile and match eager results."""
        class MyEnum(enum.Enum):
            FOO = 10
            BAR = 20
        def fn(x, y):
            if y == MyEnum.FOO:
                return x + 1
            else:
                return x - 1
        x = torch.rand(3)
        y = MyEnum.BAR
        ref = fn(x, y)
        opt_fn = torch.compile(backend="eager")(fn)
        res = opt_fn(x, y)
        self.assertTrue(same(ref, res))
    def test_duplicate_graph_break_log(self):
        """With verbose=True duplicate graph-break messages are logged; with verbose=False they are deduped to one."""
        torch._logging.set_logs(graph_breaks=True)
        @torch._dynamo.optimize("eager")
        def f1(a, b):
            f2(a, b)
        def f2(a, b):
            c = a + b
            print("break")  # print() forces a graph break inside the traced frame
            return a + b + c
        @torch._dynamo.optimize("eager")
        def g1(a, b):
            g2(a, b)
        def g2(a, b):
            c = a + b
            print("break")
            return a + b + c
        def count_graph_break_msgs(msgs):
            return sum(msg.find("Graph break") != -1 for msg in msgs)
        with self.assertLogs(logger="torch._dynamo", level=logging.DEBUG) as log:
            torch._dynamo.config.verbose = True
            f1(torch.randn(10), torch.randn(10))
            self.assertGreater(count_graph_break_msgs(log.output), 1)
        with self.assertLogs(logger="torch._dynamo", level=logging.DEBUG) as log:
            torch._dynamo.config.verbose = False
            g1(torch.randn(10), torch.randn(10))
            self.assertEqual(count_graph_break_msgs(log.output), 1)
        # reset logging state
        torch._logging.set_logs()
    def test_inplace_param_update(self):
        """In-place update of an nn.Parameter under toggled grad-mode compiles into one frame with 3 ops."""
        def fn(param, y):
            prev_grad = torch.is_grad_enabled()
            try:
                torch.set_grad_enabled(False)
                torch.set_grad_enabled(True)
                torch.set_grad_enabled(False)
                param.add_(y)  # in-place mutation of a leaf Parameter with grad disabled
            finally:
                torch.set_grad_enabled(prev_grad)
        y = torch.randn(4)
        x = torch.nn.Parameter(torch.randn(4))
        fn(x, y)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
        opt_fn(x, y)
        self.assertEqual(cnts.frame_count, 1)
        self.assertEqual(cnts.op_count, 3)
    @unittest.skipIf(
        not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater,
        "Can't run fused SDPA on this platform",
    )
    def test_parsing_sdpa(self):
        """All positional/keyword calling conventions of F.scaled_dot_product_attention compile under inductor."""
        class MyModule(torch.nn.Module):
            def forward(self, query, key, value):
                # Each call exercises a different mix of positional/keyword args
                out = F.scaled_dot_product_attention(query, key, value, None, 0, True)
                out = F.scaled_dot_product_attention(
                    query, key, value, None, 0, True, scale=8
                )
                out = F.scaled_dot_product_attention(
                    query=query,
                    key=key,
                    value=value,
                    attn_mask=None,
                    dropout_p=0,
                    is_causal=True,
                )
                out = F.scaled_dot_product_attention(
                    query,
                    key=key,
                    value=value,
                    attn_mask=None,
                    dropout_p=0,
                    is_causal=True,
                )
                out = F.scaled_dot_product_attention(
                    query, key, value, None, dropout_p=0, is_causal=True
                )
                out = F.scaled_dot_product_attention(query, key, value, None, scale=8)
                return out
        device = "cuda"
        dtype = torch.float16
        seq_len_q = 1
        seq_len_k = 1
        head_dim = 8
        query = torch.ones(
            1, 8, seq_len_q, head_dim, device=device, dtype=dtype, requires_grad=True
        )
        key = torch.ones(
            1, 8, seq_len_k, head_dim, device=device, dtype=dtype, requires_grad=True
        )
        value = torch.ones(
            1, 8, seq_len_k, head_dim, device=device, dtype=dtype, requires_grad=True
        )
        module = MyModule()
        opt_mod = torch._dynamo.optimize("inductor")(module)
        opt_mod(query, key, value)
    def test_generate_tensor_from_list_of_numpy_primitive_type(self):
        """Building a torch.LongTensor from a list of numpy int64 scalars compiles correctly."""
        # Test sth like torch.LongTensor(list(np.int64, np.int64, ...))
        def fn():
            x = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
            y = [x[0], x[2], x[4]]
            z = torch.LongTensor(y)
            return z
        ref = fn()
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn()
        self.assertTrue(same(ref, res))
def test_object_classmethod(self):
class C:
@classmethod
def fn(cls, x):
return x + x
@torch._dynamo.optimize("eager", nopython=True)
def f():
return C().fn(torch.ones(2, 3))
self.assertTrue(torch.allclose(f(), torch.tensor([2.0])))
def test_object_staticmethod(self):
class C:
@staticmethod
def fn(x):
return x + x
@torch._dynamo.optimize("eager", nopython=True)
def f():
return C().fn(torch.ones(2, 3))
self.assertTrue(torch.allclose(f(), torch.tensor([2.0])))
    def test_user_function_variable_supports_enum_argument(self):
        """A user function whose default argument is an enum member compiles with nopython=True."""
        class Foo(enum.Enum):
            FOO = 0
            BAR = 1
        def gn(x, y=Foo.FOO):
            if y is Foo.FOO:
                return x
            else:
                return x + 1
        def fn(x):
            return gn(x)
        x = torch.randn(2, 3)
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(torch.allclose(ref, res))
    def test_user_function_variable_supports_type_abcmeta_argument(self):
        """A default argument holding ABCMeta-based classes (membership test on a tuple of types) compiles."""
        class Foo(metaclass=abc.ABCMeta):
            @abc.abstractclassmethod
            def read(self):  # noqa: B027
                pass
        class Bar(Foo):
            def read(self):
                return "Hello World!"
        class Baz:
            pass
        def gn(x, tys=(Bar, Baz)):
            if Bar in tys:
                return x - 1
            else:
                return x + 1
        def fn(x):
            return gn(x)
        x = torch.randn(2, 3)
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(torch.allclose(ref, res))
    def test_user_function_variable_supports_function_argument(self):
        """Default arguments that are user-defined, torch, and builtin functions all compile."""
        # Test user defined function default arguments can be:
        # 1, user defined functions (e.g, add1)
        # 2, torch functions (e.g, torch.sin)
        # 3, python builtin functions (e.g, operator.neg)
        def add1(x):
            return x + 1
        def gn(x, f1=add1, f2=torch.sin, f3=operator.neg):
            return f3(f2(f1(x)))
        def fn(x):
            return gn(x)
        x = torch.randn(2, 3)
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(torch.allclose(ref, res))
    def test_typing_variable_isinstance(self):
        """isinstance() against a typing abstract class (typing.Mapping) is resolved during tracing."""
        def fn(x, m):
            if isinstance(m, typing.Mapping):
                return x + 1
            else:
                return x - 1
        x = torch.randn(2, 3)
        m = {"x": torch.randn(3)}
        ref = fn(x, m)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn(x, m)
        self.assertTrue(torch.allclose(ref, res))
    def test_repro_graph_breaks_in__get_item_by_idx(self):
        """Indexing into nn.Sequential (mod[0]) must export without graph breaks."""
        class Mod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.mod = torch.nn.Sequential(
                    torch.nn.Linear(3, 3), torch.nn.Linear(3, 3)
                )
            def forward(self, x):
                return self.mod[0](x)
        m = Mod()
        graph, _ = torch._dynamo.export(m, torch.randn(3, 3))
    def test_nn_sequential_invocation(self):
        """Calling a slice of an nn.Sequential (self.linears[:-1]) exports and matches eager."""
        with freeze_rng_state():
            class TestModel(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linears = torch.nn.Sequential(
                        torch.nn.Linear(2, 2),
                        torch.nn.Linear(2, 2),
                        torch.nn.Linear(2, 2),
                        torch.nn.Linear(2, 2),
                    )
                def forward(self, x):
                    all_but_last = self.linears[:-1]
                    return all_but_last(x)
            m = TestModel()
            x = torch.rand((2, 2))
            real = m(x)
            graph, _ = torch._dynamo.export(m, x)
            dynamo_result = graph(x)
            self.assertTrue(same(real, dynamo_result))
    def test_nn_sequential_invocation_reposition_indices(self):
        """Calling a middle slice of an nn.Sequential (self.linears[1:3]) exports and matches eager."""
        with freeze_rng_state():
            class TestModel(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linears = torch.nn.Sequential(
                        torch.nn.Linear(2, 2),
                        torch.nn.Linear(2, 2),
                        torch.nn.Linear(2, 2),
                        torch.nn.Linear(2, 2),
                    )
                def forward(self, x):
                    all_but_last = self.linears[1:3]
                    return all_but_last(x)
            m = TestModel()
            x = torch.rand((2, 2))
            real = m(x)
            graph, _ = torch._dynamo.export(m, x)
            dynamo_result = graph(x)
            self.assertTrue(same(real, dynamo_result))
    def test_error_on_nested_fx_trace(self):
        """fx.symbolic_trace over a dynamo-optimized callable must raise by default."""
        input = torch.rand(2, 3)
        def f(x):
            x + x  # NOTE(review): no return — f yields None; only tracing behavior matters here
        real = f(input)
        optimized = torch._dynamo.optimize("eager")(f)
        self.assertTrue(same(optimized(input), real))
        with self.assertRaisesRegex(RuntimeError, "Detected that you are using FX"):
            gm = torch.fx.symbolic_trace(optimized)
    @patch.object(torch._dynamo.config, "error_on_nested_fx_trace", False)
    def test_no_error_on_nested_fx_trace(self):
        """With error_on_nested_fx_trace=False, fx.symbolic_trace of an optimized callable succeeds."""
        input = torch.rand(2, 3)
        def f(x):
            x + x  # NOTE(review): no return — f yields None; only tracing behavior matters here
        real = f(input)
        optimized = torch._dynamo.optimize("eager")(f)
        self.assertTrue(same(optimized(input), real))
        # should not error
        gm = torch.fx.symbolic_trace(optimized)
        self.assertTrue(same(gm(input), real))
    def test_not_dynamic_scope(self):
        """Closures must use lexical scoping: the lambda captures g's x (2), not f's x (1)."""
        def f(y):
            x = 1
            def g():
                x = 2
                return lambda: x  # captures g's x, shadowing f's
            return y + g()()
        input = torch.zeros(1)
        real = f(input)
        optimized = torch._dynamo.optimize("eager")(f)
        opt = optimized(input)
        self.assertTrue(same(opt, real))
    def test_inference_mode(self):
        """A function decorated with torch.inference_mode() compiles and leaves inputs unchanged."""
        @torch.inference_mode()
        def func(x, y):
            return x.add(1.0) + y
        x = torch.ones(4, requires_grad=True)
        y = torch.ones(4, requires_grad=True)
        ref = func(x, y)
        opt_func = torch._dynamo.optimize("eager")(func)
        x1 = torch.ones(4, requires_grad=True)
        res = opt_func(x1, y)
        self.assertTrue(same(ref, res))
        self.assertTrue(same(x, x1))
    def test_if_cond_nn_mod1(self):
        """Truthiness of a module attribute (ReLU instance vs None) branches correctly when compiled."""
        class MockModule(torch.nn.Module):
            def __init__(self, output_relu=True):
                super().__init__()
                self.relu = torch.nn.ReLU() if output_relu else None
            def forward(self, x):
                x = torch.sin(x)
                if self.relu:  # truthy for a module instance, falsy for None
                    x = self.relu(x)
                return x
        model = MockModule()
        opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
        x = torch.rand(4)
        ref = model(x)
        res = opt_model(x)
        self.assertTrue(same(ref, res))
        model = MockModule(output_relu=False)
        opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
        x = torch.rand(4)
        ref = model(x)
        res = opt_model(x)
        self.assertTrue(same(ref, res))
    def test_if_cond_nn_mod2(self):
        """Truthiness of an empty nn.Sequential attribute is handled in a compiled branch."""
        class MockModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.layer = torch.nn.Sequential()
            def forward(self, x):
                if self.layer:  # empty Sequential — truthiness depends on its __bool__/__len__
                    return x + 1
                else:
                    return x - 1
        model = MockModule()
        x = torch.rand(4)
        ref = model(x)
        opt_model = torch.compile(backend="eager")(model)
        res = opt_model(x)
        self.assertTrue(same(ref, res))
    def test_if_cond_nn_mod3(self):
        """Truthiness of a freshly-constructed empty nn.ModuleList is handled in a compiled branch."""
        def fn(x):
            if torch.nn.ModuleList():
                return x + 1
            else:
                return x - 1
        x = torch.rand(4)
        ref = fn(x)
        opt_fn = torch.compile(backend="eager")(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_if_cond_user_defined_object(self):
        """Truthiness of user objects: no __bool__, bool-returning __bool__, and instance-attr __bool__; expects 4 frames."""
        # obj.__bool__ is not existed
        class A:  # noqa: B903
            def __init__(self, x):
                self.x = x
        # obj.__bool__ is function and returns bool type
        class B:
            def __init__(self, x):
                self.x = x
            def __bool__(self):
                return self.x > 0
        # obj.__bool__ is non-function
        class C:
            def __init__(self, x):
                self.x = x
                self.__bool__ = False  # instance attribute, not the type's method
        def fn(x, obj):
            if not obj:
                return x + 1
            else:
                return x - 1
        x = torch.rand(4)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnts, nopython=True)(fn)
        obj1 = A(0.5)
        obj2 = B(0.5)
        obj3 = B(-0.5)
        obj4 = C(0.5)
        # obj3/obj2 repeated at the end should hit existing compiled frames
        for obj in [obj1, obj2, obj3, obj4, obj3, obj2]:
            ref = fn(x, obj)
            res = opt_fn(x, obj)
            self.assertTrue(same(ref, res))
        self.assertEqual(cnts.frame_count, 4)
def test_if_cond_user_defined_object2(self):
# obj.__bool__ is function and returns non-bool type
class MyObj:
def __init__(self, x):
self.x = x
def __bool__(self):
self.x = 1.2
return self.x
def fn(a, obj):
if not obj:
return a + obj.x
else:
return a - obj.x
x = torch.rand(4)
obj = MyObj(0.5)
opt_fn = torch._dynamo.optimize("eager")(fn)
try:
opt_fn(x, obj)
self.assertFalse(True)
except TypeError as e:
self.assertIn("__bool__ should return bool, returned float", str(e))
    def test_if_cond_user_defined_object3(self):
        """Truthiness falls back to __len__ when __bool__ is absent; __bool__ wins when both exist."""
        # obj.__bool__ is not existed, but obj.__len__ exists
        class A:  # noqa: B903
            def __init__(self, x):
                self.x = x
            def __len__(self):
                return len(self.x)
        # obj.__bool__ takes precedence over obj.__len__
        class B:
            def __init__(self, x):
                self.x = x
            def __bool__(self):
                return False
            def __len__(self):
                return len(self.x)
        def fn(x, obj):
            if not obj:
                return x + 1
            else:
                return x - 1
        x = torch.rand(4)
        opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
        obj1 = A([1, 2, 3])
        obj2 = A([])
        obj3 = B([1, 2, 3])
        obj4 = B([])
        for obj in [obj1, obj2, obj3, obj4]:
            ref = fn(x, obj)
            res = opt_fn(x, obj)
            self.assertTrue(same(ref, res))
    def test_class_has_instancecheck_method(self):
        """isinstance() must respect a metaclass __instancecheck__ that claims every object."""
        class A:
            pass
        class ExampleMeta(type):
            def __instancecheck__(cls, instance):
                return True  # every object "is" an instance of B
        class B(metaclass=ExampleMeta):
            pass
        def fn(x, obj):
            if isinstance(obj, B):
                return x + 1
            else:
                return x - 1
        x = torch.rand(4)
        obj = A()
        ref = fn(x, obj)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x, obj)
        self.assertTrue(same(ref, res))
    def test_torch_cuda_is_available(self):
        """Branching on torch.cuda.is_available() compiles under nopython mode on any platform."""
        def fn(x):
            if torch.cuda.is_available():
                return x + 1
            else:
                return x - 1
        x = torch.rand(4)
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_variable_tracker_recursively_contains(self):
        """Mutating a nested (aliased) list during tracing must keep recursively_contains consistent."""
        # VariableTracker.recursively_contains should be updated correctly when mutation happens
        def fn(x):
            data = [[None] * 3] * 3  # inner lists are aliases of one another
            for i in range(3):
                if i == 0:
                    data[0][i] = x
                else:
                    data[0][i] = data[0][i - 1] + 1
            return data[0][-1]
        x = torch.rand(4)
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    @unittest.skipIf(not TEST_CUDA, "requires cuda")
    @unittest.skipIf(not torch.backends.cudnn.is_available(), "requires cudnn")
    def test_torch_cudnn_is_acceptable(self):
        """Branching on torch.backends.cudnn.is_acceptable(tensor=...) compiles with nopython=True."""
        def fn(x):
            if torch.backends.cudnn.is_acceptable(tensor=x):
                return x + 1
            return x
        x = torch.rand(4).cuda()
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    @unittest.skipIf(not TEST_CUDA, "requires cuda")
    @unittest.skipIf(not torch.backends.cudnn.is_available(), "requires cudnn")
    def test_torch_cudnn_is_acceptable_bad_inputs(self):
        """cudnn.is_acceptable with a non-tensor or a wrong arg count must fail tracing with an AssertionError."""
        def fn1(x):
            if torch.backends.cudnn.is_acceptable("invalid"):  # non-tensor argument
                return x + 1
            return x
        def fn2(x):
            if torch.backends.cudnn.is_acceptable(x, 3.14):  # too many arguments
                return x + 1
            return x
        with self.assertRaisesRegex(
            AssertionError, "Expect input to cudnn.is_acceptable to be a tensor"
        ):
            x1 = torch.rand(4).cuda()
            opt_fn1 = torch._dynamo.optimize("eager", nopython=True)(fn1)
            res1 = opt_fn1(x1)
        with self.assertRaisesRegex(
            AssertionError, "Expect 1 input to cudnn.is_acceptable"
        ):
            x2 = torch.rand(4).cuda()
            opt_fn2 = torch._dynamo.optimize("eager", nopython=True)(fn2)
            res = opt_fn2(x2)
    @unittest.skipIf(not TEST_CUDA, "requires cuda")
    def test_get_device(self):
        """Tensor.get_device() on CUDA and CPU tensors is traced correctly."""
        def fn(x, y):
            x = x + 1
            y = y + 1
            return x.get_device(), y.get_device()
        x = torch.rand(4, device="cuda")
        y = torch.rand(4, device="cpu")
        ref = fn(x, y)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x, y)
        self.assertTrue(same(ref, res))
def test_disable_flag(self):
cnt = torch._dynamo.testing.CompileCounter()
with patch.dict(os.environ, {"TORCH_COMPILE_DISABLE": "1"}):
def fn(x, y):
x = x + 1
y = y + 1
opt_fn = torch._dynamo.optimize(cnt)
self.assertEqual(cnt.frame_count, 0)
    def test_is_compiling(self):
        """torch._dynamo.is_compiling() is False in eager and True inside a compiled frame."""
        def f():
            if torch._dynamo.is_compiling():
                return torch.ones(2, 2)
            else:
                return torch.zeros(2, 2)
        opt_f = torch._dynamo.optimize("eager")(f)
        self.assertEqual(f(), torch.zeros(2, 2))
        self.assertEqual(opt_f(), torch.ones(2, 2))
    def test_torch_generator_set_state(self):
        """Restoring the default generator state across a graph break reproduces identical random tensors."""
        def fn():
            default_state = torch.default_generator.get_state()
            x = torch.rand([2, 3])
            torch._dynamo.graph_break()
            torch.default_generator.set_state(default_state)  # rewind RNG so y repeats x
            y = torch.rand([2, 3])
            return x, y
        opt_fn = torch._dynamo.optimize("eager")(fn)
        x, y = opt_fn()
        self.assertEqual(x, y)
    def test_torch_distributions_lazy_property(self):
        """Accessing a torch.distributions lazy_property (Categorical.entropy) compiles and matches eager."""
        def fn(x):
            return torch.distributions.Categorical(probs=x).entropy()
        opt_fn = torch._dynamo.optimize("eager")(fn)
        x = torch.rand([4, 4])
        self.assertEqual(opt_fn(x), fn(x))
    def test_guard_failure_fn(self):
        """guard_fail_fn fires on recompilation when static shapes/specialized ints are in effect."""
        def fn(x, y, k):
            x = x + 1
            y = y + 1
            return x * y * k
        x = torch.tensor([0.5, 0.5])
        y = torch.tensor([1.0, 1.0])
        guard_failure = None
        def guard_failures(failure):
            nonlocal guard_failure
            guard_failure = failure
        opt_fn = torch._dynamo.optimize(
            "eager", nopython=True, guard_fail_fn=guard_failures
        )(fn)
        x2 = torch.tensor([0.5, 0.5, 1.0])
        y2 = torch.tensor([0.5, 0.5, 0.5])
        opt_fn(x, y, 3)
        opt_fn(x2, y2, 5)  # different shapes + different int k
        if (
            not torch._dynamo.config.specialize_int
            and not torch._dynamo.config.assume_static_by_default
        ):
            # we didn't actually test guard_failure_fn here but whatever,
            # nice to see no guard failure on the test
            self.assertTrue(guard_failure is None)
        else:
            self.assertTrue(guard_failure is not None)
    def test_guard_failure_fn_shape_control(self):
        """Shape-dependent control flow reports the expected guard-failure string for static vs dynamic modes."""
        def fn(x, y):
            if x.shape[0] < 3:
                if y.shape[0] < 3:
                    return x * y
                else:
                    return x + y
            else:
                return -1
        x = torch.randn([2, 2])
        y = torch.randn([2, 2])
        guard_failure = None
        def guard_failures(failure):
            nonlocal guard_failure
            guard_failure = failure
        opt_fn = torch._dynamo.optimize(
            "eager", nopython=True, guard_fail_fn=guard_failures
        )(fn)
        x2 = torch.randn([5, 5])
        y2 = torch.randn([5, 5])
        opt_fn(x, y)
        opt_fn(x2, y2)  # crosses the shape[0] < 3 boundary → recompilation
        self.assertTrue(guard_failure is not None)
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(
                guard_failure[0],
                """tensor 'L['x']' size mismatch at index 0. expected 2, actual 5""",
            )
        else:
            self.assertExpectedInline(guard_failure[0], """L['x'].size()[0] < 3""")
    def test_guard_failure_fn2(self):
        """Tensor-only recompilation: size-mismatch guard text under static shapes; no failure when dynamic."""
        def fn(x, y):
            x = x + 1
            y = y + 1
            return x * y
        x = torch.tensor([0.5, 0.5])
        y = torch.tensor([1.0, 1.0])
        guard_failure = None
        def guard_failures(failure):
            nonlocal guard_failure
            guard_failure = failure
        opt_fn = torch._dynamo.optimize(
            "eager", nopython=True, guard_fail_fn=guard_failures
        )(fn)
        x2 = torch.tensor([0.5, 0.5, 1.0])
        y2 = torch.tensor([0.5, 0.5, 0.5])
        opt_fn(x, y)
        opt_fn(x2, y2)  # same rank, different length
        if torch._dynamo.config.assume_static_by_default:
            self.assertExpectedInline(
                guard_failure[0],
                """tensor 'L['x']' size mismatch at index 0. expected 2, actual 3""",
            )
        else:
            self.assertTrue(guard_failure is None)
    def test_guard_failure_fn_tensor_iter(self):
        """Iterating over a tensor guards on len(x); changing the row count triggers that guard."""
        def fn(x):
            for y in x:
                y.add_(1.0)
            return y
        guard_failure = None
        def guard_failures(failure):
            nonlocal guard_failure
            guard_failure = failure
        opt_fn = torch._dynamo.optimize(
            "eager", nopython=True, guard_fail_fn=guard_failures
        )(fn)
        args1 = torch.randn(10, 10)
        out = fn(args1)
        opt_out = opt_fn(args1)
        self.assertTrue(same(out, opt_out))
        args2 = torch.randn(9, 10)  # fewer rows → len guard fails
        out = fn(args2)
        opt_out = opt_fn(args2)
        self.assertTrue(same(out, opt_out))
        # guard is expected for both static and dynamic shapes
        self.assertTrue(guard_failure is not None)
        self.assertExpectedInline(guard_failure[0], """len(L['x']) == 10""")
    def test_restore_graphstate(self):
        """Guards accumulated before a rolled-back trace must not appear in the exported guard set."""
        # This function does some guard accumulation,
        # and then rolls back due to control flow.
        # The idea is that if one were printing guards as they appear,
        # they would see this insert a guard that does not show up in the final set of
        # guards as we rolled back from it.
        def nested_fn(s):
            # reads the test-scope `x` defined below via closure (bound at call time)
            if x[0] < 10:
                return s * s
            return s
        def fn(x, y):
            x = x + 1
            y = nested_fn(y)
            y = y + 10
            return x * y
        all_guards = []
        def guard_export_print(guards):
            nonlocal all_guards
            all_guards.extend(guards)
        opt_fn = torch._dynamo.optimize("eager", guard_export_fn=guard_export_print)(fn)
        x = torch.tensor([0.5, 0.5])
        y = torch.tensor([1.0, 1.0])
        opt_fn(x, y)
        for guard in all_guards:
            # This guard was created
            self.assertTrue(guard.name != "nested_fn.__closure__[0].cell_contents")
    def test_call_parent_non_class_methods_from_child(self):
        """Explicit unbound parent-method calls (A.add(self, ...), B.mul(self, ...)) compile correctly."""
        class A:
            def add(self, x):
                return x + 10
            def mul(self, x):
                return x * 0.1
        class B(A):
            def add(self, x):
                return x + 20
            def mul(self, x):
                return x * 0.2
        class C(B):
            def add(self, x):
                y = A.add(self, x)  # skip B.add, call grandparent's version
                z = B.mul(self, y)
                return z + 30
        x = torch.rand(4)
        fn = C().add
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_builder_for_class_with_metaclass(self):
        """Instances of a class with a custom (pass-through) metaclass pass isinstance checks when compiled."""
        class ExampleMeta(type):
            pass
        class MyClass(metaclass=ExampleMeta):
            pass
        def fn(x, y):
            if isinstance(y, MyClass):
                return x + 1
            else:
                return x - 1
        x = torch.rand([4, 4])
        y = MyClass()
        ref = fn(x, y)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn(x, y)
        self.assertTrue(same(ref, res))
    def test_tuple_from_tuple_iter(self):
        """tuple() over a tuple iterator argument, concatenated and star-unpacked, compiles."""
        def inner_fn(*args):
            acc = torch.ones(10, 10)
            for arg in args:
                acc.add_(arg)
            return acc
        @torch._dynamo.optimize("eager")
        def fn(inputs, params):
            y = tuple(inputs) + tuple(params)  # params arrives as a tuple_iterator
            return inner_fn(*y)
        inputs = [torch.randn(10, 10) for _ in range(3)]
        fn(inputs, iter(tuple(inputs)))
def test_torch_package_working_with_trace(self):
# from torch._dynamo.test_case import run_tests
inputs = [torch.randn([2, 2]), torch.randn([2, 2])]
optimized_model = torch._dynamo.optimize(backend="eager")(
MyPickledModule(torch.randn([2, 2]))
)
from torch import package
path = "/tmp/MyPickledModule.pt"
package_name = "MyPickledModule"
resource_name = "MyPickledModule.pkl"
model = MyPickledModule(torch.randn([2, 2]))
with package.PackageExporter(path) as exp:
exp.extern("**")
exp.save_pickle(package_name, resource_name, model)
imp = package.PackageImporter(path)
loaded_model = imp.load_pickle(package_name, resource_name)
optimized_loaded_model = torch._dynamo.optimize("eager")(loaded_model)(*inputs)
    def test_shape_and_tuple_equality(self):
        """Comparing x.size() against a plain tuple compiles under nopython mode."""
        def fn(x, y, t):
            z = x * y
            if x.size() == t:  # torch.Size vs tuple equality
                return z.cos()
            return z.sin()
        torch._dynamo.optimize("eager", nopython=True)(fn)(
            torch.randn([4, 4]), torch.randn([4, 4]), (4, 4)
        )
    def test_int_list(self):
        """Lists of ints recompile per config: specialized (static) vs unspecialized (dynamic) int handling."""
        # if assume_static_by_default == True: spec int list
        # otherwise: unspec int list
        def fn(x, y):
            return torch.sin(x + y[1] % 2)
        x = torch.randn(6)
        cnt = torch._dynamo.testing.CompileCounter()
        opt_fn = torch._dynamo.optimize(cnt)(fn)
        for i in range(10, 25, 3):
            y = [i, i + 1, i + 2]
            ref = fn(x, y)
            res = opt_fn(x, y)
            self.assertTrue(same(ref, res))
        if torch._dynamo.config.assume_static_by_default:
            if torch._dynamo.config.automatic_dynamic_shapes:
                self.assertExpectedInline(cnt.frame_count, """2""")
            else:
                self.assertExpectedInline(cnt.frame_count, """5""")
        else:
            self.assertExpectedInline(cnt.frame_count, """1""")
# specifically test for tensor.attribute -> torch.something()
def test_real_imag_tensor_attribute(self):
def fn(x, y):
a = x.real
b = x.imag
return torch.mul(torch.add(a, y), b)
x_real = torch.rand((4, 4))
x_imag = torch.rand((4, 4))
x = torch.complex(x_real, x_imag)
y = torch.rand((4, 4))
ref = fn(x, y)
opt_fn = torch._dynamo.optimize("eager")(fn)
res = opt_fn(x, y)
self.assertTrue(same(ref, res))
def test_cast(self):
from typing import cast
def fn(x):
return cast(torch.Tensor, torch.add(x, 1.0))
opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
ref = fn(torch.ones(2, 2))
res = opt_fn(torch.ones(2, 2))
self.assertTrue(same(ref, res))
def test_T_tensor_attribute(self):
def fn(x, y):
a = x.T
return torch.add(a, y)
x = torch.rand((4, 4))
y = torch.rand((4, 4))
ref = fn(x, y)
opt_fn = torch._dynamo.optimize("eager")(fn)
res = opt_fn(x, y)
self.assertTrue(same(ref, res))
    def test_recursive_tensor_attribute(self):
        """Chained tensor attribute reads (x.real.T) compile and match eager."""
        def fn(x, y):
            a = x.real.T
            b = x.imag
            return torch.mul(torch.add(a, y), b)
        x_real = torch.rand((4, 4))
        x_imag = torch.rand((4, 4))
        x = torch.complex(x_real, x_imag)
        y = torch.rand((4, 4))
        ref = fn(x, y)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn(x, y)
        self.assertTrue(same(ref, res))
    def test_assigning_function_to_object_attribute(self):
        """A function stored on an instance attribute stays a plain function (no implicit self) when compiled."""
        # user-defined functions which are object's attributes are not converted to bound methods
        def my_add(*args):
            a, b = args
            return a + b
        class MyClass:
            def __init__(self, func):
                self.add = func  # instance attribute, NOT a bound method
        obj = MyClass(my_add)
        def fn(x):
            return obj.add(x, 2)
        x = torch.rand(2, 3)
        ref = fn(x)
        opt_fn = torch.compile(backend="eager")(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_assigning_function_to_class_attribute(self):
        """A function stored as a class attribute becomes a bound method (receives the instance) when compiled."""
        # user-defined functions which are class's attributes are converted to bound methods
        def my_add(*args):
            obj, a, b = args  # first arg is the bound instance
            return obj.x + a + b
        class MyClass:
            add = my_add
            def __init__(self, x):
                self.x = x
        obj = MyClass(0.5)
        def fn(x):
            return obj.add(x, 2)
        x = torch.rand(2, 3)
        ref = fn(x)
        opt_fn = torch.compile(backend="eager")(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_tagging_tensors_simple(self):
        """Custom attributes on input tensors must survive export into placeholder meta['tensor_dict']."""
        def foo(x, y):
            return x * y, x, y
        a = torch.randn([3, 3])
        a.tag = "a"
        a.frog = "ribbity ribbit"
        b = torch.randn([3, 3])
        b.tag = "b"
        b.frog = "ribbit"
        exported = torch._dynamo.export(foo, a, b)
        out_graph = exported[0]
        nodes = list(out_graph.graph.nodes)
        placeholders = [node for node in nodes if node.op == "placeholder"]
        all_tags = []
        all_frogs = []
        for placeholder in placeholders:
            if "tensor_dict" in placeholder.meta:
                all_tags.append(placeholder.meta["tensor_dict"]["tag"])
                all_frogs.append(placeholder.meta["tensor_dict"]["frog"])
        self.assertEqual(all_tags, ["a", "b"])
        self.assertEqual(all_frogs, ["ribbity ribbit", "ribbit"])
    def test_tagging_tensors_mix_used_unused_structure(self):
        """Only inputs actually used by the traced function keep their tags in the exported placeholders."""
        def pre_attention_state_ops(input, mems, state):
            # only state[0]/state[1] are consumed; `input` and `mems` are unused
            lc_key = state[0]
            lc_val = state[1]
            bar = []
            for i in range(0, 4):
                bar2 = []
                for j in range(0, 3):
                    bar2.append(
                        lc_key + lc_val + torch.tensor([0.1, 0.25, 0.4, 0.5, 0.1])
                    )
                bar.append(bar2)
            return bar
        mems = torch.tensor([[[1.8364, 0.2724, -1.4917, -0.4367, 0.8640]]])
        state = [
            torch.tensor([[[1.0517, 0.3848, -0.6472, 0.0823, 0.9116]]]),
            torch.tensor([[[1.0517, 0.3848, -0.6472, 0.0823, 0.9116]]]),
        ]
        i = torch.tensor(
            [
                [0.0313, -0.1487, -0.3846, -0.5321],
                [-1.7073, 1.3331, -0.0890, -1.4935],
                [-0.8314, -0.1862, -0.5935, 1.5232],
            ]
        )
        mems.tag = "MEMS"
        i.tag = "FOO"
        state[0].tag = "STATE_0"
        state[1].tag = "HMMM"
        exported = torch._dynamo.export(pre_attention_state_ops, i, mems, state)
        out_graph = exported[0]
        nodes = list(out_graph.graph.nodes)
        placeholders = [node for node in nodes if node.op == "placeholder"]
        all_tags = []
        for placeholder in placeholders:
            if "tensor_dict" in placeholder.meta:
                all_tags.append(placeholder.meta["tensor_dict"]["tag"])
        self.assertEqual(all_tags, ["STATE_0", "HMMM"])
def test_get_custom_tensor_attribute(self):
def fn(x):
return x.custom_attr * x
x = torch.rand((2, 2))
x.custom_attr = 3.14
ref = fn(x)
opt_fn = torch._dynamo.optimize("eager")(fn)
res = opt_fn(x)
self.assertTrue(same(ref, res))
    def test_set_custom_tensor_attribute(self):
        """Assigning a custom tensor attribute inside the compiled function is supported."""
        def fn(x):
            x.custom_attr = 3.14  # attribute write inside the traced region
            return x.custom_attr * x
        x = torch.rand((2, 2))
        ref = fn(x)
        opt_fn = torch._dynamo.optimize("eager")(fn)
        res = opt_fn(x)
        self.assertTrue(same(ref, res))
    def test_if_tensor_is_none(self):
        """
        Python 3.11 adds new jump instructions that check if
        TOS is None. We do not support these instructions.

        Verifies `is None` / `is not None` branches compile under nopython
        mode and that, on 3.11+, the cleaned bytecode contains no *_NONE
        jump opcodes.
        """
        def f(x, y):
            z = 1
            if x is None:
                z *= 2
            if y is not None:
                z *= 3
            return z
        opt_f = torch._dynamo.optimize("eager", nopython=True)(f)
        self.assertEqual(opt_f(None, torch.ones(2)), 6)
        if sys.version_info >= (3, 11):
            insts = bytecode_transformation.cleaned_instructions(f.__code__)
            for inst in insts:
                self.assertNotIn("_NONE", inst.opname)
    @skipIfNotPy311
    def test_py311_jump_offset(self):
        """Hand-assembled 3.11 bytecode: JUMP_FORWARD/BACKWARD targets resolve to the correct direction and result."""
        new_inst = bytecode_transformation.create_instruction
        load_global = bytecode_transformation.create_load_global
        consts = (None, 1, 2, 3, 4)
        def create_test_code(jump_opname, target_idx):
            # Build a code object whose central jump lands on targets[target_idx]
            targets = [
                new_inst("LOAD_CONST", argval=1),
                new_inst("LOAD_CONST", argval=3),
            ]
            jump_to_target_inst = new_inst(jump_opname, target=targets[target_idx])
            """
            pseudocode of generated bytecode:
            def test_py311_fn():
                goto target1
                target0:
                    return 1
                target1:
                    goto [target0/target2] (via fwd or bwd jump)
                    return 2
                target2:
                    return 3
                return 4
            """
            # test with LOAD_GLOBAL since it has a different instruction size
            insts = [
                new_inst("RESUME", arg=0),
                new_inst("JUMP_FORWARD", target=jump_to_target_inst),
                targets[0],
                load_global("print", False),
                new_inst("POP_TOP"),
                new_inst("RETURN_VALUE"),
                jump_to_target_inst,
                new_inst("LOAD_CONST", argval=2),
                load_global("print", False),
                new_inst("POP_TOP"),
                new_inst("RETURN_VALUE"),
                targets[1],
                new_inst("RETURN_VALUE"),
                new_inst("LOAD_CONST", argval=4),
                new_inst("RETURN_VALUE"),
            ]
            code_options = collections.OrderedDict(
                [
                    ("co_argcount", 0),
                    ("co_posonlyargcount", 0),
                    ("co_kwonlyargcount", 0),
                    ("co_nlocals", 0),
                    ("co_stacksize", 2),
                    ("co_flags", 3),
                    ("co_code", b""),
                    ("co_consts", consts),
                    ("co_names", ("print",)),
                    ("co_varnames", ()),
                    ("co_filename", __file__),
                    ("co_name", "test_py311_fn"),
                    ("co_qualname", "test_py311_fn"),
                    ("co_firstlineno", 1),
                    ("co_linetable", b""),
                    ("co_exceptiontable", b""),
                    ("co_freevars", ()),
                    ("co_cellvars", ()),
                ]
            )
            return bytecode_transformation.clean_and_assemble_instructions(
                insts,
                list(code_options.keys()),
                code_options,
            )
        # format: jump_opname, target_idx, expected forward jump, expected return value
        test_args = (
            ("JUMP_FORWARD", 0, False, 1),
            ("JUMP_FORWARD", 1, True, 3),
            ("JUMP_BACKWARD", 0, False, 1),
            ("JUMP_BACKWARD", 1, True, 3),
        )
        for test in test_args:
            insts, code = create_test_code(test[0], test[1])
            # check if offset of latest jump instruction is forward/backward
            for inst in reversed(insts):
                if inst.opname.startswith("JUMP"):
                    if test[2]:
                        self.assertIn("FORWARD", inst.opname)
                    else:
                        self.assertIn("BACKWARD", inst.opname)
                    break
            # run the code and check result
            def dummy_fn():
                pass
            dummy_fn.__code__ = code
            self.assertEqual(dummy_fn(), test[3])
            dummy_opt = torch._dynamo.optimize("eager")(dummy_fn)
            self.assertEqual(dummy_opt(), test[3])
def test_exception_table_encode_varint(self):
# these numbers have no real meaning to them
nums = [
0b111_101010_000000,
0b1100_111000_010101_101010,
]
b = bytecode_transformation.encode_exception_table_varint(
nums[0]
) + bytecode_transformation.encode_exception_table_varint(nums[1])
nums_new = []
b_iter = iter(bytes(b))
while True:
try:
nums_new.append(
bytecode_transformation.decode_exception_table_varint(b_iter)
)
except StopIteration:
break
self.assertEqual(nums, nums_new)
    @skipIfNotPy311
    def test_exception_table_parsing(self):
        """parse_exception_table -> assemble_exception_table must round-trip.

        fn is never called; only the exception table of its compiled code is
        inspected, so the undefined names a()-f() are harmless.
        """

        def fn():
            try:
                with a():
                    b()
                c()
            except Exception:
                d()
            finally:
                e()
            f()

        tab = bytecode_transformation.parse_exception_table(
            fn.__code__.co_exceptiontable
        )
        b = bytecode_transformation.assemble_exception_table(tab)
        self.assertEqual(b, fn.__code__.co_exceptiontable)
    @skipIfNotPy311
    def test_exception_table_e2e(self):
        """A no-op bytecode transform must preserve the exception table exactly."""

        # fn is only compiled, never executed; a()-f() are deliberately undefined.
        def fn():
            try:
                with a():
                    b()
                c()
            except Exception:
                d()
            finally:
                e()
            f()

        def nothing(*args):
            pass

        code = bytecode_transformation.transform_code_object(fn.__code__, nothing)
        self.assertEqual(code.co_exceptiontable, fn.__code__.co_exceptiontable)
    @skipIfNotPy311
    def test_exception_table_e2e_2(self):
        """No-op transform preserves the table when an entry ends on a large instruction."""
        # last instructions of an exn_table entry is a large instruction
        # i.e., LOAD_GLOBAL a
        def fn():
            try:
                return a
            except Exception:
                pass

        def nothing(*args):
            pass

        code = bytecode_transformation.transform_code_object(fn.__code__, nothing)
        self.assertEqual(code.co_exceptiontable, fn.__code__.co_exceptiontable)
    @skipIfNotPy311
    def test_exception_table_entry_propagation(self):
        """propagate_inst_exn_table_entries must assign each instruction the
        innermost exception-table entry whose [start, end] range covers it."""
        insts = []
        for _ in range(10):
            insts.append(bytecode_transformation.create_instruction("NOP"))
        # Entries are (start_inst, end_inst, target_inst, depth, lasti);
        # the ranges below overlap and nest deliberately.
        insts[8].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
            insts[0], insts[9], insts[0], 0, True
        )
        insts[0].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
            insts[0], insts[0], insts[1], 0, True
        )
        insts[1].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
            insts[0], insts[2], insts[2], 0, True
        )
        insts[5].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
            insts[4], insts[6], insts[3], 0, True
        )
        insts[9].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
            insts[9], insts[9], insts[4], 0, True
        )
        insts[7].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
            insts[7], insts[9], insts[5], 0, True
        )
        bytecode_transformation.propagate_inst_exn_table_entries(insts)
        # Expected target instruction index for each of the 10 instructions
        # after propagation resolves the nesting.
        expected = [1, 2, 2, 0, 3, 3, 3, 5, 5, 4]
        for inst, exp in zip(insts, expected):
            self.assertIsNotNone(inst.exn_tab_entry)
            self.assertIs(inst.exn_tab_entry.target, insts[exp])
@skipIfNotPy311
def test_compute_exception_table_nested(self):
insts = []
for _ in range(20):
insts.append(bytecode_transformation.create_instruction("NOP"))
insts[10].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[1], insts[10], insts[0], 0, True
)
insts[0].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[1], insts[1], insts[1], 0, True
)
insts[1].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[1], insts[3], insts[2], 0, True
)
insts[5].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[5], insts[7], insts[3], 0, True
)
insts[9].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[10], insts[10], insts[4], 0, True
)
insts[7].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[8], insts[10], insts[5], 0, True
)
insts[14].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[13], insts[17], insts[6], 0, True
)
insts[16].exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
insts[15], insts[16], insts[7], 0, True
)
bytecode_transformation.update_offsets(insts)
tab = bytecode_transformation.compute_exception_table(insts)
expected = [
(1, 1, 1),
(2, 3, 2),
(4, 4, 0),
(5, 7, 3),
(8, 9, 5),
(10, 10, 4),
(13, 14, 6),
(15, 16, 7),
(17, 17, 6),
]
self.assertEquals(len(tab), len(expected))
for entry, exp in zip(tab, expected):
self.assertEquals(entry.start, exp[0] * 2)
self.assertEquals(entry.end, exp[1] * 2)
self.assertEquals(entry.target, exp[2] * 2)
@skipIfNotPy311
def test_remove_dead_code_with_exn_table_entries(self):
create_instruction = bytecode_transformation.create_instruction
target1 = create_instruction("NOP")
target2 = create_instruction("NOP")
target3 = create_instruction("NOP")
exn_start = create_instruction("NOP")
exn_end = create_instruction("NOP")
insts = [
create_instruction("JUMP_FORWARD", target=target1),
exn_start, # dead
target1,
create_instruction("JUMP_FORWARD", target=target3),
exn_end, # dead
target2,
target3,
]
exn_start.exn_tab_entry = bytecode_transformation.InstructionExnTabEntry(
exn_start, exn_end, target2, 0, True
)
bytecode_transformation.propagate_inst_exn_table_entries(insts)
insts = bytecode_analysis.remove_dead_code(insts)
self.assertEquals(len(insts), 5)
self.assertNotIn(exn_start, insts)
self.assertNotIn(exn_end, insts)
self.assertIn(target2, insts)
self.assertIn(target3, insts)
bytecode_transformation.update_offsets(insts)
tab = bytecode_transformation.compute_exception_table(insts)
self.assertEquals(len(tab), 1)
self.assertEquals(tab[0].start, 2)
self.assertEquals(tab[0].end, 4)
self.assertEquals(tab[0].target, 6)
def test_unhandled_exception_in_dynamo(self):
# traceback.format_exc() approximates an unhandled exception
def f(a):
a += 1
raise RuntimeError("smoge")
return a
opt_fn = torch._dynamo.optimize("eager")(f)
try:
opt_fn(torch.ones(2))
except RuntimeError as e:
self.assertIn("smoge", traceback.format_exc())
def test_unhandled_exception_in_dynamo2(self):
# segfaults in python 3.11 if shadow frame is freed improperly
from torch.testing import make_tensor
def fn():
# test that the errors are the same for dense and sparse versions
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=torch.float32, device="cpu")
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a)
else:
return torch.addmm(a, a, a)
try:
test1(is_sparse=False)
except RuntimeError as msg:
try:
test1(is_sparse=True)
except RuntimeError as msg2:
raise RuntimeError("smoge")
opt_fn = torch._dynamo.optimize("eager")(fn)
try:
opt_fn()
except RuntimeError:
self.assertIn("smoge", traceback.format_exc())
def test_variable_access_in_exception(self):
def fn():
x = torch.ones(3, 3)
try:
raise RuntimeError("bad")
except RuntimeError:
x += 1
return x
opt_fn = torch._dynamo.optimize("eager")(fn)
torch.allclose(opt_fn(), torch.tensor([3.0]))
def test_ordered_dict_alias_reconstruct(self):
od = collections.OrderedDict
def fn():
d1 = dict()
d1["a"] = 1
d2 = od(d1)
d2["b"] = 2
torch._dynamo.graph_break()
if isinstance(d2, od):
return d2["a"] + d2["b"]
else:
return 0
dis.dis(fn)
self.assertEqual(torch._dynamo.optimize("eager")(fn)(), 3)
def test_raise_guard_full_constraint(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x):
if x.shape[0] == 3:
return x.sin()
return x.cos()
torch._dynamo.mark_dynamic(y, 0)
with self.assertRaises(ConstraintViolationError):
torch._dynamo.optimize("eager")(my_dyn_fn)(y)
def test_mark_static(self):
counter = CompileCounter()
def my_dyn_fn(x):
return x.cos()
y = torch.randn([3])
torch._dynamo.mark_static(y, 0)
torch._dynamo.optimize(counter)(my_dyn_fn)(y)
z = torch.randn([4])
torch._dynamo.optimize(counter)(my_dyn_fn)(z)
self.assertEqual(counter.frame_count, 2)
def test_no_raise_guard_partial_constraint(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x):
if x.shape[0] > 3:
return x.sin()
return x.cos()
torch._dynamo.optimize("eager")(my_dyn_fn)(y)
torch._dynamo.mark_dynamic(y, 0)
torch._dynamo.reset()
torch._dynamo.optimize("eager")(my_dyn_fn)(y)
def test_no_raise_guard_partial_constraint_across_break(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x, y):
z = x * y
torch._dynamo.graph_break()
if z.shape[0] > 2:
return z.cos()
return x.cos()
torch._dynamo.optimize("eager")(my_dyn_fn)(y, y)
torch._dynamo.mark_dynamic(y, 0)
torch._dynamo.reset()
torch._dynamo.optimize("eager")(my_dyn_fn)(y, y)
# Sadly, this does not throw - we do not prop correctly across the graph break
@unittest.expectedFailure
def test_raise_guard_partial_constraint_across_break(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x, y):
z = x * y
torch._dynamo.graph_break()
if z.shape[0] == 3:
return z.cos()
return x.cos()
torch._dynamo.optimize("eager")(my_dyn_fn)(y, y)
torch._dynamo.mark_dynamic(y, 0)
torch._dynamo.reset()
with self.assertRaisesRegex(
Exception,
):
torch._dynamo.optimize("eager")(my_dyn_fn)(y, y)
def test_raise_guard_partial_constraint_no_graph_break(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x, y):
z = x * y
if z.shape[0] == 3:
return z.cos()
return x.cos()
torch._dynamo.mark_dynamic(y, 0)
with self.assertRaises(ConstraintViolationError):
torch._dynamo.optimize("eager")(my_dyn_fn)(y, y)
def test_cannot_trace_mark_dynamic(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x):
torch._dynamo.mark_dynamic(x, 0)
return x * x
with self.assertRaisesRegex(
AssertionError, "Attempt to trace forbidden callable"
):
torch._dynamo.optimize("eager")(my_dyn_fn)(y)
    def test_cannot_trace_mark_dynamic_safe_unreached(self):
        """mark_dynamic in dead code is fine: for this input the shape guard
        takes the early return, so the forbidden call is never traced."""
        y = torch.randn([3, 3, 3])

        def my_dyn_fn(x):
            if x.shape[0] == 3:
                return x
            # Unreached for a size-3 input; must not trip the trace-time check.
            print("Running", torch._dynamo.mark_dynamic(x, 0))
            return x * x

        torch._dynamo.optimize("eager")(my_dyn_fn)(y)
    def test_anomaly_aot_autograd(self):
        """An error triggered in backward should produce a warning that points
        back at the forward call, when compiled with the aot_eager backend."""

        @allow_in_graph
        def h(a):
            r = a.sum()
            # Trigger an exception in backwards
            r.register_hook(lambda x: x + x.item())
            return r

        @torch.compile(backend="aot_eager")
        def f(a):
            return h(a)

        # Capture warnings while expecting the backend compilation to fail.
        with warnings.catch_warnings(record=True) as w, self.assertRaises(
            torch._dynamo.exc.BackendCompilerFailed
        ):
            f(torch.randn(2, 2, requires_grad=True))

        # Exactly one warning, naming the forward call that caused the error.
        self.assertEqual(len(w), 1)
        self.assertIn("forward call that caused the error", str(w[0].message))
    def test_py_guards_mark_dynamic(self):
        """Recompiles are driven by which dims are marked dynamic: a superset
        of already-dynamic dims reuses the cache; a non-subset recompiles."""

        def my_dyn_fn(a):
            if a.shape[0] > 2:
                return a.cos()
            return a.sin()

        counter = CompileCounter()

        # Run with dynamic
        x0 = torch.randn([3, 3, 3])
        torch._dynamo.mark_dynamic(x0, 0)
        torch._dynamo.optimize(counter)(my_dyn_fn)(x0)
        self.assertEqual(counter.frame_count, 1)

        # Run without dynamic, no recompile
        x = torch.randn([3, 3, 3])
        torch._dynamo.optimize(counter)(my_dyn_fn)(x)
        self.assertEqual(counter.frame_count, 1)

        # Mark a new dim, 1, as dynamic
        x1 = torch.randn([3, 3, 3])
        torch._dynamo.mark_dynamic(x1, 1)
        torch._dynamo.optimize(counter)(my_dyn_fn)(x1)
        # Recompile triggered because we marked a new dim as dynamic
        self.assertEqual(counter.frame_count, 2)

        # Reset
        torch._dynamo.reset()
        # Reset counter
        counter = CompileCounter()

        # Run with dynamic 1
        torch._dynamo.optimize(counter)(my_dyn_fn)(x1)
        self.assertEqual(counter.frame_count, 1)

        # Run with dynamic 0, not subset
        torch._dynamo.optimize(counter)(my_dyn_fn)(x0)
        self.assertEqual(counter.frame_count, 2)

        # Run with dynamic 0, 1, 2, not subset
        x012 = torch.randn([3, 3, 3])
        torch._dynamo.mark_dynamic(x012, 0)
        torch._dynamo.mark_dynamic(x012, 1)
        torch._dynamo.mark_dynamic(x012, 2)
        torch._dynamo.optimize(counter)(my_dyn_fn)(x012)
        self.assertEqual(counter.frame_count, 3)
def test_torch_compile_ctx_on_forward_and_training_step(self):
class MyModel(torch.nn.Module):
def forward(self):
...
def training_step(self):
self()
model = MyModel()
compiled_model = torch.compile(model)
model.forward = compiled_model.dynamo_ctx(model.forward)
model.training_step = compiled_model.dynamo_ctx(model.training_step)
model.training_step()
    def test_torch_guards_stack_frame_register_inlining(self):
        """TracingContext.current_frame should be entered once, for the
        top-level frame, when the callee is fully inlined."""
        x = torch.tensor([0.5, 0.5])
        y = torch.tensor([0.75, 0.75, 0.75, 0.75])
        z = torch.tensor([0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25])

        def uwu_inline_me(x, y, z):
            r = torch.cat((x, x)) + y
            r2 = torch.cat((y, y)) + z
            return r, r2

        def fn(x, y, z):
            r, r2 = uwu_inline_me(x, y, z)
            return torch.mul(r, r), torch.mul(r2, r2)

        seen_frames = []
        import contextlib

        # Record every frame summary passed to TracingContext.current_frame.
        @contextlib.contextmanager
        def global_context_capture_fn(frame_summary):
            seen_frames.append(frame_summary)
            yield

        with mock.patch(
            "torch._guards.TracingContext.current_frame",
            side_effect=global_context_capture_fn,
        ):
            torch._dynamo.optimize("eager")(fn)(x, y, z)

        # Only the outermost frame is registered; the inlined call is not.
        self.assertEqual(len(seen_frames), 1)
        self.assertEqual(seen_frames[0].name, "fn")
        self.assertEqual(seen_frames[0].line, "r, r2 = uwu_inline_me(x, y, z)")
    def test_torch_guards_stack_frame_register_inlining_deep(self):
        """With two levels of inlining, frame summaries for each level of the
        inline stack are registered with TracingContext.current_frame."""
        x = torch.tensor([0.5, 0.5])
        y = torch.tensor([0.75, 0.75, 0.75, 0.75])
        z = torch.tensor([0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25])

        def uwu_inline_me_deep(x, y):
            return torch.cat((x, x)) + y

        def uwu_inline_me(x, y, z):
            r = uwu_inline_me_deep(x, y)
            r2 = uwu_inline_me_deep(y, z)
            return r, r2

        def fn(x, y, z):
            r, r2 = uwu_inline_me(x, y, z)
            return torch.mul(r, r), torch.mul(r2, r2)

        seen_frames = []
        import contextlib

        # Record every frame summary passed to TracingContext.current_frame.
        @contextlib.contextmanager
        def global_context_capture_fn(frame_summary):
            seen_frames.append(frame_summary)
            yield

        with mock.patch(
            "torch._guards.TracingContext.current_frame",
            side_effect=global_context_capture_fn,
        ):
            torch._dynamo.optimize("eager")(fn)(x, y, z)

        # Three registrations: fn, uwu_inline_me, and the deep call's frame.
        self.assertEqual(len(seen_frames), 3)
        self.assertEqual(seen_frames[0].name, "fn")
        self.assertEqual(seen_frames[1].name, "uwu_inline_me")
        self.assertEqual(seen_frames[2].line, "r2 = uwu_inline_me_deep(y, z)")
def test_error_on_recompile(self):
@torch._dynamo.optimize("eager")
def fn(a, b):
return a + b
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
with self.assertRaises(torch._dynamo.exc.RecompileError):
fn(torch.rand(2, 3), torch.rand(2, 3))
fn(torch.rand(2, 3), (1, 2, 3))
    @expectedFailureDynamic
    @torch._dynamo.config.patch(automatic_dynamic_shapes=False)
    def test_compile_profiler(self):
        """CompileProfiler's report should mention recompiles (with reasons)
        only after inputs with new shapes force them."""

        class Model(torch.nn.Module):
            def forward(self, input):
                return input + input

        model = Model()
        with CompileProfiler() as prof:
            compiled = torch.compile(model, backend=prof)
            # Common prefix every report is expected to contain.
            base_checker = (
                lambda: FileCheck()
                .check("Torchdynamo Profiler Report")
                .check("Graph Breaks")
                .check("No graph breaks detected.")
                .check("Recompilation")
            )
            input = torch.rand((2, 3, 4))
            _ = compiled(input)
            base_checker().check("No recompilation detected.").run(prof.report())

            new_shape_input = torch.rand((3, 3, 4))
            _ = compiled(new_shape_input)

            # Not an exhaustive test of dynamic shapes behavior, but some sanity
            if torch._dynamo.config.assume_static_by_default:
                base_checker().check("Recompile Reasons").check("'forward'").check(
                    "cache_size_limit to 1"
                ).run(prof.report())
            else:
                base_checker().check("No recompilation detected.").run(prof.report())

            new_shape_input = torch.rand((4, 3, 4))
            _ = compiled(new_shape_input)

            # Both size-mismatch recompile reasons should now be reported.
            base_checker().check("Recompile Reasons").check("'forward'").check(
                "tensor 'L['input']' size mismatch at index 0. expected 2, actual 3"
            ).check(
                "tensor 'L['input']' size mismatch at index 0. expected 3, actual 4"
            ).run(
                prof.report()
            )
def test_guards_strip_function_call(self):
from torch._dynamo.guards import strip_function_call
test_case = [
("___odict_getitem(a, 1)", "a"),
("a.layers[slice(2)][0]._xyz", "a"),
("getattr(a.layers[slice(2)][0]._abc, '0')", "a"),
("getattr(getattr(a.x[3], '0'), '3')", "a"),
("a.layers[slice(None, -1, None)][0]._xyz", "a"),
("a.layers[func('offset', -1, None)][0]._xyz", "a"),
]
# strip_function_call should extract the object from the string.
for name, expect_obj in test_case:
self.assertEqual(strip_function_call(name), expect_obj)
def test_int_neg(self):
def int_neg(a, b):
x = a.shape[0]
y = b.shape[0]
return -x * -y * a * b
torch._dynamo.testing.standard_test(self, int_neg, 2)
def test_hash_getitem_slice(self):
s = GetItemSource(LocalSource("foo"), slice(None, -1, None))
s2 = GetItemSource(LocalSource("foo"), slice(None, -1, None))
s3 = GetItemSource(LocalSource("foo"), slice(None, -1, 2))
some_set = set()
self.assertTrue(s not in some_set)
self.assertTrue(s2 not in some_set)
self.assertTrue(s3 not in some_set)
some_set.add(s)
self.assertTrue(s in some_set)
# s and s2 should hash the same
self.assertTrue(s2 in some_set)
# s3 should be different
self.assertTrue(s3 not in some_set)
self.assertTrue(s == s2)
self.assertTrue(s != s3)
def test_inline_dict_function(self):
def _result_type_dict(dtype):
return {bool: torch.float32}[dtype]
@torch.compile
def f():
return torch.ones(3, dtype=_result_type_dict(bool))
self.assertEqual(f(), torch.ones(3, dtype=torch.float32))
def test_inline_dict_function_passed_as_arg(self):
@torch.compile
def fn(d, x, y):
if d[x] is torch.float32:
return y.cos()
else:
return y.sin()
dd = {bool: torch.float32, int: torch.int64}
self.assertEqual(fn(dd, bool, torch.ones(4)), torch.ones(4).cos())
self.assertEqual(fn(dd, int, torch.ones(4)), torch.ones(4).sin())
def test_add_sizes(self):
def func(x):
y = x.size()
return y + y
eager_out = func(torch.ones(10, 10, 3))
compile_out = torch._dynamo.optimize("eager")(func)(torch.ones(10, 10, 3))
self.assertTrue(isinstance(compile_out, torch.Size))
self.assertEqual(eager_out, compile_out)
    @unittest.skipIf(not TEST_MULTIGPU, "need multiple GPU")
    def test_cuda_set_device(self):
        """torch.cuda.set_device mid-function: the tensor allocated before the
        call keeps device 0, and two frames are compiled (presumably because
        set_device causes a graph break — the frame_count assertion below)."""

        def fn():
            a = torch.ones(2, device="cuda")
            torch.cuda.set_device(1)
            return a + 1

        with torch.cuda.device(0):
            counter = CompileCounter()
            opt_fn = torch._dynamo.optimize(counter)(fn)
            res = opt_fn()
            # `a` was allocated on the then-current device (0) before set_device(1).
            self.assertEqual(res.device.type, "cuda")
            self.assertEqual(res.device.index, 0)
            self.assertEqual(counter.frame_count, 2)
def test_nested_function_resuming_with_correct_globals(self):
# https://github.com/pytorch/pytorch/issues/99665
try:
from .utils import outer_func
except ImportError:
from utils import outer_func
def gn(x, y):
return x + y
def fn(x, y):
return outer_func(gn)(x, y)
x = torch.rand([3])
y = torch.rand([3])
opt_fn = torch.compile(backend="eager")(fn)
ref = fn(x, y)
res = opt_fn(x, y)
self.assertTrue(same(ref, res))
    @dataclasses.dataclass
    class CSETestCase:
        # Guard expression fed to the CSE pass.
        expr: str
        # Expected "_varN = ..." assignments emitted before the expression.
        preface: typing.List[str] = dataclasses.field(default_factory=list)
        # Expected rewritten expression; None means "unchanged".
        expected: typing.Optional[str] = None
        # Expected result on Python <= 3.8, where astunparse parenthesizes.
        expected_py38: typing.Optional[str] = None
def _is_py38(self) -> bool:
return sys.version_info[:2] <= (3, 8)
def _has_ast_unparse(self) -> bool:
from torch._dynamo.guards import HAS_UNPARSE_FUNCTIONS
return HAS_UNPARSE_FUNCTIONS
    def test_guards_cse_pass_single(self):
        """PyExprCSEPass must factor sub-expressions used more than
        USE_THRESHOLD times into _varN preface assignments."""
        if not self._has_ast_unparse():
            if IS_FBCODE:
                raise RuntimeError("Needs astunparse or Python-3.9+")
            raise unittest.SkipTest("Needs astunparse or Python-3.9+")
        from torch._dynamo.guards import PyExprCSEPass

        testcase = self.CSETestCase
        testcases = [
            # Nothing gets CSE-d, since the only repeated sub-expression is 'x'.
            # i.e. not a node type we are interested on.
            testcase(expr="x[0].a"),
            testcase(expr="x[1].a"),
            testcase(expr="x[2].a"),
            # 'a.b.c' gets CSE-d, since it's a sub-expression used more than 'PyExprCSEPass.USE_THRESHOLD'.
            testcase(
                expr="a.b.c[0].d.e",
                preface=["_var0 = a.b", "_var1 = _var0.c"],
                expected="_var1[0].d.e",
            ),
            testcase(expr="a.b.c[1].d.e", expected="_var1[1].d.e"),
            testcase(expr="a.b.c[2].d.e", expected="_var1[2].d.e"),
            # 'm.n[0]' gets CSE-d, since it is a sub-expression used more than 'PyExprCSEPass.USE_THRESHOLD'.
            testcase(
                expr="f(m.n[0], '0').x.y.z",
                preface=["_var2 = m.n", "_var3 = _var2[0]"],
                expected="f(_var3, '0').x.y.z",
            ),
            testcase(expr="f(m.n[0], '1').x.y.z", expected="f(_var3, '1').x.y.z"),
            testcase(expr="f(m.n[0], '2').x.y.z", expected="f(_var3, '2').x.y.z"),
            # The whole expression gets CSE-d, as well as all of its sub-expressions.
            testcase(
                expr="self.g(a, b).k",
                preface=["_var4 = self.g", "_var5 = _var4(a, b)", "_var6 = _var5.k"],
                expected="_var6",
            ),
            testcase(expr="self.g(a, b).k", expected="_var6"),
            testcase(expr="self.g(a, b).k", expected="_var6"),
        ]
        csepass = PyExprCSEPass()
        csepass.count([t.expr for t in testcases])

        for t in testcases:
            preface, expr = csepass.replace(t.expr)
            self.assertEqual(preface, t.preface)
            expected = t.expected if t.expected is not None else t.expr
            self.assertEqual(expr, expected)
    def test_guards_cse_pass_multiple(self):
        """CSE on compound boolean/arithmetic guard expressions; Python <= 3.8
        expectations differ because astunparse adds parentheses."""
        if not self._has_ast_unparse():
            raise unittest.SkipTest("Needs astunparse or Python-3.9+")
        from torch._dynamo.guards import PyExprCSEPass

        testcase = self.CSETestCase
        testcases = [
            testcase(
                expr="x[0].a < x[1].a * (3 - x[2].a)",
                expected="x[0].a < x[1].a * (3 - x[2].a)",
                expected_py38="(x[0].a < (x[1].a * (3 - x[2].a)))",
            ),
            testcase(
                expr="a.b.c[0].d.e + a.b.c[1].d.e * a.b.c[2].d.e > 0",
                preface=["_var0 = a.b", "_var1 = _var0.c"],
                expected="_var1[0].d.e + _var1[1].d.e * _var1[2].d.e > 0",
                expected_py38="((_var1[0].d.e + (_var1[1].d.e * _var1[2].d.e)) > 0)",
            ),
            testcase(
                expr="f(m.n[0], '0').x.y.z * f(m.n[0], '1').x.y.z * f(m.n[0], '2').x.y.z < 512",
                preface=["_var2 = m.n", "_var3 = _var2[0]"],
                expected="f(_var3, '0').x.y.z * f(_var3, '1').x.y.z * f(_var3, '2').x.y.z < 512",
                expected_py38="(((f(_var3, '0').x.y.z * f(_var3, '1').x.y.z) * f(_var3, '2').x.y.z) < 512)",
            ),
            testcase(
                expr="self.g(a, b).k + (1 - self.g(a, b).k) <= m[0].a + self.g(a, b).k",
                preface=["_var4 = self.g", "_var5 = _var4(a, b)", "_var6 = _var5.k"],
                expected="_var6 + (1 - _var6) <= m[0].a + _var6",
                expected_py38="((_var6 + (1 - _var6)) <= (m[0].a + _var6))",
            ),
        ]

        csepass = PyExprCSEPass()
        csepass.count([t.expr for t in testcases])

        for t in testcases:
            preface, expr = csepass.replace(t.expr)
            self.assertEqual(preface, t.preface)
            # Pick the version-appropriate expectation; None means unchanged.
            expected = t.expected_py38 if self._is_py38() else t.expected
            expected = expected if expected is not None else t.expr
            self.assertEqual(expr, expected)
    def test_guard_function_builder_with_cse(self):
        """build_guard_function should emit a guard() body whose expressions
        share CSE-extracted _varN assignments, matching the expected source
        exactly (parenthesization differs on Python <= 3.8)."""
        from torch._dynamo.guards import build_guard_function

        exprs = [
            "x[0].a < x[1].a * (3 - x[2].a)",
            "a.b.c[0].d.e + a.b.c[1].d.e * a.b.c[2].d.e > 0",
            "f(m.n[0], '0').x.y.z * f(m.n[0], '1').x.y.z * f(m.n[0], '2').x.y.z < 512",
            "self.g(a, b).k + (1 - self.g(a, b).k) <= m[0].a + self.g(a, b).k",
        ]
        _, pycode = build_guard_function(exprs, "")
        expected = """\
def ___make_guard_fn():
    def guard(L):
        if not (x[0].a < x[1].a * (3 - x[2].a)):
            return False
        _var0 = a.b
        _var1 = _var0.c
        if not (_var1[0].d.e + _var1[1].d.e * _var1[2].d.e > 0):
            return False
        _var2 = m.n
        _var3 = _var2[0]
        if not (f(_var3, '0').x.y.z * f(_var3, '1').x.y.z * f(_var3, '2').x.y.z < 512):
            return False
        _var4 = self.g
        _var5 = _var4(a, b)
        _var6 = _var5.k
        if not (_var6 + (1 - _var6) <= m[0].a + _var6):
            return False
        return True
    return guard
"""
        expected_38 = """\
def ___make_guard_fn():
    def guard(L):
        if not ((x[0].a < (x[1].a * (3 - x[2].a)))):
            return False
        _var0 = a.b
        _var1 = _var0.c
        if not (((_var1[0].d.e + (_var1[1].d.e * _var1[2].d.e)) > 0)):
            return False
        _var2 = m.n
        _var3 = _var2[0]
        if not ((((f(_var3, '0').x.y.z * f(_var3, '1').x.y.z) * f(_var3, '2').x.y.z) < 512)):
            return False
        _var4 = self.g
        _var5 = _var4(a, b)
        _var6 = _var5.k
        if not (((_var6 + (1 - _var6)) <= (m[0].a + _var6))):
            return False
        return True
    return guard
"""
        expected_38_no_astunparse = """\
def ___make_guard_fn():
    def guard(L):
        if not (x[0].a < x[1].a * (3 - x[2].a)):
            return False
        if not (a.b.c[0].d.e + a.b.c[1].d.e * a.b.c[2].d.e > 0):
            return False
        if not (f(m.n[0], '0').x.y.z * f(m.n[0], '1').x.y.z * f(m.n[0], '2').x.y.z < 512):
            return False
        if not (self.g(a, b).k + (1 - self.g(a, b).k) <= m[0].a + self.g(a, b).k):
            return False
        return True
    return guard
"""

        if self._is_py38():
            # On 3.8 the output depends on whether astunparse is available
            # (no unparse means no CSE pass is applied at all).
            expected = (
                expected_38 if self._has_ast_unparse() else expected_38_no_astunparse
            )
        self.assertEqual(expected, pycode)
def test_dynamo_compiling_fake_tensor_to_vararg_int(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x):
# use numpy int so it's wrapped as fake tensor in dynamo
shape = np.int_(16)
# test shape as fake tensor, which param type is
# Sequence[Union[_int, SymInt]]
return x.reshape(shape)
x = torch.rand([4, 4])
model = MyModule()
orig_out = model(x)
opt_model = torch._dynamo.optimize("eager")(MyModule())
opt_out = opt_model(x)
self.assertTrue(same(orig_out, opt_out))
def test_scalar_tensor_is_equivalent_to_symint_argument(self):
class GumbelTopKSampler(torch.nn.Module):
def __init__(self, T, k):
super(GumbelTopKSampler, self).__init__()
self.T = torch.nn.Parameter(
torch.tensor(T, dtype=torch.float32), requires_grad=False
)
self.k = torch.nn.Parameter(
torch.tensor(k, dtype=torch.int32), requires_grad=False
)
def sample_discrete(self, logits):
threshold = torch.topk(logits, self.k, sorted=True)[0][..., -1]
samples = torch.ge(logits.squeeze(1), threshold).float()
return samples
def forward(self, logits):
dsamples = self.sample_discrete(logits)
return dsamples
x = torch.rand([4, 4, 4, 4])
m = GumbelTopKSampler(T=4, k=4)
orig_out = m(x)
opt_m = torch.compile(backend="eager")(m)
opt_out = opt_m(x)
self.assertTrue(same(orig_out, opt_out))
def test_scalar_tensor_is_equivalent_to_symint_list_argument(self):
class Jitter(torch.nn.Module):
def __init__(self, jitter_val):
super(Jitter, self).__init__()
self.jitter_val = jitter_val
def roll_tensor(self, input):
h_shift = np.int_(self.jitter_val - 1)
w_shift = np.int_(self.jitter_val + 1)
return torch.roll(
torch.roll(input, shifts=h_shift, dims=2), shifts=w_shift, dims=3
)
def forward(self, input):
return self.roll_tensor(input)
x = torch.rand([4, 4, 4, 4])
m = Jitter(jitter_val=4)
orig_out = m(x)
opt_m = torch.compile(backend="eager")(m)
opt_out = opt_m(x)
self.assertTrue(same(orig_out, opt_out))
def test_scalar_tensor_is_equivalent_to_int_list_argument(self):
class MyModel(torch.nn.Module):
def forward(self, input):
permute = torch.tensor([0, 2, 1])
x = input.permute(*permute)
return x
x = torch.randn(2, 3, 4)
m = MyModel()
orig_out = m(x)
opt_m = torch.compile(backend="eager")(m)
opt_out = opt_m(x)
self.assertTrue(same(orig_out, opt_out))
def test_torch_variable_hasattr(self):
def fn(x):
if hasattr(torch.nn, "Module"):
return x * x
return x + 1
compiled_fn = torch.compile(backend="eager", fullgraph=True)(fn)
x = torch.rand([4, 4])
fn_out = fn(x)
compiled_out = compiled_fn(x)
self.assertTrue(same(fn_out, compiled_out))
def test_torch_objects_as_keys(self):
remap = {torch.float16: torch.float32}
def fn():
return torch.randn(3, dtype=remap[torch.float16])
opt = torch._dynamo.optimize("eager")(fn)
opt()
def test_tracing_py_tree(self):
import torch.utils._pytree as pytree
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
counter = CompileCounter()
torch._dynamo.optimize(counter, nopython=True)(fn)(xs)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 3)
def test_tracing_nested_py_tree(self):
import torch.utils._pytree as pytree
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsl = [xs, xs, xs, xs]
counter = CompileCounter()
comp_out = torch._dynamo.optimize(counter, nopython=True)(fn)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 12)
def test_tracing_nested_py_tree_tuples(self):
import torch.utils._pytree as pytree
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsl = (xs, xs, xs, xs)
counter = CompileCounter()
comp_out = torch._dynamo.optimize(counter, nopython=True)(fn)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 12)
def test_tracing_nested_py_tree_dicts(self):
import torch.utils._pytree as pytree
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsl = {
"a": xs,
"b": xs,
"c": xs,
}
counter = CompileCounter()
comp_out = torch._dynamo.optimize(counter, nopython=True)(fn)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 9)
def test_tracing_nested_py_tree_mixed_all(self):
import torch.utils._pytree as pytree
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsa = (xs, xs)
xsb = {"aa": xsa, "ab": xs}
xsl = {
"a": xs,
"b": xsa,
"c": xsb,
}
counter = CompileCounter()
comp_out = torch._dynamo.optimize(counter, nopython=True)(fn)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 18)
def test_tracing_tree_map_only(self):
import torch.utils._pytree as pytree
def fn(xs):
def mapper(x):
return x.clone()
y = pytree.tree_map_only(torch.Tensor, mapper, xs)
return y
xs = [torch.tensor(i) for i in range(3)] + ["hi"]
xsa = (xs, xs)
xsb = {"aa": xsa, "ab": xs}
counter = CompileCounter()
comp_out = torch._dynamo.optimize(counter, nopython=True)(fn)(xsb)
real_out = fn(xsb)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 9)
    def _prepare_for_translation_validator(self):
        """Build a TranslationValidator with three integer symbols registered.

        Returns ((s0, s1, s2), (z0, z1, z2), validator), where the s* are
        SymPy symbols and the z* are their corresponding Z3 variables.
        """
        validator = TranslationValidator()

        # SymPy symbols.
        s0, s1, s2 = sympy.symbols("s0 s1 s2", integer=True)

        # Z3 symbols.
        [validator.add_var(s, int) for s in (s0, s1, s2)]
        z0, z1, z2 = [validator.z3var(s) for s in (s0, s1, s2)]

        return (s0, s1, s2), (z0, z1, z2), validator
    @torch._dynamo.config.patch(translation_validation=True)
    def test_sympy_to_z3_translation(self):
        """SympyToZ3 must translate each SymPy expression into the exact
        expected Z3 term (compared structurally with z3's eq)."""
        import z3

        (
            (s0, s1, s2),
            (z0, z1, z2),
            validator,
        ) = self._prepare_for_translation_validator()

        test_cases = [
            # Integer constants.
            (sympy.S.Zero, z3.IntVal(0)),
            (sympy.S.One, z3.IntVal(1)),
            (sympy.S.NegativeOne, z3.IntVal(-1)),
            (sympy.Integer(2), z3.IntVal(2)),
            (
                s0,
                z0,
            ),
            # Arithmetic operations.
            *[
                (op(s0, s1), op(z0, z1))
                for op in (
                    operator.add,
                    operator.mul,
                    operator.pow,
                )
            ],
            # Logical operations.
            *[
                (sympy_op(s0, s1), z3_op(z0, z1))
                for sympy_op, z3_op in (
                    (sympy.Eq, operator.eq),
                    (sympy.Ne, operator.ne),
                    (sympy.Lt, operator.lt),
                    (sympy.Le, operator.le),
                    (sympy.Gt, operator.gt),
                    (sympy.Ge, operator.ge),
                )
            ],
            # Other operations.
            (
                s0 - s1,
                z0 + z3.IntVal(-1) * z1,
            ),
            (
                s0 / s1,
                z3.ToReal(z0) * (z1**-1),
            ),
            (FloorDiv(s0, s1), z3.ToInt(z3.ToReal(z0) / z3.ToReal(z1))),
            (s0 % s1, z0 - z3.ToInt(z3.ToReal(z0) / z3.ToReal(z1)) * z1),
            (
                s2 % (s0 / s1),
                z2
                - z3.ToReal(z3.ToInt(z3.ToReal(z2) / (z3.ToReal(z0) * z1**-1)))
                * (z3.ToReal(z0) * z1**-1),
            ),
            (
                s2 % (s0**3),
                z2 - z3.ToReal(z3.ToInt(z3.ToReal(z2) / z0**3)) * z0**3,
            ),
        ]

        toZ3 = SympyToZ3(validator)
        for sympy_expr, z3_expr in test_cases:
            result = toZ3.run(sympy_expr)
            # z3's eq() is structural equality, not solver equivalence.
            self.assertTrue(
                z3_expr.eq(result), msg=f"expected: {z3_expr}. Got: {result}"
            )
    @torch._dynamo.config.patch(translation_validation=True)
    def test_translation_validator_sat(self):
        """Validation succeeds when every target constraint is implied by
        the source constraints (target solutions form a subset)."""
        (
            (s0, s1, s2),
            (z0, z1, z2),
            validator,
        ) = self._prepare_for_translation_validator()
        # Source constraints, expressed over the Z3 variables.
        validator.add_source_expr(z0 > 5)
        validator.add_source_expr(z1 / 2 > z0)
        # Solutions for target is a subset of the solutions for the source.
        validator.add_target_expr(s0 > 20)
        validator.add_target_expr(s1 > s0**2)
        r = validator.validate()
        # On success there is no counterexample model and no failing source expr.
        self.assertEqual(r.success, True, msg=f"failed with model: {r.model}")
        self.assertIsNone(r.model)
        self.assertIsNone(r.failed_source_expr)
    @torch._dynamo.config.patch(translation_validation=True)
    def test_translation_validator_unsat(self):
        """Validation fails when a target constraint admits solutions the
        source constraints do not; a counterexample model must be reported."""
        (
            (s0, s1, s2),
            (z0, z1, z2),
            validator,
        ) = self._prepare_for_translation_validator()
        validator.add_source_expr(z0 > 5)
        validator.add_source_expr(z1 / 2 > z0)
        # Solutions for target is NOT a subset of the solutions for the source.
        validator.add_target_expr(s0 > 20)
        # This expression is less restrictive than its counterpart.
        validator.add_target_expr(s1 > s0 + 2)
        r = validator.validate()
        # On failure the validator surfaces a model and the violated source expr.
        self.assertEqual(r.success, False, msg=f"failed with model: {r.model}")
        self.assertIsNotNone(r.model)
        self.assertIsNotNone(r.failed_source_expr)
class TestTracer(JitTestCase):
    def test_jit_save(self):
        """torch.jit.trace of a module with exported __getstate__/__setstate__
        must succeed both eagerly and when the tracing function is compiled
        by dynamo with the 'eager' backend."""
        def fn():
            # Module with scripted (exported) pickling hooks; forward simply
            # offsets the input by the stored constant.
            class Foo(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.a = 3
                @torch.jit.export
                def __getstate__(self):
                    return (3, self.training)
                @torch.jit.export
                def __setstate__(self, state):
                    self.a = state[0]
                    self.training = state[1]
                def forward(self, x):
                    return x + self.a
            f = Foo()
            return torch.jit.trace(f, (torch.rand(3, 4),))
        # Run once eagerly, then under the dynamo optimizer; both must not raise.
        fn()
        opt_fn = torch._dynamo.optimize("eager")(fn)
        opt_fn()
if __name__ == "__main__":
    # Import lazily so merely importing this module never pulls in the test
    # harness; run_tests() discovers and runs the TestCase classes above.
    from torch._dynamo.test_case import run_tests
    run_tests()
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
7bcf641d35d5803e3a09acdb8776d9a11ac344a1 | 2cc1097ec2941158be3466ec9a3ad54a02e92c83 | /gutenbergToTei.py | 1870403c73e0f362c5dce4af4d4ddfba0a4fcf6e | [] | no_license | mpetyx/gutenbergToTei | 418583a6905be6aaf3776cc02daaa5d6668675d7 | c8fd98fb832672a521e51403756f9d077fcceca6 | refs/heads/master | 2020-05-20T03:08:13.510126 | 2013-12-16T12:19:59 | 2013-12-16T12:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,427 | py | # gutenbergToTei.py
#
# Reformats and renames etexts downloaded from Project Gutenberg.
#
# Software adapted from Michiel Overtoom, motoom@xs4all.nl, july 2009.
#
# Modified by Matthew Jockers August 17, 2010 to encode result into TEI based XML
#
import os
import re
import shutil
# Boilerplate prefixes: any reassembled paragraph starting with one of these
# is dropped from the output.
remove = ["Produced by","End of the Project Gutenberg","End of Project Gutenberg"]

def beautify(fn, outputDir, filename):
    ''' Reads a raw Project Gutenberg etext, reformats paragraphs,
    removes fluff, and writes the result as a minimal TEI XML file.

    fn        -- path of the raw input text
    outputDir -- output directory (expected to end with a path separator)
    filename  -- original file name; a sanitized copy with a .xml extension
                 becomes the output file name
    '''
    # Read and strip all lines up front; 'with' guarantees the input handle
    # is closed (the original left it open).
    with open(fn) as infile:
        lines = [line.strip() for line in infile]
    collect = False
    lookforsubtitle = False
    outlines = []
    title = ""
    # BUG FIX: initialise to None so the header-emission test below cannot
    # raise NameError when the etext lacks an "Author:" or "Title:" line.
    titleTemp = authorTemp = None
    # TEI skeleton fragments; title/author get spliced between them.
    one="<?xml version=\"1.0\" encoding=\"utf-8\"?><TEI xmlns=\"http://www.tei-c.org/ns/1.0\" version=\"5.0\"><teiHeader><fileDesc><titleStmt>"
    two = "</titleStmt><publicationStmt><publisher></publisher><pubPlace></pubPlace><availability status=\"free\"><p>Project Gutenberg</p></availability></publicationStmt><seriesStmt><title>Project Gutenberg Full-Text Database</title></seriesStmt><sourceDesc default=\"false\"><biblFull default=\"false\"><titleStmt>"
    three = "</titleStmt><extent></extent><publicationStmt><publisher></publisher><pubPlace></pubPlace><date></date></publicationStmt></biblFull></sourceDesc></fileDesc><encodingDesc><editorialDecl default=\"false\"><p>Preliminaries omitted.</p></editorialDecl></encodingDesc></teiHeader><text><body><div>"
    for line in lines:
        if line.startswith("Author: "):
            authorTemp = line[8:]
            continue
        if line.startswith("Title: "):
            title = line[7:]
            titleTemp = line[7:]
            lookforsubtitle = True
            continue
        if lookforsubtitle:
            # Non-blank lines directly after "Title:" are subtitle text.
            if not line.strip():
                lookforsubtitle = False
            else:
                subtitle = line.strip()
                subtitle = subtitle.strip(".")
                title += ", " + subtitle
        if ("*** START" in line) or ("***START" in line):
            collect = True
            paragraph = ""
            continue
        if ("*** END" in line) or ("***END" in line):
            break
        if not collect:
            continue
        if titleTemp and authorTemp:
            # First collected line: emit the TEI header once, then clear the
            # temporaries so this branch never fires again.
            outlines.append(one)
            outlines.extend(["<title>", titleTemp, "</title>",
                             "<author>", authorTemp, "</author>"])
            outlines.append(two)
            outlines.extend(["<title>", titleTemp, "</title>",
                             "<author>", authorTemp, "</author>"])
            outlines.append(three)
            authorTemp = None
            titleTemp = None
            continue
        if not line:
            # Blank line terminates the paragraph being assembled.
            # NOTE(review): the first paragraph after *** START is emitted
            # without its "<p>" opener (pre-existing quirk, preserved).
            paragraph = paragraph.strip()
            for term in remove:
                if paragraph.startswith(term):
                    paragraph = ""
            if paragraph:
                # BUG FIX: escape '&' so the output is well-formed XML
                # (the original replace("&", "&") was a no-op).
                paragraph = paragraph.replace("&", "&amp;")
                outlines.append(paragraph)
                outlines.append("</p>")
            paragraph = "<p>"
        else:
            paragraph += " " + line
    # Compose a filename. Replace some illegal file name characters with alternatives.
    ofn = filename
    ofn = ofn.replace("&", "")
    ofn = ofn.replace("/", "")
    ofn = ofn.replace("\"", "")
    ofn = ofn.replace(":", "")
    ofn = ofn.replace(",,", "")
    ofn = ofn.replace(" ", "")
    # BUG FIX: only swap the extension; the old replace("txt", "xml") would
    # also rewrite 'txt' occurring anywhere inside the name.
    if ofn.endswith(".txt"):
        ofn = ofn[:-4] + ".xml"
    outlines.append("</div></body></text></TEI>")
    text = "\n".join(outlines)
    # BUG FIX: the original passed re.M as the positional 'count' argument,
    # silently capping each substitution at 8 replacements (and re.M is
    # irrelevant here anyway: the patterns have no ^/$ anchors). Replace
    # every occurrence instead.
    text = re.sub(r"End of the Project Gutenberg .*", "", text)
    text = re.sub(r"Produced by .*", "", text)
    text = re.sub(r"<p>\s+</p>", "", text)     # drop empty paragraphs
    text = re.sub(r"\s+", " ", text)           # collapse all whitespace
    with open(outputDir + ofn, "wt") as outfile:
        outfile.write(text)
# Driver: convert every *.txt file in sourceDir, writing the TEI XML result
# into outputDir. Both paths are placeholders and must be edited before use.
sourcepattern = re.compile(".*\.txt$")
sourceDir = "/Path/to/your/ProjectGutenberg/files/"
outputDir = "/Path/to/your/ProjectGutenberg/TEI/Output/files/"
for fn in os.listdir(sourceDir):
    # Only process plain-text etexts; everything else is skipped.
    if sourcepattern.match(fn):
        beautify(sourceDir+fn, outputDir, fn)
| [
"mpetyx@gmail.com"
] | mpetyx@gmail.com |
642c4c69096cef5756541a7d1f80cb48a9371758 | a0717328fdb8537251e8cccce25103c4fc97b172 | /web/backend/source/db/__init__.py | 3c4adec769160c35d229bf20e3f885df8a9e454f | [] | no_license | rodobre/smarthack | bee6c4d9b81cc1af3036dde9b81178cfa7e05bd1 | 0a76c089ec23610fd78e396bf7aa5d8c793882ef | refs/heads/master | 2020-09-01T14:22:22.478647 | 2019-11-03T08:10:09 | 2019-11-03T08:10:09 | 218,978,545 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from redis_collections import Dict
import redislite
# database stored in a file (simplest way)
# TODO: change engine type if needed
db_uri = "sqlite:///database.sql"
# SQLAlchemy setup: engine bound to the SQLite file, declarative base for
# model classes, and a session factory bound to the engine.
engine = create_engine(db_uri)
Base = declarative_base()
Session = sessionmaker(bind = engine)
# Redis-backed key/value cache persisted via redislite in a local .rdb file.
cache_uri = 'storage.rdb'
redis_connection = redislite.StrictRedis(cache_uri)
# Dict-like view over the redis store; all entries live under key 'storage'.
Cache = Dict(redis=redis_connection, key='storage')
"mihailferaru2000@gmail.com"
] | mihailferaru2000@gmail.com |
462ef27593e49abb81fe553a729e9576409f91dc | 53edf6b0f4262ee76bb4e3b943394cfeafe54865 | /simulation_codes/_archived/PARTICLE_PUSHER_ONLY/simulation_parameters_1D.py | d14165a1507cd90571778a68494c7b5c588fb283 | [] | no_license | Yoshi2112/hybrid | f86265a2d35cb0a402ba6ab5f718717d8eeb740c | 85f3051be9368bced41af7d73b4ede9c3e15ff16 | refs/heads/master | 2023-07-07T21:47:59.791167 | 2023-06-27T23:09:23 | 2023-06-27T23:09:23 | 82,878,960 | 0 | 1 | null | 2020-04-16T18:03:59 | 2017-02-23T03:14:49 | Python | UTF-8 | Python | false | false | 16,313 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 22 11:00:58 2017
@author: iarey
Changing so that values are calculated here, but main inputs come from files
That way I'd only have to change the input file, rather than manually changing
every single value on each run. Makes doing pearl-studies much easier.
This script will just be about loading them in, doing checks, and initializing
derived values/casting to SI units (e.g. alfven velocity)
"""
import numpy as np
import sys
import os
# NOTE: particle_reflect and particle_reinit flags are deprecated. Remove at some point.
# Random options for testing purposes. Nothing here that'll probably be used
# Except under pretty specific circumstances.
gaussian_T = False
run_input = '../run_inputs/run_params.txt'
plasma_input = '../run_inputs/plasma_params.txt'
# Load run parameters
# NOTE(review): the first entries below are hard-coded here rather than read
# from the file; the f.readline() sequence only starts at L. Confirm that the
# input file's line layout matches this consumption order.
with open(run_input, 'r') as f:
    ### RUN PARAMETERS ###
    drive = 'F:' # Drive letter or path for portable HDD e.g. 'E:/' or '/media/yoshi/UNI_HD/'
    save_path = '/runs/new_flux_test2/' # Series save dir : Folder containing all runs of a series
    run = 0 # Series run number : For multiple runs (e.g. parameter studies) with same overall structure (i.e. test series)
    save_particles = 0 # Save data flag : For later analysis
    seed = 6546351 # RNG Seed : Set to enable consistent results for parameter studies
    ## FLAGS ##
    homogenous = 1 # Set B0 to homogenous (as test to compare to parabolic)
    particle_periodic = 1 # Set particle boundary conditions to periodic
    particle_reflect = 0 # Set particle boundary conditions to reflective
    particle_reinit = 0 # Set particle boundary conditions to reinitialize
    field_periodic = 0 # Set field boundary to periodic (False: Absorbtive Boundary Conditions)
    radix_loading = 0 # Flag to use bit-reversed radix scrambling sets to initialise velocities
    ### SIMULATION PARAMETERS ###
    NX = 64 # Number of cells - doesn't include ghost cells
    ND = 2 # Damping region length: Multiple of NX (on each side of simulation domain)
    max_rev = 50 # Simulation runtime, in multiples of the ion gyroperiod (in seconds)
    dxm = 1.0 # Number of c/wpi per dx (Ion inertial length: anything less than 1 isn't "resolvable" by hybrid code, anything too much more than 1 does funky things to the waveform)
    L = float(f.readline().split()[1]) # Field line L shell
    r_A = float(f.readline().split()[1]) # Ionospheric anchor point (loss zone/max mirror point) - "Below 100km" - Baumjohann, Basic Space Plasma Physics
    ie = int(f.readline().split()[1]) # Adiabatic electrons. 0: off (constant), 1: on.
    min_dens = float(f.readline().split()[1]) # Allowable minimum charge density in a cell, as a fraction of ne*q
    B_eq = f.readline().split()[1] # Initial magnetic field at equator: None for L-determined value (in T) :: 'Exact' value in node ND + NX//2
    rc_hwidth = f.readline().split()[1] # Ring current half-width in number of cells (2*hwidth gives total cells with RC)
    orbit_res = float(f.readline().split()[1]) # Orbit resolution
    freq_res = float(f.readline().split()[1]) # Frequency resolution : Fraction of angular frequency for multiple cyclical values
    part_res = float(f.readline().split()[1]) # Data capture resolution in gyroperiod fraction: Particle information
    field_res = float(f.readline().split()[1]) # Data capture resolution in gyroperiod fraction: Field information
    ### RUN DESCRIPTION ###
    run_description = f.readline() # Commentary to attach to runs, helpful to have a quick description
# Per-species plasma parameters: each file line holds a label column followed
# by one value per species, so every array below has one entry per species.
with open(plasma_input, 'r') as f:
    ### PARTICLE PARAMETERS ###
    species_lbl = np.array(f.readline().split()[1:])
    temp_color = np.array(f.readline().split()[1:])
    temp_type = np.array(f.readline().split()[1:], dtype=int)
    dist_type = np.array(f.readline().split()[1:], dtype=int)
    nsp_ppc = np.array(f.readline().split()[1:], dtype=int)
    mass = np.array(f.readline().split()[1:], dtype=float)
    charge = np.array(f.readline().split()[1:], dtype=float)
    drift_v = np.array(f.readline().split()[1:], dtype=float)
    density = np.array(f.readline().split()[1:], dtype=float)*1e6
    anisotropy = np.array(f.readline().split()[1:], dtype=float)
    # Particle energy: If beta == 1, energies are in beta. If not, they are in eV
    E_perp = np.array(f.readline().split()[1:], dtype=float)
    E_e = float(f.readline().split()[1])
    beta_flag = int(f.readline().split()[1])
#%%### DERIVED SIMULATION PARAMETERS
### PHYSICAL CONSTANTS ###
q = 1.602177e-19 # Elementary charge (C)
c = 2.998925e+08 # Speed of light (m/s)
mp = 1.672622e-27 # Mass of proton (kg)
me = 9.109384e-31 # Mass of electron (kg)
kB = 1.380649e-23 # Boltzmann's Constant (J/K)
e0 = 8.854188e-12 # Epsilon naught - permittivity of free space
mu0 = (4e-7) * np.pi # Magnetic Permeability of Free Space (SI units)
RE = 6.371e6 # Earth radius in metres
B_surf = 3.12e-5 # Magnetic field strength at Earth surface (equatorial)
NC = NX + 2*ND # Total number of cells
ne = density.sum() # Electron number density
E_par = E_perp / (anisotropy + 1) # Parallel species energy/beta
# Normalize particle boundary-condition flags: reflect/reinit are deprecated,
# so anything other than periodic collapses to open boundaries.
particle_open = 0
if particle_reflect == 1 or particle_reinit == 1:
    print('Only periodic or open boundaries supported, defaulting to open')
    particle_reflect = particle_reinit = particle_periodic = 0
    particle_open = 1
elif particle_periodic == 0:
    particle_open = 1
if B_eq == '-':
    # No explicit value supplied: derive the equatorial field strength from
    # the surface dipole field scaled by L^3.
    B_eq = (B_surf / (L ** 3)) # Magnetic field at equator, based on L value
else:
    # BUG FIX: the value read from file is a string; without this cast every
    # later use of B_eq in arithmetic (e.g. B_eq ** 2, B_eq * 1e9) raises
    # TypeError. Mirrors the int() cast done for rc_hwidth below.
    B_eq = float(B_eq)
if rc_hwidth == '-':
    rc_hwidth = 0   # 0 disables the ring-current region
else:
    rc_hwidth = int(rc_hwidth)
# Convert the input energies to temperatures (K): either directly from eV
# or from a plasma beta referenced to the equatorial field.
if beta_flag == 0:
    # Input energies as (perpendicular) eV
    beta_per = None
    Te0_scalar = E_e * 11603.
    Tpar = E_par * 11603.
    Tperp = E_perp* 11603.
else:
    # Input energies in terms of a (perpendicular) beta
    Tpar = E_par * B_eq ** 2 / (2 * mu0 * ne * kB)
    Tperp = E_perp * B_eq ** 2 / (2 * mu0 * ne * kB)
    Te0_scalar = E_par[0] * B_eq ** 2 / (2 * mu0 * ne * kB)
wpi = np.sqrt(ne * q ** 2 / (mp * e0)) # Proton Plasma Frequency, wpi (rad/s)
va = B_eq / np.sqrt(mu0*ne*mp) # Alfven speed at equator: Assuming pure proton plasma
dx = dxm * c / wpi # Spatial cadence, based on ion inertial length
xmax = NX // 2 * dx # Maximum simulation length, +/-ve on each side
xmin =-NX // 2 * dx
charge *= q # Cast species charge to Coulomb
mass *= mp # Cast species mass to kg
drift_v *= va # Cast species velocity to m/s
Nj = len(mass) # Number of species
n_contr = density / nsp_ppc # Species density contribution: Each macroparticle contributes this density to a cell
vth_par = np.sqrt(kB * Tpar / mass) # Species thermal velocities
vth_perp = np.sqrt(kB * Tperp / mass)
# Number of sim particles for each species, total
N_species = np.zeros(Nj, dtype=np.int64)
for jj in range(Nj):
    # Cold species in every cell NX
    if temp_type[jj] == 0:
        N_species[jj] = nsp_ppc[jj] * NX + 2
    # Warm species only in simulation center, unless rc_hwidth = 0 (disabled)
    elif temp_type[jj] == 1:
        if rc_hwidth == 0:
            N_species[jj] = nsp_ppc[jj] * NX + 2
        else:
            N_species[jj] = nsp_ppc[jj] * 2*rc_hwidth + 2
# Spare assumes same number in each cell (doesn't account for dist=1)
# THIS CAN BE CHANGED LATER TO BE MORE MEMORY EFFICIENT. LEAVE IT HUGE FOR DEBUGGING PURPOSES.
if particle_open == 1:
    spare_ppc = nsp_ppc.copy()
else:
    spare_ppc = np.zeros(Nj, dtype=int)
N = N_species.sum() + (spare_ppc * NX).sum()
idx_start = np.asarray([np.sum(N_species[0:ii] ) for ii in range(0, Nj)]) # Start index values for each species in order
idx_end = np.asarray([np.sum(N_species[0:ii + 1]) for ii in range(0, Nj)]) # End index values for each species in order
# NOTE(review): run is hard-coded to 0 above, so this '-' branch is currently
# unreachable; it auto-numbers the run from the existing output directories.
if run == '-':
    # Work out how many runs exist, then add to it. Save a bit of work numerically increasing.
    if os.path.exists(drive + save_path) == False:
        run = 0
    else:
        run = len(os.listdir(drive + save_path))
    print('Run number AUTOSET to ', run)
else:
    run = int(run)
### MAGNETIC FIELD STUFF ###
############################
B_nodes = (np.arange(NC + 1) - NC // 2) * dx # B grid points position in space
E_nodes = (np.arange(NC) - NC // 2 + 0.5) * dx # E grid points position in space
# =============================================================================
# if homogenous == 1:
# a = 0
# B_xmax = B_eq
#
# # Also need to set any numeric values
# B_A = 0.0
# loss_cone_eq = 0.0
# loss_cone_xmax = 0.0
# theta_xmax = 0.0
# lambda_L = 0.0
# =============================================================================
#else:
print('Calculating length of field line...')
N_fl = 1e5 # Number of points to calculate field line length (higher is more accurate)
lat0 = np.arccos(np.sqrt((RE + r_A)/(RE*L))) # Latitude for this L value (at ionosphere height)
h = 2.0*lat0/float(N_fl) # Step size of lambda (latitude)
f_len = 0.0
for ii in range(int(N_fl)):
lda = ii*h - lat0 # Lattitude for this step
f_len += L*RE*np.cos(lda)*np.sqrt(4.0 - 3.0*np.cos(lda) ** 2) * h # Field line length accruance
print('Field line length = {:.2f} RE'.format(f_len/RE))
print('Simulation length = {:.2f} RE'.format(2*xmax/RE))
if xmax > f_len / 2:
sys.exit('Simulation length longer than field line. Aboring...')
print('Finding simulation boundary MLAT...')
dlam = 1e-5 # Latitude increment in radians
fx_len = 0.0; ii = 1 # Arclength/increment counters
while fx_len < xmax:
lam_i = dlam * ii # Current latitude
d_len = L * RE * np.cos(lam_i) * np.sqrt(4.0 - 3.0*np.cos(lam_i) ** 2) * dlam # Length increment
fx_len += d_len # Accrue arclength
ii += 1 # Increment counter
sys.stdout.write('\r{:.1f}% complete'.format(fx_len/xmax * 100.))
sys.stdout.flush()
print('\n')
theta_xmax = lam_i # Latitude of simulation boundary
r_xmax = L * RE * np.cos(theta_xmax) ** 2 # Radial distance of simulation boundary
B_xmax = B_eq*np.sqrt(4 - 3*np.cos(theta_xmax)**2)/np.cos(theta_xmax)**6 # Magnetic field intensity at boundary
a = (B_xmax / B_eq - 1) / xmax ** 2 # Parabolic scale factor: Fitted to B_eq, B_xmax
lambda_L = np.arccos(np.sqrt(1.0 / L)) # Lattitude of Earth's surface at this L
lat_A = np.arccos(np.sqrt((RE + r_A)/(RE*L))) # Anchor latitude in radians
B_A = B_eq * np.sqrt(4 - 3*np.cos(lat_A) ** 2)\
/ (np.cos(lat_A) ** 6) # Magnetic field at anchor point
if homogenous == 1:
a = 0
B_xmax = B_eq
loss_cone_eq = np.arcsin(np.sqrt(B_eq / B_A))*180 / np.pi # Equatorial loss cone in degrees
loss_cone_xmax = np.arcsin(np.sqrt(B_xmax / B_A)) # Boundary loss cone in radians
# Freqs based on highest magnetic field value (at simulation boundaries)
gyfreq = q*B_xmax/ mp # Proton Gyrofrequency (rad/s) at boundary (highest)
gyfreq_eq = q*B_eq / mp # Proton Gyrofrequency (rad/s) at equator (slowest)
k_max = np.pi / dx # Maximum permissible wavenumber in system (SI???)
qm_ratios = np.divide(charge, mass) # q/m ratio for each species
species_plasfreq_sq = (density * charge ** 2) / (mass * e0)
species_gyrofrequency = qm_ratios * B_eq
#%%### INPUT TESTS AND CHECKS
# rc_print is only used for the summary printout below: number of cells
# carrying ring current (whole domain when the RC half-width is disabled).
if rc_hwidth == 0:
    rc_print = NX
else:
    rc_print = rc_hwidth*2
print('Run Started')
print('Run Series : {}'.format(save_path.split('//')[-1]))
print('Run Number : {}'.format(run))
print('Particle save flag : {}\n'.format(save_particles))
print('Sim domain length : {:5.2f}R_E'.format(2 * xmax / RE))
print('Density : {:5.2f}cc'.format(ne / 1e6))
print('Equatorial B-field : {:5.2f}nT'.format(B_eq*1e9))
print('Maximum B-field : {:5.2f}nT'.format(B_xmax*1e9))
print('Iono. B-field : {:5.2f}mT'.format(B_A*1e6))
print('Equat. Loss cone : {:<5.2f} degrees '.format(loss_cone_eq))
print('Bound. Loss cone : {:<5.2f} degrees '.format(loss_cone_xmax * 180. / np.pi))
print('Maximum MLAT (+/-) : {:<5.2f} degrees '.format(theta_xmax * 180. / np.pi))
print('Iono. MLAT (+/-) : {:<5.2f} degrees\n'.format(lambda_L * 180. / np.pi))
print('Equat. Gyroperiod: : {}s'.format(round(2. * np.pi / gyfreq, 3)))
print('Inverse rad gyfreq : {}s'.format(round(1 / gyfreq, 3)))
print('Maximum sim time : {}s ({} gyroperiods)\n'.format(round(max_rev * 2. * np.pi / gyfreq_eq, 2), max_rev))
print('{} spatial cells, {} with ring current, 2x{} damped cells'.format(NX, rc_print, ND))
print('{} cells total'.format(NC))
print('{} particles initialized'.format(N_species.sum()))
print('{} particles spare'.format((spare_ppc * NX).sum()))
print('{} particles total\n'.format(N))
# Sanity check: the charge density represented by the macroparticles must
# reproduce the physical charge density ne*q to numerical precision.
# NOTE(review): density_normal_sum is computed but never used below.
density_normal_sum = (charge / q) * (density / ne)
simulated_density_per_cell = (n_contr * charge * nsp_ppc).sum()
real_density_per_cell = ne*q
if abs(simulated_density_per_cell - real_density_per_cell) / real_density_per_cell > 1e-10:
    print('--------------------------------------------------------------------------------')
    print('WARNING: DENSITY CALCULATION ISSUE: RECHECK HOW MACROPARTICLE CONTRIBUTIONS WORK')
    print('--------------------------------------------------------------------------------')
    print('')
    print('ABORTING...')
    sys.exit()
if theta_xmax > lambda_L:
    print('--------------------------------------------------')
    print('WARNING : SIMULATION DOMAIN LONGER THAN FIELD LINE')
    print('DO SOMETHING ABOUT IT')
    print('--------------------------------------------------')
    sys.exit()
# NOTE(review): unlike the two checks above, this one warns without calling
# sys.exit(); the flag normalization earlier should make it unreachable, but
# confirm that is intentional.
if particle_periodic + particle_reflect + particle_reinit > 1:
    print('--------------------------------------------------')
    print('WARNING : ONLY ONE PARTICLE BOUNDARY CONDITION ALLOWED')
    print('DO SOMETHING ABOUT IT')
    print('--------------------------------------------------')
os.system("title Hybrid Simulation :: {} :: Run {}".format(save_path.split('//')[-1], run))
| [
"joshua.s.williams@uon.edu.au"
] | joshua.s.williams@uon.edu.au |
17283d50974695ebd93d2e2f98cdaf3efb4724f5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03438/s143987298.py | 8d742e71452282deac9bebdbc846d1dab746618c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | ma = lambda :map(int,input().split())
# Input helpers: read a line of space-separated ints as list/tuple, a single
# int, and print Yes/No from a boolean.
lma = lambda :list(map(int,input().split()))
tma = lambda :tuple(map(int,input().split()))
ni = lambda:int(input())
yn = lambda fl:print("Yes") if fl else print("No")
import collections
import math
import itertools
import heapq as hq
ceil = math.ceil
# Read n, the two length-n sequences A and B, and their elementwise difference.
n = ni()
A = lma()
B = lma()
D = [B[i]-A[i] for i in range(n)]
# NOTE(review): tmp accumulates halved positive differences and the final
# answer is tmp >= 0 — assumed to implement this problem's feasibility
# condition; confirm the indentation of tmp+=d against the accepted original.
tmp = 0
for d in D:
    if d>0:
        d//=2
        tmp+=d
f=False
if tmp>=0:
    f=True
yn(f)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f0e447ddffc1c9a7edeeb540fb89de63cea9b26c | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtWidgets/QStyleOptionTabWidgetFrame.py | 6fc9ddbe4f0b04937109786e7e89b57e0c786274 | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # encoding: utf-8
# module PyQt5.QtWidgets
# from F:\Python\Python36\lib\site-packages\PyQt5\QtWidgets.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import sip as __sip
from .QStyleOption import QStyleOption
# Auto-generated PyQt5 stub: signatures are reconstructed from __doc__, so
# the constructor body is a placeholder (the real implementation lives in
# the compiled QtWidgets extension module).
class QStyleOptionTabWidgetFrame(QStyleOption):
    """
    QStyleOptionTabWidgetFrame()
    QStyleOptionTabWidgetFrame(QStyleOptionTabWidgetFrame)
    """
    def __init__(self, QStyleOptionTabWidgetFrame=None): # real signature unknown; restored from __doc__ with multiple overloads
        pass
    # Qt style-option type/version constants identifying this option struct.
    Type = 11
    Version = 2
| [
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
ce485319554c320f209c63112d8997942295f3fe | 6119d11cc09c3604b4dd50f590126573d49f32e2 | /illiad_article_handler_app/tests.py | 1f61a9cc6b32d9cf0b524765ff676e5dc9556160 | [] | no_license | Brown-University-Library/illiad_article_handler_project | 393ed1bdda373e10d4ed3a6b886ef3d19ebea726 | 730d87eab14d826a86c572896a99dd124f785e9b | refs/heads/main | 2023-08-03T03:22:07.049285 | 2021-09-22T14:18:37 | 2021-09-22T14:18:37 | 402,741,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import logging
# from django.test import TestCase (test commit)
from django.test import SimpleTestCase as TestCase ## TestCase requires db, this doesn't
log = logging.getLogger(__name__)
# class Url_Test( TestCase ):
# """ Checks redirect output. """
# def test_redirect(self):
# """ Checks that redirect is returned. """
# response = self.client.get( '/handler/', {'ItemTitle': 'some title' } ) # project root part of url is assumed
# self.assertEqual( 302, response.status_code ) # redirect
# self.assertEqual( 'https://jcbl.aeon.atlas-sys.com/aeon.dll?ItemTitle=some+title', response.headers['Location'] )
# def test_colon(self):
# """ Checks that colon is encoded. """
# response = self.client.get( '/handler/', {'ItemTitle': 'some: title' } ) # project root part of url is assumed
# self.assertEqual( 'https://jcbl.aeon.atlas-sys.com/aeon.dll?ItemTitle=some%3A+title', response.headers['Location'] )
| [
"birkin.diana@gmail.com"
] | birkin.diana@gmail.com |
05b29d6292aae1d9423128712bab8fcdecadc505 | 1508b3e3f56e750e38db4334343beedcbb2f9c95 | /224/ex161.py | 26d116055d1d730ea0be6d10164c80f4d857bc77 | [] | no_license | kellyseeme/pythonexample | 3bb325e31c677160c1abd6c3f314f7ef3af55daa | 3eab43cdfa5c59a0f4553de84c9de21e5ded44bb | refs/heads/master | 2021-01-21T13:52:43.076697 | 2016-05-30T06:32:37 | 2016-05-30T06:32:37 | 51,348,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | #!/usr/bin/env python
from sys import argv
# Unpack the command-line arguments: argv[0] is the script path, argv[1] the
# file to display (raises ValueError if the argument count differs).
script, filename = argv

# BUG FIX: use a context manager so the file handle is always closed (the
# original opened the file and never closed it), and parenthesize print so
# the script behaves identically under Python 2 and Python 3.
with open(filename) as txt:
    print(txt.read())
| [
"root@python.(none)"
] | root@python.(none) |
cbea9fe9444f660a8102289ab002207f5f9e2d8c | be429a1e5e4903616a4532c1bf238df20fea75c0 | /6.8/671.二叉树中第二小的节点.py | 092fde588c9f0ce330899c0d7faf5c961c923a0c | [] | no_license | pythonnewbird/LeetCodeSolution | ccc8cc17df4cea3109d84b0c347ae91c1bc33a28 | 2447f760f08fb3879c5f03d8650e30ff74115d3d | refs/heads/master | 2020-03-19T05:06:06.681429 | 2018-07-01T12:39:09 | 2018-07-01T12:39:09 | 135,899,944 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findSecondMinimumValue(self, root):
        """
        Return the smallest value in the tree strictly greater than the
        root's value (the tree's minimum), or -1 if no such value exists.

        :type root: TreeNode
        :rtype: int
        """
        SENTINEL = 0x80000000          # above any 32-bit node value
        smallest = root.val            # root holds the minimum by construction
        runner_up = SENTINEL
        # Iterative DFS with an explicit stack instead of recursion.
        stack = [root]
        while stack:
            node = stack.pop()
            if node is None:
                continue
            if smallest < node.val < runner_up:
                runner_up = node.val
            stack.append(node.left)
            stack.append(node.right)
        return runner_up if runner_up != SENTINEL else -1
"21637007@zju.edu.cn"
] | 21637007@zju.edu.cn |
0bd52df15944ef40fba1a939026b011255408281 | 7400b1c16c81bba23e0e54c0aa77c116bd814761 | /.env/lib/python3.7/site-packages/aws_cdk/aws_autoscaling/__init__.py | f0eebbe22f45e429e9c8b83840ac07c4e48f2da6 | [] | no_license | vbloise3/hi_avail_cdk | 917be953809d5964200cf533233c8de224a88019 | 210052b02d5f63cba80d825a73d75035767ea709 | refs/heads/master | 2021-07-24T21:04:45.992216 | 2020-12-28T01:37:26 | 2020-12-28T01:37:26 | 231,833,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383,782 | py | """
## Amazon EC2 Auto Scaling Construct Library
<!--BEGIN STABILITY BANNER-->---

---
<!--END STABILITY BANNER-->
This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.
### Fleet
### Auto Scaling Group
An `AutoScalingGroup` represents a number of instances on which you run your code. You
pick the size of the fleet, the instance type and the OS image:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_autoscaling as autoscaling
import aws_cdk.aws_ec2 as ec2
autoscaling.AutoScalingGroup(self, "ASG",
vpc=vpc,
instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),
machine_image=ec2.AmazonLinuxImage()
)
```
> NOTE: AutoScalingGroup has a property called `allowAllOutbound` (allowing the instances to contact the
> internet) which is set to `true` by default. Be sure to set this to `false` if you don't want
> your instances to be able to start arbitrary connections.
### Machine Images (AMIs)
AMIs control the OS that gets launched when you start your EC2 instance. The EC2
library contains constructs to select the AMI you want to use.
Depending on the type of AMI, you select it a different way.
The latest version of Amazon Linux and Microsoft Windows images are
selectable by instantiating one of these classes:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
# Pick a Windows edition to use
windows = ec2.WindowsImage(ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE)
# Pick the right Amazon Linux edition. All arguments shown are optional
# and will default to these values when omitted.
amzn_linux = ec2.AmazonLinuxImage(
generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX,
edition=ec2.AmazonLinuxEdition.STANDARD,
virtualization=ec2.AmazonLinuxVirt.HVM,
storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
)
# For other custom (Linux) images, instantiate a `GenericLinuxImage` with
# a map giving the AMI to in for each region:
linux = ec2.GenericLinuxImage({
"us-east-1": "ami-97785bed",
"eu-west-1": "ami-12345678"
})
```
> NOTE: The Amazon Linux images selected will be cached in your `cdk.json`, so that your
> AutoScalingGroups don't automatically change out from under you when you're making unrelated
> changes. To update to the latest version of Amazon Linux, remove the cache entry from the `context`
> section of your `cdk.json`.
>
> We will add command-line options to make this step easier in the future.
### AutoScaling Instance Counts
AutoScalingGroups make it possible to raise and lower the number of instances in the group,
in response to (or in advance of) changes in workload.
When you create your AutoScalingGroup, you specify a `minCapacity` and a
`maxCapacity`. AutoScaling policies that respond to metrics will never go higher
or lower than the indicated capacity (but scheduled scaling actions might, see
below).
There are three ways to scale your capacity:
* **In response to a metric** (also known as step scaling); for example, you
might want to scale out if the CPU usage across your cluster starts to rise,
and scale in when it drops again.
* **By trying to keep a certain metric around a given value** (also known as
target tracking scaling); you might want to automatically scale out and in to
keep your CPU usage around 50%.
* **On a schedule**; you might want to organize your scaling around traffic
flows you expect, by scaling out in the morning and scaling in in the
evening.
The general pattern of autoscaling will look like this:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group = autoscaling.AutoScalingGroup(self, "ASG",
min_capacity=5,
max_capacity=100
)
# Step scaling
auto_scaling_group.scale_on_metric(...)
# Target tracking scaling
auto_scaling_group.scale_on_cpu_utilization(...)
auto_scaling_group.scale_on_incoming_bytes(...)
auto_scaling_group.scale_on_outgoing_bytes(...)
auto_scaling_group.scale_on_request_count(...)
auto_scaling_group.scale_to_track_metric(...)
# Scheduled scaling
auto_scaling_group.scale_on_schedule(...)
```
#### Step Scaling
This type of scaling scales in and out in deterministic steps that you
configure, in response to metric values. For example, your scaling strategy to
scale in response to a metric that represents your average worker pool usage
might look like this:
```
Scaling -1 (no change) +1 +3
│ │ │ │ │
├────────┼───────────────────────┼────────┼────────┤
│ │ │ │ │
Worker use 0% 10% 50% 70% 100%
```
(Note that this is not necessarily a recommended scaling strategy, but it's
a possible one. You will have to determine what thresholds are right for you).
Note that in order to set up this scaling strategy, you will have to emit a
metric representing your worker utilization from your instances. After that,
you would configure the scaling something like this:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
worker_utilization_metric = cloudwatch.Metric(
namespace="MyService",
metric_name="WorkerUtilization"
)
capacity.scale_on_metric("ScaleToCPU",
metric=worker_utilization_metric,
scaling_steps=[{"upper": 10, "change": -1}, {"lower": 50, "change": +1}, {"lower": 70, "change": +3}
],
# Change this to AdjustmentType.PERCENT_CHANGE_IN_CAPACITY to interpret the
# 'change' numbers before as percentages instead of capacity counts.
adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY
)
```
The AutoScaling construct library will create the required CloudWatch alarms and
AutoScaling policies for you.
#### Target Tracking Scaling
This type of scaling scales in and out in order to keep a metric around a value
you prefer. There are four types of predefined metrics you can track, or you can
choose to track a custom metric. If you do choose to track a custom metric,
be aware that the metric has to represent instance utilization in some way
(AutoScaling will scale out if the metric is higher than the target, and scale
in if the metric is lower than the target).
If you configure multiple target tracking policies, AutoScaling will use the
one that yields the highest capacity.
The following example scales to keep the CPU usage of your instances around
50% utilization:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_cpu_utilization("KeepSpareCPU",
target_utilization_percent=50
)
```
To scale on average network traffic in and out of your instances:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_incoming_bytes("LimitIngressPerInstance",
target_bytes_per_second=10 * 1024 * 1024
)
auto_scaling_group.scale_on_outgoing_bytes("LimitEgressPerInstance",
target_bytes_per_second=10 * 1024 * 1024
)
```
To scale on the average request count per instance (only works for
AutoScalingGroups that have been attached to Application Load
Balancers):
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_request_count("LimitRPS",
target_requests_per_second=1000
)
```
#### Scheduled Scaling
This type of scaling is used to change capacities based on time. It works by
changing `minCapacity`, `maxCapacity` and `desiredCapacity` of the
AutoScalingGroup, and so can be used for two purposes:
* Scale in and out on a schedule by setting the `minCapacity` high or
the `maxCapacity` low.
* Still allow the regular scaling actions to do their job, but restrict
the range they can scale over (by setting both `minCapacity` and
`maxCapacity` but changing their range over time).
A schedule is expressed as a cron expression. The `Schedule` class has a `cron` method to help build cron expressions.
The following example scales the fleet out in the morning, going back to natural
scaling (all the way down to 1 instance if necessary) at night:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_schedule("PrescaleInTheMorning",
schedule=autoscaling.Schedule.cron(hour="8", minute="0"),
min_capacity=20
)
auto_scaling_group.scale_on_schedule("AllowDownscalingAtNight",
schedule=autoscaling.Schedule.cron(hour="20", minute="0"),
min_capacity=1
)
```
### Allowing Connections
See the documentation of the `@aws-cdk/aws-ec2` package for more information
about allowing connections between resources backed by instances.
### Future work
* [ ] CloudWatch Events (impossible to add currently as the AutoScalingGroup ARN is
necessary to make this rule and this cannot be accessed from CloudFormation).
"""
import abc
import builtins
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
import aws_cdk.aws_autoscaling_common
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_ec2
import aws_cdk.aws_elasticloadbalancing
import aws_cdk.aws_elasticloadbalancingv2
import aws_cdk.aws_iam
import aws_cdk.aws_sns
import aws_cdk.core
# Load the bundled JSII assembly (the packed original TypeScript module) so the
# generated classes below can bind to their runtime counterparts by jsii_type.
__jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-autoscaling", "1.21.1", __name__, "aws-autoscaling@1.21.1.jsii.tgz")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.AdjustmentTier", jsii_struct_bases=[], name_mapping={'adjustment': 'adjustment', 'lower_bound': 'lowerBound', 'upper_bound': 'upperBound'})
class AdjustmentTier():
    def __init__(self, *, adjustment: jsii.Number, lower_bound: typing.Optional[jsii.Number]=None, upper_bound: typing.Optional[jsii.Number]=None):
        """One tier of a step-scaling adjustment.

        :param adjustment: What number to adjust the capacity with. The number is interpreted as an added capacity, a new fixed capacity or an added percentage depending on the AdjustmentType value of the StepScalingPolicy. Can be positive or negative.
        :param lower_bound: Lower bound where this scaling tier applies. The tier applies if the difference between the metric value and its alarm threshold is higher than this value. Default: -Infinity if this is the first tier, otherwise the upperBound of the previous tier
        :param upper_bound: Upper bound where this scaling tier applies. The tier applies if the difference between the metric value and its alarm threshold is lower than this value. Default: +Infinity
        """
        # Required key is always present; optional keys are recorded only when
        # explicitly supplied, so an absent key means "use the default".
        self._values = {'adjustment': adjustment}
        for key, value in (('lower_bound', lower_bound), ('upper_bound', upper_bound)):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def adjustment(self) -> jsii.Number:
        """Capacity change for this tier.

        Interpreted as an added capacity, a new fixed capacity or an added
        percentage depending on the StepScalingPolicy's AdjustmentType.
        Can be positive or negative.
        """
        return self._values.get('adjustment')

    @builtins.property
    def lower_bound(self) -> typing.Optional[jsii.Number]:
        """Lower bound on (metric value - alarm threshold) for this tier, or None.

        default
        :default: -Infinity if this is the first tier, otherwise the upperBound of the previous tier
        """
        return self._values.get('lower_bound')

    @builtins.property
    def upper_bound(self) -> typing.Optional[jsii.Number]:
        """Upper bound on (metric value - alarm threshold) for this tier, or None.

        default
        :default: +Infinity
        """
        return self._values.get('upper_bound')

    def __eq__(self, rhs) -> bool:
        # Value semantics: same concrete class and identical backing dict.
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ('{}={}'.format(key, repr(value)) for key, value in self._values.items())
        return 'AdjustmentTier({})'.format(', '.join(fields))
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.AdjustmentType")
class AdjustmentType(enum.Enum):
    """How adjustment numbers are interpreted."""
    # Absolute delta: current capacity +/- the adjustment number.
    CHANGE_IN_CAPACITY = "CHANGE_IN_CAPACITY"
    """Add the adjustment number to the current capacity.
    A positive number increases capacity, a negative number decreases capacity.
    """
    # Relative delta: adjustment is a percentage of current capacity.
    PERCENT_CHANGE_IN_CAPACITY = "PERCENT_CHANGE_IN_CAPACITY"
    """Add this percentage of the current capacity to itself.
    The number must be between -100 and 100; a positive number increases
    capacity and a negative number decreases it.
    """
    # Absolute target: capacity is set to exactly the adjustment number.
    EXACT_CAPACITY = "EXACT_CAPACITY"
    """Make the capacity equal to the exact number given."""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BaseTargetTrackingProps", jsii_struct_bases=[], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup'})
class BaseTargetTrackingProps():
    def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None):
        """Base interface for target tracking props.

        Holds the attributes common to all target tracking policies, except
        those relating to the metric and to the scalable target. Reused by the
        more specific target tracking props objects.

        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If true, the policy will not remove capacity from the autoscaling group; otherwise it can. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        # Every field is optional; keep only the explicitly supplied ones so
        # an absent key means "use the service default".
        supplied = (
            ('cooldown', cooldown),
            ('disable_scale_in', disable_scale_in),
            ('estimated_instance_warmup', estimated_instance_warmup),
        )
        self._values = {key: value for key, value in supplied if value is not None}

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        default
        :default: - The default cooldown configured on the AutoScalingGroup.
        """
        return self._values.get('cooldown')

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[bool]:
        """Whether scale-in by the target tracking policy is disabled.

        If true, the policy will not remove capacity from the autoscaling
        group; otherwise it can.

        default
        :default: false
        """
        return self._values.get('disable_scale_in')

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        default
        :default: - Same as the cooldown.
        """
        return self._values.get('estimated_instance_warmup')

    def __eq__(self, rhs) -> bool:
        # Value semantics over the backing dict.
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ('{}={}'.format(key, repr(value)) for key, value in self._values.items())
        return 'BaseTargetTrackingProps({})'.format(', '.join(fields))
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicLifecycleHookProps", jsii_struct_bases=[], name_mapping={'lifecycle_transition': 'lifecycleTransition', 'notification_target': 'notificationTarget', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'lifecycle_hook_name': 'lifecycleHookName', 'notification_metadata': 'notificationMetadata', 'role': 'role'})
class BasicLifecycleHookProps():
    # jsii-generated value struct: all data lives in self._values, and the
    # name_mapping above translates Python keys to their camelCase JSII names.
    def __init__(self, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None):
        """Basic properties for a lifecycle hook.

        :param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
        :param notification_target: The target of the lifecycle hook.
        :param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
        :param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
        :param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
        :param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
        :param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
        """
        # Required properties are always stored.
        self._values = {
            'lifecycle_transition': lifecycle_transition,
            'notification_target': notification_target,
        }
        # Optional properties are stored only when explicitly supplied, so an
        # absent key means "use the default".
        if default_result is not None: self._values["default_result"] = default_result
        if heartbeat_timeout is not None: self._values["heartbeat_timeout"] = heartbeat_timeout
        if lifecycle_hook_name is not None: self._values["lifecycle_hook_name"] = lifecycle_hook_name
        if notification_metadata is not None: self._values["notification_metadata"] = notification_metadata
        if role is not None: self._values["role"] = role

    @builtins.property
    def lifecycle_transition(self) -> "LifecycleTransition":
        """The state of the Amazon EC2 instance to which you want to attach the lifecycle hook."""
        return self._values.get('lifecycle_transition')

    @builtins.property
    def notification_target(self) -> "ILifecycleHookTarget":
        """The target of the lifecycle hook."""
        return self._values.get('notification_target')

    @builtins.property
    def default_result(self) -> typing.Optional["DefaultResult"]:
        """The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs.

        default
        :default: Continue
        """
        return self._values.get('default_result')

    @builtins.property
    def heartbeat_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Maximum time between calls to RecordLifecycleActionHeartbeat for the hook.

        If the lifecycle hook times out, perform the action in DefaultResult.

        default
        :default: - No heartbeat timeout.
        """
        return self._values.get('heartbeat_timeout')

    @builtins.property
    def lifecycle_hook_name(self) -> typing.Optional[str]:
        """Name of the lifecycle hook.

        default
        :default: - Automatically generated name.
        """
        return self._values.get('lifecycle_hook_name')

    @builtins.property
    def notification_metadata(self) -> typing.Optional[str]:
        """Additional data to pass to the lifecycle hook target.

        default
        :default: - No metadata.
        """
        return self._values.get('notification_metadata')

    @builtins.property
    def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
        """The role that allows publishing to the notification target.

        default
        :default: - A role is automatically created.
        """
        return self._values.get('role')

    def __eq__(self, rhs) -> bool:
        # Value semantics: equal iff same class and identical backing dict.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return 'BasicLifecycleHookProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicScheduledActionProps", jsii_struct_bases=[], name_mapping={'schedule': 'schedule', 'desired_capacity': 'desiredCapacity', 'end_time': 'endTime', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'start_time': 'startTime'})
class BasicScheduledActionProps():
    # jsii-generated value struct: all data lives in self._values, and the
    # name_mapping above translates Python keys to their camelCase JSII names.
    def __init__(self, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None):
        """Properties for a scheduled scaling action.

        :param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
        :param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
        :param end_time: When this scheduled action expires. Default: - The rule never expires.
        :param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
        :param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is active immediately.
        """
        # Required property is always stored; optional ones only when supplied.
        self._values = {
            'schedule': schedule,
        }
        if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
        if end_time is not None: self._values["end_time"] = end_time
        if max_capacity is not None: self._values["max_capacity"] = max_capacity
        if min_capacity is not None: self._values["min_capacity"] = min_capacity
        if start_time is not None: self._values["start_time"] = start_time

    @builtins.property
    def schedule(self) -> "Schedule":
        """When to perform this action.

        Supports cron expressions.
        For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.

        Example::

            # Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
            08 * * ?
        """
        return self._values.get('schedule')

    @builtins.property
    def desired_capacity(self) -> typing.Optional[jsii.Number]:
        """The new desired capacity.

        At the scheduled time, set the desired capacity to the given capacity.
        At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.

        default
        :default: - No new desired capacity.
        """
        return self._values.get('desired_capacity')

    @builtins.property
    def end_time(self) -> typing.Optional[datetime.datetime]:
        """When this scheduled action expires.

        default
        :default: - The rule never expires.
        """
        return self._values.get('end_time')

    @builtins.property
    def max_capacity(self) -> typing.Optional[jsii.Number]:
        """The new maximum capacity.

        At the scheduled time, set the maximum capacity to the given capacity.
        At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.

        default
        :default: - No new maximum capacity.
        """
        return self._values.get('max_capacity')

    @builtins.property
    def min_capacity(self) -> typing.Optional[jsii.Number]:
        """The new minimum capacity.

        At the scheduled time, set the minimum capacity to the given capacity.
        At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.

        default
        :default: - No new minimum capacity.
        """
        return self._values.get('min_capacity')

    @builtins.property
    def start_time(self) -> typing.Optional[datetime.datetime]:
        """When this scheduled action becomes active.

        default
        :default: - The rule is active immediately.
        """
        return self._values.get('start_time')

    def __eq__(self, rhs) -> bool:
        # Value semantics: equal iff same class and identical backing dict.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return 'BasicScheduledActionProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicStepScalingPolicyProps", jsii_struct_bases=[], name_mapping={'metric': 'metric', 'scaling_steps': 'scalingSteps', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'min_adjustment_magnitude': 'minAdjustmentMagnitude'})
class BasicStepScalingPolicyProps():
    # jsii-generated value struct: all data lives in self._values, and the
    # name_mapping above translates Python keys to their camelCase JSII names.
    def __init__(self, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None):
        """Properties for a step scaling policy.

        :param metric: Metric to scale on.
        :param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
        :param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
        :param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
        :param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
        """
        # Required properties are always stored; optional ones only when supplied.
        self._values = {
            'metric': metric,
            'scaling_steps': scaling_steps,
        }
        if adjustment_type is not None: self._values["adjustment_type"] = adjustment_type
        if cooldown is not None: self._values["cooldown"] = cooldown
        if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
        if min_adjustment_magnitude is not None: self._values["min_adjustment_magnitude"] = min_adjustment_magnitude

    @builtins.property
    def metric(self) -> aws_cdk.aws_cloudwatch.IMetric:
        """Metric to scale on."""
        return self._values.get('metric')

    @builtins.property
    def scaling_steps(self) -> typing.List["ScalingInterval"]:
        """The intervals for scaling.

        Maps a range of metric values to a particular scaling behavior.
        """
        return self._values.get('scaling_steps')

    @builtins.property
    def adjustment_type(self) -> typing.Optional["AdjustmentType"]:
        """How the adjustment numbers inside 'intervals' are interpreted.

        default
        :default: ChangeInCapacity
        """
        return self._values.get('adjustment_type')

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Grace period after scaling activity.

        default
        :default: Default cooldown period on your AutoScalingGroup
        """
        return self._values.get('cooldown')

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        default
        :default: Same as the cooldown
        """
        return self._values.get('estimated_instance_warmup')

    @builtins.property
    def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
        """Minimum absolute number to adjust capacity with as result of percentage scaling.

        Only when using AdjustmentType = PercentChangeInCapacity, this number controls
        the minimum absolute effect size.

        default
        :default: No minimum scaling effect
        """
        return self._values.get('min_adjustment_magnitude')

    def __eq__(self, rhs) -> bool:
        # Value semantics: equal iff same class and identical backing dict.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return 'BasicStepScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicTargetTrackingScalingPolicyProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_value': 'targetValue', 'custom_metric': 'customMetric', 'predefined_metric': 'predefinedMetric', 'resource_label': 'resourceLabel'})
class BasicTargetTrackingScalingPolicyProps(BaseTargetTrackingProps):
    # jsii-generated value struct extending BaseTargetTrackingProps; it restates
    # the inherited fields so self._values holds the complete property set.
    def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_value: jsii.Number, custom_metric: typing.Optional[aws_cdk.aws_cloudwatch.IMetric]=None, predefined_metric: typing.Optional["PredefinedMetric"]=None, resource_label: typing.Optional[str]=None):
        """Properties for a Target Tracking policy that include the metric but exclude the target.

        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        :param target_value: The target value for the metric.
        :param custom_metric: A custom metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No custom metric.
        :param predefined_metric: A predefined metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No predefined metric.
        :param resource_label: The resource label associated with the predefined metric. Should be supplied if the predefined metric is ALBRequestCountPerTarget; presumably the format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id> (the generated text here lost its placeholders — confirm against the AWS docs). Default: - No resource label.
        """
        # Note: the base-class __init__ is intentionally NOT called; this
        # struct stores the inherited optional fields itself.
        self._values = {
            'target_value': target_value,
        }
        if cooldown is not None: self._values["cooldown"] = cooldown
        if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
        if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
        if custom_metric is not None: self._values["custom_metric"] = custom_metric
        if predefined_metric is not None: self._values["predefined_metric"] = predefined_metric
        if resource_label is not None: self._values["resource_label"] = resource_label

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        default
        :default: - The default cooldown configured on the AutoScalingGroup.
        """
        return self._values.get('cooldown')

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[bool]:
        """Indicates whether scale in by the target tracking policy is disabled.

        If the value is true, scale in is disabled and the target tracking policy
        won't remove capacity from the autoscaling group. Otherwise, scale in is
        enabled and the target tracking policy can remove capacity from the
        group.

        default
        :default: false
        """
        return self._values.get('disable_scale_in')

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        default
        :default: - Same as the cooldown.
        """
        return self._values.get('estimated_instance_warmup')

    @builtins.property
    def target_value(self) -> jsii.Number:
        """The target value for the metric."""
        return self._values.get('target_value')

    @builtins.property
    def custom_metric(self) -> typing.Optional[aws_cdk.aws_cloudwatch.IMetric]:
        """A custom metric for application autoscaling.

        The metric must track utilization. Scaling out will happen if the metric is higher than
        the target value, scaling in will happen if the metric is lower than the target value.

        Exactly one of customMetric or predefinedMetric must be specified.

        default
        :default: - No custom metric.
        """
        return self._values.get('custom_metric')

    @builtins.property
    def predefined_metric(self) -> typing.Optional["PredefinedMetric"]:
        """A predefined metric for application autoscaling.

        The metric must track utilization. Scaling out will happen if the metric is higher than
        the target value, scaling in will happen if the metric is lower than the target value.

        Exactly one of customMetric or predefinedMetric must be specified.

        default
        :default: - No predefined metric.
        """
        return self._values.get('predefined_metric')

    @builtins.property
    def resource_label(self) -> typing.Optional[str]:
        """The resource label associated with the predefined metric.

        Should be supplied if the predefined metric is ALBRequestCountPerTarget.
        Presumably the format is
        app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>
        (the generated text lost its placeholders — confirm against the AWS docs).

        default
        :default: - No resource label.
        """
        return self._values.get('resource_label')

    def __eq__(self, rhs) -> bool:
        # Value semantics: equal iff same class and identical backing dict.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return 'BasicTargetTrackingScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BlockDevice", jsii_struct_bases=[], name_mapping={'device_name': 'deviceName', 'volume': 'volume', 'mapping_enabled': 'mappingEnabled'})
class BlockDevice():
    def __init__(self, *, device_name: str, volume: "BlockDeviceVolume", mapping_enabled: typing.Optional[bool]=None):
        """A single block device mapping entry.

        :param device_name: The device name exposed to the EC2 instance, e.g. "/dev/sdh" or "xvdh". See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
        :param volume: The block device volume: either an Amazon EBS volume or an ephemeral instance store volume.
        :param mapping_enabled: If false, the device mapping is suppressed. Suppressing the root device mapping can fail the EC2 health check, causing Auto Scaling to launch a replacement instance. Default: true - device mapping is left untouched
        """
        # Required keys are stored unconditionally; the optional flag only when
        # the caller set it explicitly, so "absent" means "use the default".
        self._values = {'device_name': device_name, 'volume': volume}
        if mapping_enabled is not None:
            self._values['mapping_enabled'] = mapping_enabled

    @builtins.property
    def device_name(self) -> str:
        """The device name exposed to the EC2 instance (e.g. "/dev/sdh", "xvdh").

        see
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
        """
        return self._values.get('device_name')

    @builtins.property
    def volume(self) -> "BlockDeviceVolume":
        """The block device volume: an EBS volume or an ephemeral instance store volume.

        Example::

            # Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
            BlockDeviceVolume.ebs(15) , BlockDeviceVolume.ephemeral(0)
        """
        return self._values.get('volume')

    @builtins.property
    def mapping_enabled(self) -> typing.Optional[bool]:
        """Whether the device mapping is active (False suppresses it).

        Suppressing the root device mapping can fail the EC2 health check and
        cause Auto Scaling to launch a replacement instance.

        default
        :default: true - device mapping is left untouched
        """
        return self._values.get('mapping_enabled')

    def __eq__(self, rhs) -> bool:
        # Value semantics over the backing dict.
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ('{}={}'.format(key, repr(value)) for key, value in self._values.items())
        return 'BlockDevice({})'.format(', '.join(fields))
class BlockDeviceVolume(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.BlockDeviceVolume"):
    """Describes a block device mapping for an EC2 instance or Auto Scaling group."""
    # NOTE: this class is backed by the JSII runtime — every method below
    # delegates to the bundled JavaScript implementation via jsii.create /
    # jsii.sinvoke / jsii.get rather than computing anything in Python.
    def __init__(self, ebs_device: typing.Optional["EbsDeviceProps"]=None, virtual_name: typing.Optional[str]=None) -> None:
        """
        :param ebs_device: EBS device info.
        :param virtual_name: Virtual device name.
        """
        jsii.create(BlockDeviceVolume, self, [ebs_device, virtual_name])

    @jsii.member(jsii_name="ebs")
    @builtins.classmethod
    def ebs(cls, volume_size: jsii.Number, *, encrypted: typing.Optional[bool]=None, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None) -> "BlockDeviceVolume":
        """Creates a new Elastic Block Storage device.

        :param volume_size: The volume size, in Gibibytes (GiB).
        :param encrypted: Specifies whether the EBS volume is encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption Default: false
        :param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
        :param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
        :param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
        """
        # Bundle keyword-only options into the struct the JS side expects.
        options = EbsDeviceOptions(encrypted=encrypted, delete_on_termination=delete_on_termination, iops=iops, volume_type=volume_type)
        return jsii.sinvoke(cls, "ebs", [volume_size, options])

    @jsii.member(jsii_name="ebsFromSnapshot")
    @builtins.classmethod
    def ebs_from_snapshot(cls, snapshot_id: str, *, volume_size: typing.Optional[jsii.Number]=None, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None) -> "BlockDeviceVolume":
        """Creates a new Elastic Block Storage device from an existing snapshot.

        :param snapshot_id: The snapshot ID of the volume to use.
        :param volume_size: The volume size, in Gibibytes (GiB). If you specify volumeSize, it must be equal or greater than the size of the snapshot. Default: - The snapshot size
        :param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
        :param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
        :param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
        """
        # Bundle keyword-only options into the struct the JS side expects.
        options = EbsDeviceSnapshotOptions(volume_size=volume_size, delete_on_termination=delete_on_termination, iops=iops, volume_type=volume_type)
        return jsii.sinvoke(cls, "ebsFromSnapshot", [snapshot_id, options])

    @jsii.member(jsii_name="ephemeral")
    @builtins.classmethod
    def ephemeral(cls, volume_index: jsii.Number) -> "BlockDeviceVolume":
        """Creates a virtual, ephemeral device.

        The name will be in the form ephemeral{volumeIndex}.

        :param volume_index: the volume index. Must be equal or greater than 0
        """
        return jsii.sinvoke(cls, "ephemeral", [volume_index])

    @builtins.property
    @jsii.member(jsii_name="ebsDevice")
    def ebs_device(self) -> typing.Optional["EbsDeviceProps"]:
        """EBS device info."""
        return jsii.get(self, "ebsDevice")

    @builtins.property
    @jsii.member(jsii_name="virtualName")
    def virtual_name(self) -> typing.Optional[str]:
        """Virtual device name."""
        return jsii.get(self, "virtualName")
@jsii.implements(aws_cdk.core.IInspectable)
class CfnAutoScalingGroup(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup"):
"""A CloudFormation ``AWS::AutoScaling::AutoScalingGroup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::AutoScalingGroup
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, max_size: str, min_size: str, auto_scaling_group_name: typing.Optional[str]=None, availability_zones: typing.Optional[typing.List[str]]=None, cooldown: typing.Optional[str]=None, desired_capacity: typing.Optional[str]=None, health_check_grace_period: typing.Optional[jsii.Number]=None, health_check_type: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, launch_template: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["LaunchTemplateSpecificationProperty"]]]=None, lifecycle_hook_specification_list: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "LifecycleHookSpecificationProperty"]]]]]=None, load_balancer_names: typing.Optional[typing.List[str]]=None, metrics_collection: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "MetricsCollectionProperty"]]]]]=None, mixed_instances_policy: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["MixedInstancesPolicyProperty"]]]=None, notification_configurations: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "NotificationConfigurationProperty"]]]]]=None, placement_group: typing.Optional[str]=None, service_linked_role_arn: typing.Optional[str]=None, tags: typing.Optional[typing.List["TagPropertyProperty"]]=None, target_group_arns: typing.Optional[typing.List[str]]=None, termination_policies: typing.Optional[typing.List[str]]=None, vpc_zone_identifier: typing.Optional[typing.List[str]]=None) -> None:
    """Create a new ``AWS::AutoScaling::AutoScalingGroup``.

    :param scope: - scope in which this resource is defined.
    :param id: - scoped id of the resource.
    :param max_size: ``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
    :param min_size: ``AWS::AutoScaling::AutoScalingGroup.MinSize``.
    :param auto_scaling_group_name: ``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``.
    :param availability_zones: ``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``.
    :param cooldown: ``AWS::AutoScaling::AutoScalingGroup.Cooldown``.
    :param desired_capacity: ``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``.
    :param health_check_grace_period: ``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``.
    :param health_check_type: ``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``.
    :param instance_id: ``AWS::AutoScaling::AutoScalingGroup.InstanceId``.
    :param launch_configuration_name: ``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``.
    :param launch_template: ``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``.
    :param lifecycle_hook_specification_list: ``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``.
    :param load_balancer_names: ``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``.
    :param metrics_collection: ``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``.
    :param mixed_instances_policy: ``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``.
    :param notification_configurations: ``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``.
    :param placement_group: ``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``.
    :param service_linked_role_arn: ``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``.
    :param tags: ``AWS::AutoScaling::AutoScalingGroup.Tags``.
    :param target_group_arns: ``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``.
    :param termination_policies: ``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``.
    :param vpc_zone_identifier: ``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``.
    """
    # Bundle every keyword argument into the typed props struct, then let the
    # jsii kernel construct the underlying resource object.
    props = CfnAutoScalingGroupProps(max_size=max_size, min_size=min_size, auto_scaling_group_name=auto_scaling_group_name, availability_zones=availability_zones, cooldown=cooldown, desired_capacity=desired_capacity, health_check_grace_period=health_check_grace_period, health_check_type=health_check_type, instance_id=instance_id, launch_configuration_name=launch_configuration_name, launch_template=launch_template, lifecycle_hook_specification_list=lifecycle_hook_specification_list, load_balancer_names=load_balancer_names, metrics_collection=metrics_collection, mixed_instances_policy=mixed_instances_policy, notification_configurations=notification_configurations, placement_group=placement_group, service_linked_role_arn=service_linked_role_arn, tags=tags, target_group_arns=target_group_arns, termination_policies=termination_policies, vpc_zone_identifier=vpc_zone_identifier)

    jsii.create(CfnAutoScalingGroup, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
    """Examines the CloudFormation resource and discloses attributes.

    :param inspector: - tree inspector to collect and process attributes.

    stability
    :stability: experimental
    """
    # Delegates the tree-inspection protocol to the jsii kernel.
    return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
    """Render the given property bag for CloudFormation synthesis.

    :param props: - the raw property mapping to render.
    """
    # Internal hook used during template synthesis; handled by the jsii kernel.
    return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
    """The CloudFormation resource type name for this resource class."""
    # Static read from the jsii kernel ("AWS::AutoScaling::AutoScalingGroup").
    return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
    """The resource's property bag, as maintained by the jsii kernel."""
    return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="tags")
def tags(self) -> aws_cdk.core.TagManager:
    """``AWS::AutoScaling::AutoScalingGroup.Tags``.

    Read-only: returns the resource's ``TagManager`` rather than a raw list.

    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-tags
    """
    return jsii.get(self, "tags")
# CloudFormation property ``MaxSize`` (required) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="maxSize")
def max_size(self) -> str:
    """``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-maxsize
    """
    return jsii.get(self, "maxSize")
@max_size.setter
def max_size(self, value: str):
    jsii.set(self, "maxSize", value)
# CloudFormation property ``MinSize`` (required) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="minSize")
def min_size(self) -> str:
    """``AWS::AutoScaling::AutoScalingGroup.MinSize``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-minsize
    """
    return jsii.get(self, "minSize")
@min_size.setter
def min_size(self, value: str):
    jsii.set(self, "minSize", value)
# CloudFormation property ``AutoScalingGroupName`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-autoscalinggroupname
    """
    return jsii.get(self, "autoScalingGroupName")
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, value: typing.Optional[str]):
    jsii.set(self, "autoScalingGroupName", value)
# CloudFormation property ``AvailabilityZones`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="availabilityZones")
def availability_zones(self) -> typing.Optional[typing.List[str]]:
    """``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-availabilityzones
    """
    return jsii.get(self, "availabilityZones")
@availability_zones.setter
def availability_zones(self, value: typing.Optional[typing.List[str]]):
    jsii.set(self, "availabilityZones", value)
# CloudFormation property ``Cooldown`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="cooldown")
def cooldown(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.Cooldown``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-cooldown
    """
    return jsii.get(self, "cooldown")
@cooldown.setter
def cooldown(self, value: typing.Optional[str]):
    jsii.set(self, "cooldown", value)
# CloudFormation property ``DesiredCapacity`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="desiredCapacity")
def desired_capacity(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
    """
    return jsii.get(self, "desiredCapacity")
@desired_capacity.setter
def desired_capacity(self, value: typing.Optional[str]):
    jsii.set(self, "desiredCapacity", value)
# CloudFormation property ``HealthCheckGracePeriod`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="healthCheckGracePeriod")
def health_check_grace_period(self) -> typing.Optional[jsii.Number]:
    """``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-healthcheckgraceperiod
    """
    return jsii.get(self, "healthCheckGracePeriod")
@health_check_grace_period.setter
def health_check_grace_period(self, value: typing.Optional[jsii.Number]):
    jsii.set(self, "healthCheckGracePeriod", value)
# CloudFormation property ``HealthCheckType`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="healthCheckType")
def health_check_type(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-healthchecktype
    """
    return jsii.get(self, "healthCheckType")
@health_check_type.setter
def health_check_type(self, value: typing.Optional[str]):
    jsii.set(self, "healthCheckType", value)
# CloudFormation property ``InstanceId`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="instanceId")
def instance_id(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.InstanceId``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-instanceid
    """
    return jsii.get(self, "instanceId")
@instance_id.setter
def instance_id(self, value: typing.Optional[str]):
    jsii.set(self, "instanceId", value)
# CloudFormation property ``LaunchConfigurationName`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="launchConfigurationName")
def launch_configuration_name(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-launchconfigurationname
    """
    return jsii.get(self, "launchConfigurationName")
@launch_configuration_name.setter
def launch_configuration_name(self, value: typing.Optional[str]):
    jsii.set(self, "launchConfigurationName", value)
# CloudFormation property ``LaunchTemplate`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="launchTemplate")
def launch_template(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["LaunchTemplateSpecificationProperty"]]]:
    """``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-launchtemplate
    """
    return jsii.get(self, "launchTemplate")
@launch_template.setter
def launch_template(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["LaunchTemplateSpecificationProperty"]]]):
    jsii.set(self, "launchTemplate", value)
# CloudFormation property ``LifecycleHookSpecificationList`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="lifecycleHookSpecificationList")
def lifecycle_hook_specification_list(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "LifecycleHookSpecificationProperty"]]]]]:
    """``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecificationlist
    """
    return jsii.get(self, "lifecycleHookSpecificationList")
@lifecycle_hook_specification_list.setter
def lifecycle_hook_specification_list(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "LifecycleHookSpecificationProperty"]]]]]):
    jsii.set(self, "lifecycleHookSpecificationList", value)
# CloudFormation property ``LoadBalancerNames`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="loadBalancerNames")
def load_balancer_names(self) -> typing.Optional[typing.List[str]]:
    """``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-loadbalancernames
    """
    return jsii.get(self, "loadBalancerNames")
@load_balancer_names.setter
def load_balancer_names(self, value: typing.Optional[typing.List[str]]):
    jsii.set(self, "loadBalancerNames", value)
# CloudFormation property ``MetricsCollection`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="metricsCollection")
def metrics_collection(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "MetricsCollectionProperty"]]]]]:
    """``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-metricscollection
    """
    return jsii.get(self, "metricsCollection")
@metrics_collection.setter
def metrics_collection(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "MetricsCollectionProperty"]]]]]):
    jsii.set(self, "metricsCollection", value)
# CloudFormation property ``MixedInstancesPolicy`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="mixedInstancesPolicy")
def mixed_instances_policy(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["MixedInstancesPolicyProperty"]]]:
    """``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-mixedinstancespolicy
    """
    return jsii.get(self, "mixedInstancesPolicy")
@mixed_instances_policy.setter
def mixed_instances_policy(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["MixedInstancesPolicyProperty"]]]):
    jsii.set(self, "mixedInstancesPolicy", value)
# CloudFormation property ``NotificationConfigurations`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="notificationConfigurations")
def notification_configurations(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "NotificationConfigurationProperty"]]]]]:
    """``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-notificationconfigurations
    """
    return jsii.get(self, "notificationConfigurations")
@notification_configurations.setter
def notification_configurations(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "NotificationConfigurationProperty"]]]]]):
    jsii.set(self, "notificationConfigurations", value)
# CloudFormation property ``PlacementGroup`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="placementGroup")
def placement_group(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-placementgroup
    """
    return jsii.get(self, "placementGroup")
@placement_group.setter
def placement_group(self, value: typing.Optional[str]):
    jsii.set(self, "placementGroup", value)
# CloudFormation property ``ServiceLinkedRoleARN`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="serviceLinkedRoleArn")
def service_linked_role_arn(self) -> typing.Optional[str]:
    """``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-servicelinkedrolearn
    """
    return jsii.get(self, "serviceLinkedRoleArn")
@service_linked_role_arn.setter
def service_linked_role_arn(self, value: typing.Optional[str]):
    jsii.set(self, "serviceLinkedRoleArn", value)
# CloudFormation property ``TargetGroupARNs`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="targetGroupArns")
def target_group_arns(self) -> typing.Optional[typing.List[str]]:
    """``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-targetgrouparns
    """
    return jsii.get(self, "targetGroupArns")
@target_group_arns.setter
def target_group_arns(self, value: typing.Optional[typing.List[str]]):
    jsii.set(self, "targetGroupArns", value)
# CloudFormation property ``TerminationPolicies`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="terminationPolicies")
def termination_policies(self) -> typing.Optional[typing.List[str]]:
    """``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-termpolicy
    """
    return jsii.get(self, "terminationPolicies")
@termination_policies.setter
def termination_policies(self, value: typing.Optional[typing.List[str]]):
    jsii.set(self, "terminationPolicies", value)
# CloudFormation property ``VPCZoneIdentifier`` (optional) — getter/setter delegate to the jsii kernel.
@builtins.property
@jsii.member(jsii_name="vpcZoneIdentifier")
def vpc_zone_identifier(self) -> typing.Optional[typing.List[str]]:
    """``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``.
    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-vpczoneidentifier
    """
    return jsii.get(self, "vpcZoneIdentifier")
@vpc_zone_identifier.setter
def vpc_zone_identifier(self, value: typing.Optional[typing.List[str]]):
    jsii.set(self, "vpcZoneIdentifier", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.InstancesDistributionProperty", jsii_struct_bases=[], name_mapping={'on_demand_allocation_strategy': 'onDemandAllocationStrategy', 'on_demand_base_capacity': 'onDemandBaseCapacity', 'on_demand_percentage_above_base_capacity': 'onDemandPercentageAboveBaseCapacity', 'spot_allocation_strategy': 'spotAllocationStrategy', 'spot_instance_pools': 'spotInstancePools', 'spot_max_price': 'spotMaxPrice'})
class InstancesDistributionProperty():
    def __init__(self, *, on_demand_allocation_strategy: typing.Optional[str]=None, on_demand_base_capacity: typing.Optional[jsii.Number]=None, on_demand_percentage_above_base_capacity: typing.Optional[jsii.Number]=None, spot_allocation_strategy: typing.Optional[str]=None, spot_instance_pools: typing.Optional[jsii.Number]=None, spot_max_price: typing.Optional[str]=None):
        """Struct describing the ``InstancesDistribution`` configuration block.

        :param on_demand_allocation_strategy: ``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandAllocationStrategy``.
        :param on_demand_base_capacity: ``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandBaseCapacity``.
        :param on_demand_percentage_above_base_capacity: ``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandPercentageAboveBaseCapacity``.
        :param spot_allocation_strategy: ``CfnAutoScalingGroup.InstancesDistributionProperty.SpotAllocationStrategy``.
        :param spot_instance_pools: ``CfnAutoScalingGroup.InstancesDistributionProperty.SpotInstancePools``.
        :param spot_max_price: ``CfnAutoScalingGroup.InstancesDistributionProperty.SpotMaxPrice``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html
        """
        # Only keyword arguments that were actually supplied are stored; the
        # insertion order below matches the generated name_mapping order.
        provided = {
            'on_demand_allocation_strategy': on_demand_allocation_strategy,
            'on_demand_base_capacity': on_demand_base_capacity,
            'on_demand_percentage_above_base_capacity': on_demand_percentage_above_base_capacity,
            'spot_allocation_strategy': spot_allocation_strategy,
            'spot_instance_pools': spot_instance_pools,
            'spot_max_price': spot_max_price,
        }
        self._values = {key: val for key, val in provided.items() if val is not None}

    @builtins.property
    def on_demand_allocation_strategy(self) -> typing.Optional[str]:
        """``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandAllocationStrategy``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-ondemandallocationstrategy
        """
        return self._values.get('on_demand_allocation_strategy')

    @builtins.property
    def on_demand_base_capacity(self) -> typing.Optional[jsii.Number]:
        """``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandBaseCapacity``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-ondemandbasecapacity
        """
        return self._values.get('on_demand_base_capacity')

    @builtins.property
    def on_demand_percentage_above_base_capacity(self) -> typing.Optional[jsii.Number]:
        """``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandPercentageAboveBaseCapacity``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-ondemandpercentageabovebasecapacity
        """
        return self._values.get('on_demand_percentage_above_base_capacity')

    @builtins.property
    def spot_allocation_strategy(self) -> typing.Optional[str]:
        """``CfnAutoScalingGroup.InstancesDistributionProperty.SpotAllocationStrategy``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-spotallocationstrategy
        """
        return self._values.get('spot_allocation_strategy')

    @builtins.property
    def spot_instance_pools(self) -> typing.Optional[jsii.Number]:
        """``CfnAutoScalingGroup.InstancesDistributionProperty.SpotInstancePools``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-spotinstancepools
        """
        return self._values.get('spot_instance_pools')

    @builtins.property
    def spot_max_price(self) -> typing.Optional[str]:
        """``CfnAutoScalingGroup.InstancesDistributionProperty.SpotMaxPrice``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-spotmaxprice
        """
        return self._values.get('spot_max_price')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same class and same stored value bag.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'InstancesDistributionProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LaunchTemplateOverridesProperty", jsii_struct_bases=[], name_mapping={'instance_type': 'instanceType'})
class LaunchTemplateOverridesProperty():
    def __init__(self, *, instance_type: typing.Optional[str]=None):
        """Struct describing a single ``LaunchTemplateOverrides`` entry.

        :param instance_type: ``CfnAutoScalingGroup.LaunchTemplateOverridesProperty.InstanceType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplateoverrides.html
        """
        # Store the sole optional field only when it was supplied.
        provided = {'instance_type': instance_type}
        self._values = {key: val for key, val in provided.items() if val is not None}

    @builtins.property
    def instance_type(self) -> typing.Optional[str]:
        """``CfnAutoScalingGroup.LaunchTemplateOverridesProperty.InstanceType``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplateoverrides.html#cfn-autoscaling-autoscalinggroup-launchtemplateoverrides-instancetype
        """
        return self._values.get('instance_type')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same class and same stored value bag.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'LaunchTemplateOverridesProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LaunchTemplateProperty", jsii_struct_bases=[], name_mapping={'launch_template_specification': 'launchTemplateSpecification', 'overrides': 'overrides'})
class LaunchTemplateProperty():
    def __init__(self, *, launch_template_specification: typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"], overrides: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateOverridesProperty"]]]]]=None):
        """Struct describing the ``LaunchTemplate`` block of a mixed-instances policy.

        :param launch_template_specification: ``CfnAutoScalingGroup.LaunchTemplateProperty.LaunchTemplateSpecification``.
        :param overrides: ``CfnAutoScalingGroup.LaunchTemplateProperty.Overrides``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html
        """
        # The required field is always stored; optional fields join the bag
        # only when the caller supplied them.
        self._values = {'launch_template_specification': launch_template_specification}
        optional = {'overrides': overrides}
        self._values.update({key: val for key, val in optional.items() if val is not None})

    @builtins.property
    def launch_template_specification(self) -> typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"]:
        """``CfnAutoScalingGroup.LaunchTemplateProperty.LaunchTemplateSpecification``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html#cfn-as-group-launchtemplate
        """
        return self._values.get('launch_template_specification')

    @builtins.property
    def overrides(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateOverridesProperty"]]]]]:
        """``CfnAutoScalingGroup.LaunchTemplateProperty.Overrides``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html#cfn-as-mixedinstancespolicy-overrides
        """
        return self._values.get('overrides')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same class and same stored value bag.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'LaunchTemplateProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LaunchTemplateSpecificationProperty", jsii_struct_bases=[], name_mapping={'version': 'version', 'launch_template_id': 'launchTemplateId', 'launch_template_name': 'launchTemplateName'})
class LaunchTemplateSpecificationProperty():
    def __init__(self, *, version: str, launch_template_id: typing.Optional[str]=None, launch_template_name: typing.Optional[str]=None):
        """Struct identifying a launch template by id or name plus a version.

        :param version: ``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.Version``.
        :param launch_template_id: ``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateId``.
        :param launch_template_name: ``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html
        """
        # The required ``version`` is always stored; optional identifiers join
        # the bag only when the caller supplied them.
        self._values = {'version': version}
        optional = {
            'launch_template_id': launch_template_id,
            'launch_template_name': launch_template_name,
        }
        self._values.update({key: val for key, val in optional.items() if val is not None})

    @builtins.property
    def version(self) -> str:
        """``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.Version``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-version
        """
        return self._values.get('version')

    @builtins.property
    def launch_template_id(self) -> typing.Optional[str]:
        """``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateId``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-launchtemplateid
        """
        return self._values.get('launch_template_id')

    @builtins.property
    def launch_template_name(self) -> typing.Optional[str]:
        """``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateName``.
        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-launchtemplatename
        """
        return self._values.get('launch_template_name')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same class and same stored value bag.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'LaunchTemplateSpecificationProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LifecycleHookSpecificationProperty", jsii_struct_bases=[], name_mapping={'lifecycle_hook_name': 'lifecycleHookName', 'lifecycle_transition': 'lifecycleTransition', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'notification_metadata': 'notificationMetadata', 'notification_target_arn': 'notificationTargetArn', 'role_arn': 'roleArn'})
class LifecycleHookSpecificationProperty():
def __init__(self, *, lifecycle_hook_name: str, lifecycle_transition: str, default_result: typing.Optional[str]=None, heartbeat_timeout: typing.Optional[jsii.Number]=None, notification_metadata: typing.Optional[str]=None, notification_target_arn: typing.Optional[str]=None, role_arn: typing.Optional[str]=None):
"""
:param lifecycle_hook_name: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleHookName``.
:param lifecycle_transition: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleTransition``.
:param default_result: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.DefaultResult``.
:param heartbeat_timeout: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.HeartbeatTimeout``.
:param notification_metadata: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationMetadata``.
:param notification_target_arn: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationTargetARN``.
:param role_arn: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.RoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html
"""
self._values = {
'lifecycle_hook_name': lifecycle_hook_name,
'lifecycle_transition': lifecycle_transition,
}
if default_result is not None: self._values["default_result"] = default_result
if heartbeat_timeout is not None: self._values["heartbeat_timeout"] = heartbeat_timeout
if notification_metadata is not None: self._values["notification_metadata"] = notification_metadata
if notification_target_arn is not None: self._values["notification_target_arn"] = notification_target_arn
if role_arn is not None: self._values["role_arn"] = role_arn
@builtins.property
def lifecycle_hook_name(self) -> str:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleHookName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-lifecyclehookname
"""
return self._values.get('lifecycle_hook_name')
@builtins.property
def lifecycle_transition(self) -> str:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleTransition``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-lifecycletransition
"""
return self._values.get('lifecycle_transition')
@builtins.property
def default_result(self) -> typing.Optional[str]:
    """``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.DefaultResult``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-defaultresult
    """
    # Optional field: absent keys yield None via dict.get.
    key = 'default_result'
    return self._values.get(key)
@builtins.property
def heartbeat_timeout(self) -> typing.Optional[jsii.Number]:
    """``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.HeartbeatTimeout``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-heartbeattimeout
    """
    # Optional field: absent keys yield None via dict.get.
    key = 'heartbeat_timeout'
    return self._values.get(key)
@builtins.property
def notification_metadata(self) -> typing.Optional[str]:
    """``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationMetadata``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-notificationmetadata
    """
    # Optional field: absent keys yield None via dict.get.
    key = 'notification_metadata'
    return self._values.get(key)
@builtins.property
def notification_target_arn(self) -> typing.Optional[str]:
    """``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationTargetARN``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-notificationtargetarn
    """
    # Optional field: absent keys yield None via dict.get.
    key = 'notification_target_arn'
    return self._values.get(key)
@builtins.property
def role_arn(self) -> typing.Optional[str]:
    """``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.RoleARN``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-rolearn
    """
    # Optional field: absent keys yield None via dict.get.
    key = 'role_arn'
    return self._values.get(key)
def __eq__(self, rhs) -> bool:
    """Two structs are equal when they are the same concrete type and hold equal values."""
    if not isinstance(rhs, self.__class__):
        return False
    return rhs._values == self._values
def __ne__(self, rhs) -> bool:
    """Inequality is the negation of ``==`` so the two comparisons always agree."""
    equal = rhs == self
    return not equal
def __repr__(self) -> str:
    """Render the struct as ``Name(field=value, ...)`` for debugging."""
    rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
    return 'LifecycleHookSpecificationProperty(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.MetricsCollectionProperty", jsii_struct_bases=[], name_mapping={'granularity': 'granularity', 'metrics': 'metrics'})
class MetricsCollectionProperty():
    """Value object for an ``AWS::AutoScaling::AutoScalingGroup`` ``MetricsCollection`` entry.

    Stores constructor keyword arguments in ``_values`` (optional ones only
    when not ``None``) and exposes them through read-only properties.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-metricscollection.html
    """

    def __init__(self, *, granularity: str, metrics: typing.Optional[typing.List[str]]=None):
        """
        :param granularity: ``CfnAutoScalingGroup.MetricsCollectionProperty.Granularity``.
        :param metrics: ``CfnAutoScalingGroup.MetricsCollectionProperty.Metrics``.
        """
        self._values = {'granularity': granularity}
        if metrics is not None:
            self._values['metrics'] = metrics

    @builtins.property
    def granularity(self) -> str:
        """``CfnAutoScalingGroup.MetricsCollectionProperty.Granularity``."""
        return self._values.get('granularity')

    @builtins.property
    def metrics(self) -> typing.Optional[typing.List[str]]:
        """``CfnAutoScalingGroup.MetricsCollectionProperty.Metrics``."""
        return self._values.get('metrics')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'MetricsCollectionProperty(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.MixedInstancesPolicyProperty", jsii_struct_bases=[], name_mapping={'launch_template': 'launchTemplate', 'instances_distribution': 'instancesDistribution'})
class MixedInstancesPolicyProperty():
    """Value object for an ``AWS::AutoScaling::AutoScalingGroup`` ``MixedInstancesPolicy``.

    Stores constructor keyword arguments in ``_values`` (optional ones only
    when not ``None``) and exposes them through read-only properties.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-group-mixedinstancespolicy.html
    """

    def __init__(self, *, launch_template: typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateProperty"], instances_distribution: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.InstancesDistributionProperty"]]]=None):
        """
        :param launch_template: ``CfnAutoScalingGroup.MixedInstancesPolicyProperty.LaunchTemplate``.
        :param instances_distribution: ``CfnAutoScalingGroup.MixedInstancesPolicyProperty.InstancesDistribution``.
        """
        self._values = {'launch_template': launch_template}
        if instances_distribution is not None:
            self._values['instances_distribution'] = instances_distribution

    @builtins.property
    def launch_template(self) -> typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateProperty"]:
        """``CfnAutoScalingGroup.MixedInstancesPolicyProperty.LaunchTemplate``."""
        return self._values.get('launch_template')

    @builtins.property
    def instances_distribution(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.InstancesDistributionProperty"]]]:
        """``CfnAutoScalingGroup.MixedInstancesPolicyProperty.InstancesDistribution``."""
        return self._values.get('instances_distribution')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'MixedInstancesPolicyProperty(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.NotificationConfigurationProperty", jsii_struct_bases=[], name_mapping={'topic_arn': 'topicArn', 'notification_types': 'notificationTypes'})
class NotificationConfigurationProperty():
    """Value object for an ``AWS::AutoScaling::AutoScalingGroup`` notification configuration.

    Stores constructor keyword arguments in ``_values`` (optional ones only
    when not ``None``) and exposes them through read-only properties.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-notificationconfigurations.html
    """

    def __init__(self, *, topic_arn: str, notification_types: typing.Optional[typing.List[str]]=None):
        """
        :param topic_arn: ``CfnAutoScalingGroup.NotificationConfigurationProperty.TopicARN``.
        :param notification_types: ``CfnAutoScalingGroup.NotificationConfigurationProperty.NotificationTypes``.
        """
        self._values = {'topic_arn': topic_arn}
        if notification_types is not None:
            self._values['notification_types'] = notification_types

    @builtins.property
    def topic_arn(self) -> str:
        """``CfnAutoScalingGroup.NotificationConfigurationProperty.TopicARN``."""
        return self._values.get('topic_arn')

    @builtins.property
    def notification_types(self) -> typing.Optional[typing.List[str]]:
        """``CfnAutoScalingGroup.NotificationConfigurationProperty.NotificationTypes``."""
        return self._values.get('notification_types')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'NotificationConfigurationProperty(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.TagPropertyProperty", jsii_struct_bases=[], name_mapping={'key': 'key', 'propagate_at_launch': 'propagateAtLaunch', 'value': 'value'})
class TagPropertyProperty():
    """Value object for an ``AWS::AutoScaling::AutoScalingGroup`` tag.

    All three fields are required, so ``_values`` always holds every key.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-tags.html
    """

    def __init__(self, *, key: str, propagate_at_launch: typing.Union[bool, aws_cdk.core.IResolvable], value: str):
        """
        :param key: ``CfnAutoScalingGroup.TagPropertyProperty.Key``.
        :param propagate_at_launch: ``CfnAutoScalingGroup.TagPropertyProperty.PropagateAtLaunch``.
        :param value: ``CfnAutoScalingGroup.TagPropertyProperty.Value``.
        """
        self._values = dict(key=key, propagate_at_launch=propagate_at_launch, value=value)

    @builtins.property
    def key(self) -> str:
        """``CfnAutoScalingGroup.TagPropertyProperty.Key``."""
        return self._values.get('key')

    @builtins.property
    def propagate_at_launch(self) -> typing.Union[bool, aws_cdk.core.IResolvable]:
        """``CfnAutoScalingGroup.TagPropertyProperty.PropagateAtLaunch``."""
        return self._values.get('propagate_at_launch')

    @builtins.property
    def value(self) -> str:
        """``CfnAutoScalingGroup.TagPropertyProperty.Value``."""
        return self._values.get('value')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'TagPropertyProperty(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroupProps", jsii_struct_bases=[], name_mapping={'max_size': 'maxSize', 'min_size': 'minSize', 'auto_scaling_group_name': 'autoScalingGroupName', 'availability_zones': 'availabilityZones', 'cooldown': 'cooldown', 'desired_capacity': 'desiredCapacity', 'health_check_grace_period': 'healthCheckGracePeriod', 'health_check_type': 'healthCheckType', 'instance_id': 'instanceId', 'launch_configuration_name': 'launchConfigurationName', 'launch_template': 'launchTemplate', 'lifecycle_hook_specification_list': 'lifecycleHookSpecificationList', 'load_balancer_names': 'loadBalancerNames', 'metrics_collection': 'metricsCollection', 'mixed_instances_policy': 'mixedInstancesPolicy', 'notification_configurations': 'notificationConfigurations', 'placement_group': 'placementGroup', 'service_linked_role_arn': 'serviceLinkedRoleArn', 'tags': 'tags', 'target_group_arns': 'targetGroupArns', 'termination_policies': 'terminationPolicies', 'vpc_zone_identifier': 'vpcZoneIdentifier'})
class CfnAutoScalingGroupProps():
    """Properties for defining a ``AWS::AutoScaling::AutoScalingGroup``.

    Plain value object: every constructor keyword maps 1:1 to the
    CloudFormation property of the same (camel-cased) name. ``max_size`` and
    ``min_size`` are required; every other argument is stored only when it is
    not ``None`` and surfaces again through the matching read-only property.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html
    """

    def __init__(self, *, max_size: str, min_size: str, auto_scaling_group_name: typing.Optional[str]=None, availability_zones: typing.Optional[typing.List[str]]=None, cooldown: typing.Optional[str]=None, desired_capacity: typing.Optional[str]=None, health_check_grace_period: typing.Optional[jsii.Number]=None, health_check_type: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, launch_template: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"]]]=None, lifecycle_hook_specification_list: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LifecycleHookSpecificationProperty"]]]]]=None, load_balancer_names: typing.Optional[typing.List[str]]=None, metrics_collection: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.MetricsCollectionProperty"]]]]]=None, mixed_instances_policy: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.MixedInstancesPolicyProperty"]]]=None, notification_configurations: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.NotificationConfigurationProperty"]]]]]=None, placement_group: typing.Optional[str]=None, service_linked_role_arn: typing.Optional[str]=None, tags: typing.Optional[typing.List["CfnAutoScalingGroup.TagPropertyProperty"]]=None, target_group_arns: typing.Optional[typing.List[str]]=None, termination_policies: typing.Optional[typing.List[str]]=None, vpc_zone_identifier: typing.Optional[typing.List[str]]=None):
        """
        :param max_size: ``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
        :param min_size: ``AWS::AutoScaling::AutoScalingGroup.MinSize``.

        All remaining keyword arguments are the optional
        ``AWS::AutoScaling::AutoScalingGroup`` properties of the same name;
        see the class-level link for the full CloudFormation reference.
        """
        self._values = {'max_size': max_size, 'min_size': min_size}
        # Optional properties are recorded only when explicitly provided, so
        # the struct's dict (and therefore __eq__/__repr__) omits unset keys.
        optional_fields = (
            ('auto_scaling_group_name', auto_scaling_group_name),
            ('availability_zones', availability_zones),
            ('cooldown', cooldown),
            ('desired_capacity', desired_capacity),
            ('health_check_grace_period', health_check_grace_period),
            ('health_check_type', health_check_type),
            ('instance_id', instance_id),
            ('launch_configuration_name', launch_configuration_name),
            ('launch_template', launch_template),
            ('lifecycle_hook_specification_list', lifecycle_hook_specification_list),
            ('load_balancer_names', load_balancer_names),
            ('metrics_collection', metrics_collection),
            ('mixed_instances_policy', mixed_instances_policy),
            ('notification_configurations', notification_configurations),
            ('placement_group', placement_group),
            ('service_linked_role_arn', service_linked_role_arn),
            ('tags', tags),
            ('target_group_arns', target_group_arns),
            ('termination_policies', termination_policies),
            ('vpc_zone_identifier', vpc_zone_identifier),
        )
        for field_name, field_value in optional_fields:
            if field_value is not None:
                self._values[field_name] = field_value

    @builtins.property
    def max_size(self) -> str:
        """``AWS::AutoScaling::AutoScalingGroup.MaxSize``."""
        return self._values.get('max_size')

    @builtins.property
    def min_size(self) -> str:
        """``AWS::AutoScaling::AutoScalingGroup.MinSize``."""
        return self._values.get('min_size')

    @builtins.property
    def auto_scaling_group_name(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``."""
        return self._values.get('auto_scaling_group_name')

    @builtins.property
    def availability_zones(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``."""
        return self._values.get('availability_zones')

    @builtins.property
    def cooldown(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.Cooldown``."""
        return self._values.get('cooldown')

    @builtins.property
    def desired_capacity(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``."""
        return self._values.get('desired_capacity')

    @builtins.property
    def health_check_grace_period(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``."""
        return self._values.get('health_check_grace_period')

    @builtins.property
    def health_check_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``."""
        return self._values.get('health_check_type')

    @builtins.property
    def instance_id(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.InstanceId``."""
        return self._values.get('instance_id')

    @builtins.property
    def launch_configuration_name(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``."""
        return self._values.get('launch_configuration_name')

    @builtins.property
    def launch_template(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"]]]:
        """``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``."""
        return self._values.get('launch_template')

    @builtins.property
    def lifecycle_hook_specification_list(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LifecycleHookSpecificationProperty"]]]]]:
        """``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``."""
        return self._values.get('lifecycle_hook_specification_list')

    @builtins.property
    def load_balancer_names(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``."""
        return self._values.get('load_balancer_names')

    @builtins.property
    def metrics_collection(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.MetricsCollectionProperty"]]]]]:
        """``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``."""
        return self._values.get('metrics_collection')

    @builtins.property
    def mixed_instances_policy(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.MixedInstancesPolicyProperty"]]]:
        """``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``."""
        return self._values.get('mixed_instances_policy')

    @builtins.property
    def notification_configurations(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.NotificationConfigurationProperty"]]]]]:
        """``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``."""
        return self._values.get('notification_configurations')

    @builtins.property
    def placement_group(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``."""
        return self._values.get('placement_group')

    @builtins.property
    def service_linked_role_arn(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``."""
        return self._values.get('service_linked_role_arn')

    @builtins.property
    def tags(self) -> typing.Optional[typing.List["CfnAutoScalingGroup.TagPropertyProperty"]]:
        """``AWS::AutoScaling::AutoScalingGroup.Tags``."""
        return self._values.get('tags')

    @builtins.property
    def target_group_arns(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``."""
        return self._values.get('target_group_arns')

    @builtins.property
    def termination_policies(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``."""
        return self._values.get('termination_policies')

    @builtins.property
    def vpc_zone_identifier(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``."""
        return self._values.get('vpc_zone_identifier')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ', '.join(k + '=' + repr(v) for k, v in self._values.items())
        return 'CfnAutoScalingGroupProps(%s)' % rendered
@jsii.implements(aws_cdk.core.IInspectable)
class CfnLaunchConfiguration(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfiguration"):
"""A CloudFormation ``AWS::AutoScaling::LaunchConfiguration``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::LaunchConfiguration
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, image_id: str, instance_type: str, associate_public_ip_address: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, block_device_mappings: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "BlockDeviceMappingProperty"]]]]]=None, classic_link_vpc_id: typing.Optional[str]=None, classic_link_vpc_security_groups: typing.Optional[typing.List[str]]=None, ebs_optimized: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, iam_instance_profile: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, instance_monitoring: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, kernel_id: typing.Optional[str]=None, key_name: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, placement_tenancy: typing.Optional[str]=None, ram_disk_id: typing.Optional[str]=None, security_groups: typing.Optional[typing.List[str]]=None, spot_price: typing.Optional[str]=None, user_data: typing.Optional[str]=None) -> None:
    """Create a new ``AWS::AutoScaling::LaunchConfiguration``.

    :param scope: - scope in which this resource is defined.
    :param id: - scoped id of the resource.

    ``image_id`` and ``instance_type`` are required; every other keyword is
    the optional ``AWS::AutoScaling::LaunchConfiguration`` property of the
    same name and is forwarded verbatim to ``CfnLaunchConfigurationProps``.
    """
    # Bundle all keyword arguments into the generated props struct, then hand
    # the construct tree (scope, id) plus props to the jsii kernel.
    props = CfnLaunchConfigurationProps(
        image_id=image_id,
        instance_type=instance_type,
        associate_public_ip_address=associate_public_ip_address,
        block_device_mappings=block_device_mappings,
        classic_link_vpc_id=classic_link_vpc_id,
        classic_link_vpc_security_groups=classic_link_vpc_security_groups,
        ebs_optimized=ebs_optimized,
        iam_instance_profile=iam_instance_profile,
        instance_id=instance_id,
        instance_monitoring=instance_monitoring,
        kernel_id=kernel_id,
        key_name=key_name,
        launch_configuration_name=launch_configuration_name,
        placement_tenancy=placement_tenancy,
        ram_disk_id=ram_disk_id,
        security_groups=security_groups,
        spot_price=spot_price,
        user_data=user_data,
    )
    jsii.create(CfnLaunchConfiguration, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
    """Examines the CloudFormation resource and discloses attributes.

    :param inspector: - tree inspector to collect and process attributes.

    :stability: experimental
    """
    args = [inspector]
    return jsii.invoke(self, "inspect", args)
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
    """Render the resolved property map for CloudFormation synthesis.

    :param props: - resolved properties to render.
    """
    args = [props]
    return jsii.invoke(self, "renderProperties", args)
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
    """The CloudFormation resource type name for this resource class."""
    # Static value fetched from the jsii kernel rather than hard-coded here.
    type_name = jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
    return type_name
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
    """The raw CloudFormation property map held by the jsii resource."""
    return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="imageId")
def image_id(self) -> str:
    """``AWS::AutoScaling::LaunchConfiguration.ImageId``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-imageid
    """
    # Reads through to the underlying jsii resource.
    return jsii.get(self, "imageId")

@image_id.setter
def image_id(self, value: str):
    # Writes through to the underlying jsii resource.
    jsii.set(self, "imageId", value)
@builtins.property
@jsii.member(jsii_name="instanceType")
def instance_type(self) -> str:
    """``AWS::AutoScaling::LaunchConfiguration.InstanceType``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancetype
    """
    # Reads through to the underlying jsii resource.
    return jsii.get(self, "instanceType")

@instance_type.setter
def instance_type(self, value: str):
    # Writes through to the underlying jsii resource.
    jsii.set(self, "instanceType", value)
@builtins.property
@jsii.member(jsii_name="associatePublicIpAddress")
def associate_public_ip_address(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
    """``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cf-as-launchconfig-associatepubip
    """
    # Reads through to the underlying jsii resource.
    return jsii.get(self, "associatePublicIpAddress")

@associate_public_ip_address.setter
def associate_public_ip_address(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]):
    # Writes through to the underlying jsii resource.
    jsii.set(self, "associatePublicIpAddress", value)
@builtins.property
@jsii.member(jsii_name="blockDeviceMappings")
def block_device_mappings(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "BlockDeviceMappingProperty"]]]]]:
    """``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-blockdevicemappings
    """
    # Reads through to the underlying jsii resource.
    return jsii.get(self, "blockDeviceMappings")

@block_device_mappings.setter
def block_device_mappings(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "BlockDeviceMappingProperty"]]]]]):
    # Writes through to the underlying jsii resource.
    jsii.set(self, "blockDeviceMappings", value)
@builtins.property
@jsii.member(jsii_name="classicLinkVpcId")
def classic_link_vpc_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcid
"""
return jsii.get(self, "classicLinkVpcId")
@classic_link_vpc_id.setter
def classic_link_vpc_id(self, value: typing.Optional[str]):
jsii.set(self, "classicLinkVpcId", value)
@builtins.property
@jsii.member(jsii_name="classicLinkVpcSecurityGroups")
def classic_link_vpc_security_groups(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcsecuritygroups
"""
return jsii.get(self, "classicLinkVpcSecurityGroups")
@classic_link_vpc_security_groups.setter
def classic_link_vpc_security_groups(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "classicLinkVpcSecurityGroups", value)
@builtins.property
@jsii.member(jsii_name="ebsOptimized")
def ebs_optimized(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ebsoptimized
"""
return jsii.get(self, "ebsOptimized")
@ebs_optimized.setter
def ebs_optimized(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]):
jsii.set(self, "ebsOptimized", value)
@builtins.property
@jsii.member(jsii_name="iamInstanceProfile")
def iam_instance_profile(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-iaminstanceprofile
"""
return jsii.get(self, "iamInstanceProfile")
@iam_instance_profile.setter
def iam_instance_profile(self, value: typing.Optional[str]):
jsii.set(self, "iamInstanceProfile", value)
@builtins.property
@jsii.member(jsii_name="instanceId")
def instance_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instanceid
"""
return jsii.get(self, "instanceId")
@instance_id.setter
def instance_id(self, value: typing.Optional[str]):
jsii.set(self, "instanceId", value)
@builtins.property
@jsii.member(jsii_name="instanceMonitoring")
def instance_monitoring(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancemonitoring
"""
return jsii.get(self, "instanceMonitoring")
@instance_monitoring.setter
def instance_monitoring(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]):
jsii.set(self, "instanceMonitoring", value)
@builtins.property
@jsii.member(jsii_name="kernelId")
def kernel_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.KernelId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-kernelid
"""
return jsii.get(self, "kernelId")
@kernel_id.setter
def kernel_id(self, value: typing.Optional[str]):
jsii.set(self, "kernelId", value)
@builtins.property
@jsii.member(jsii_name="keyName")
def key_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.KeyName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-keyname
"""
return jsii.get(self, "keyName")
@key_name.setter
def key_name(self, value: typing.Optional[str]):
jsii.set(self, "keyName", value)
@builtins.property
@jsii.member(jsii_name="launchConfigurationName")
def launch_configuration_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-autoscaling-launchconfig-launchconfigurationname
"""
return jsii.get(self, "launchConfigurationName")
@launch_configuration_name.setter
def launch_configuration_name(self, value: typing.Optional[str]):
jsii.set(self, "launchConfigurationName", value)
@builtins.property
@jsii.member(jsii_name="placementTenancy")
def placement_tenancy(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-placementtenancy
"""
return jsii.get(self, "placementTenancy")
@placement_tenancy.setter
def placement_tenancy(self, value: typing.Optional[str]):
jsii.set(self, "placementTenancy", value)
@builtins.property
@jsii.member(jsii_name="ramDiskId")
def ram_disk_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ramdiskid
"""
return jsii.get(self, "ramDiskId")
@ram_disk_id.setter
def ram_disk_id(self, value: typing.Optional[str]):
jsii.set(self, "ramDiskId", value)
@builtins.property
@jsii.member(jsii_name="securityGroups")
def security_groups(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-securitygroups
"""
return jsii.get(self, "securityGroups")
@security_groups.setter
def security_groups(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "securityGroups", value)
@builtins.property
@jsii.member(jsii_name="spotPrice")
def spot_price(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-spotprice
"""
return jsii.get(self, "spotPrice")
@spot_price.setter
def spot_price(self, value: typing.Optional[str]):
jsii.set(self, "spotPrice", value)
@builtins.property
@jsii.member(jsii_name="userData")
def user_data(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.UserData``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-userdata
"""
return jsii.get(self, "userData")
@user_data.setter
def user_data(self, value: typing.Optional[str]):
jsii.set(self, "userData", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfiguration.BlockDeviceMappingProperty", jsii_struct_bases=[], name_mapping={'device_name': 'deviceName', 'ebs': 'ebs', 'no_device': 'noDevice', 'virtual_name': 'virtualName'})
class BlockDeviceMappingProperty():
    def __init__(self, *, device_name: str, ebs: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnLaunchConfiguration.BlockDeviceProperty"]]]=None, no_device: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, virtual_name: typing.Optional[str]=None):
        """Value object describing one CloudFormation ``BlockDeviceMapping`` entry.

        :param device_name: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.DeviceName``.
        :param ebs: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.Ebs``.
        :param no_device: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.NoDevice``.
        :param virtual_name: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.VirtualName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html
        """
        # The required field is stored unconditionally; optional fields are
        # recorded only when actually supplied, so ``_values`` also acts as
        # the which-fields-were-set record consumed by __eq__ and __repr__.
        self._values = {'device_name': device_name}
        for field, supplied in (('ebs', ebs), ('no_device', no_device), ('virtual_name', virtual_name)):
            if supplied is not None:
                self._values[field] = supplied

    @builtins.property
    def device_name(self) -> str:
        """``CfnLaunchConfiguration.BlockDeviceMappingProperty.DeviceName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-devicename
        """
        return self._values.get('device_name')

    @builtins.property
    def ebs(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnLaunchConfiguration.BlockDeviceProperty"]]]:
        """``CfnLaunchConfiguration.BlockDeviceMappingProperty.Ebs``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-ebs
        """
        return self._values.get('ebs')

    @builtins.property
    def no_device(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``CfnLaunchConfiguration.BlockDeviceMappingProperty.NoDevice``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-nodevice
        """
        return self._values.get('no_device')

    @builtins.property
    def virtual_name(self) -> typing.Optional[str]:
        """``CfnLaunchConfiguration.BlockDeviceMappingProperty.VirtualName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-virtualname
        """
        return self._values.get('virtual_name')

    def __eq__(self, rhs) -> bool:
        # Structs compare by stored-field equality, never by identity.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(name, repr(val)) for name, val in self._values.items())
        return 'BlockDeviceMappingProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfiguration.BlockDeviceProperty", jsii_struct_bases=[], name_mapping={'delete_on_termination': 'deleteOnTermination', 'encrypted': 'encrypted', 'iops': 'iops', 'snapshot_id': 'snapshotId', 'volume_size': 'volumeSize', 'volume_type': 'volumeType'})
class BlockDeviceProperty():
    def __init__(self, *, delete_on_termination: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, encrypted: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, iops: typing.Optional[jsii.Number]=None, snapshot_id: typing.Optional[str]=None, volume_size: typing.Optional[jsii.Number]=None, volume_type: typing.Optional[str]=None):
        """Value object describing one CloudFormation EBS block-device template.

        :param delete_on_termination: ``CfnLaunchConfiguration.BlockDeviceProperty.DeleteOnTermination``.
        :param encrypted: ``CfnLaunchConfiguration.BlockDeviceProperty.Encrypted``.
        :param iops: ``CfnLaunchConfiguration.BlockDeviceProperty.Iops``.
        :param snapshot_id: ``CfnLaunchConfiguration.BlockDeviceProperty.SnapshotId``.
        :param volume_size: ``CfnLaunchConfiguration.BlockDeviceProperty.VolumeSize``.
        :param volume_type: ``CfnLaunchConfiguration.BlockDeviceProperty.VolumeType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html
        """
        # Every field is optional; only the ones the caller supplied end up
        # in ``_values`` (absent keys mean "use the CloudFormation default").
        self._values = {}
        provided = (
            ('delete_on_termination', delete_on_termination),
            ('encrypted', encrypted),
            ('iops', iops),
            ('snapshot_id', snapshot_id),
            ('volume_size', volume_size),
            ('volume_type', volume_type),
        )
        for field, supplied in provided:
            if supplied is not None:
                self._values[field] = supplied

    @builtins.property
    def delete_on_termination(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``CfnLaunchConfiguration.BlockDeviceProperty.DeleteOnTermination``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-deleteonterm
        """
        return self._values.get('delete_on_termination')

    @builtins.property
    def encrypted(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``CfnLaunchConfiguration.BlockDeviceProperty.Encrypted``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-encrypted
        """
        return self._values.get('encrypted')

    @builtins.property
    def iops(self) -> typing.Optional[jsii.Number]:
        """``CfnLaunchConfiguration.BlockDeviceProperty.Iops``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-iops
        """
        return self._values.get('iops')

    @builtins.property
    def snapshot_id(self) -> typing.Optional[str]:
        """``CfnLaunchConfiguration.BlockDeviceProperty.SnapshotId``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-snapshotid
        """
        return self._values.get('snapshot_id')

    @builtins.property
    def volume_size(self) -> typing.Optional[jsii.Number]:
        """``CfnLaunchConfiguration.BlockDeviceProperty.VolumeSize``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-volumesize
        """
        return self._values.get('volume_size')

    @builtins.property
    def volume_type(self) -> typing.Optional[str]:
        """``CfnLaunchConfiguration.BlockDeviceProperty.VolumeType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-volumetype
        """
        return self._values.get('volume_type')

    def __eq__(self, rhs) -> bool:
        # Structs compare by stored-field equality, never by identity.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(name, repr(val)) for name, val in self._values.items())
        return 'BlockDeviceProperty({})'.format(rendered)
# Generated jsii struct: bag of constructor properties for
# ``CfnLaunchConfiguration``.  The ``name_mapping`` dict translates the
# Python snake_case keyword names to the CloudFormation camelCase names and
# is read reflectively by the jsii kernel — do not edit it by hand.
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfigurationProps", jsii_struct_bases=[], name_mapping={'image_id': 'imageId', 'instance_type': 'instanceType', 'associate_public_ip_address': 'associatePublicIpAddress', 'block_device_mappings': 'blockDeviceMappings', 'classic_link_vpc_id': 'classicLinkVpcId', 'classic_link_vpc_security_groups': 'classicLinkVpcSecurityGroups', 'ebs_optimized': 'ebsOptimized', 'iam_instance_profile': 'iamInstanceProfile', 'instance_id': 'instanceId', 'instance_monitoring': 'instanceMonitoring', 'kernel_id': 'kernelId', 'key_name': 'keyName', 'launch_configuration_name': 'launchConfigurationName', 'placement_tenancy': 'placementTenancy', 'ram_disk_id': 'ramDiskId', 'security_groups': 'securityGroups', 'spot_price': 'spotPrice', 'user_data': 'userData'})
class CfnLaunchConfigurationProps():
    def __init__(self, *, image_id: str, instance_type: str, associate_public_ip_address: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, block_device_mappings: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnLaunchConfiguration.BlockDeviceMappingProperty"]]]]]=None, classic_link_vpc_id: typing.Optional[str]=None, classic_link_vpc_security_groups: typing.Optional[typing.List[str]]=None, ebs_optimized: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, iam_instance_profile: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, instance_monitoring: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, kernel_id: typing.Optional[str]=None, key_name: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, placement_tenancy: typing.Optional[str]=None, ram_disk_id: typing.Optional[str]=None, security_groups: typing.Optional[typing.List[str]]=None, spot_price: typing.Optional[str]=None, user_data: typing.Optional[str]=None):
        """Properties for defining a ``AWS::AutoScaling::LaunchConfiguration``.

        :param image_id: ``AWS::AutoScaling::LaunchConfiguration.ImageId``.
        :param instance_type: ``AWS::AutoScaling::LaunchConfiguration.InstanceType``.
        :param associate_public_ip_address: ``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.
        :param block_device_mappings: ``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.
        :param classic_link_vpc_id: ``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.
        :param classic_link_vpc_security_groups: ``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.
        :param ebs_optimized: ``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.
        :param iam_instance_profile: ``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.
        :param instance_id: ``AWS::AutoScaling::LaunchConfiguration.InstanceId``.
        :param instance_monitoring: ``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.
        :param kernel_id: ``AWS::AutoScaling::LaunchConfiguration.KernelId``.
        :param key_name: ``AWS::AutoScaling::LaunchConfiguration.KeyName``.
        :param launch_configuration_name: ``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.
        :param placement_tenancy: ``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.
        :param ram_disk_id: ``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.
        :param security_groups: ``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.
        :param spot_price: ``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.
        :param user_data: ``AWS::AutoScaling::LaunchConfiguration.UserData``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html
        """
        # Required properties are always stored.
        self._values = {
            'image_id': image_id,
            'instance_type': instance_type,
        }
        # Optional properties are stored only when explicitly supplied;
        # absent keys mean "let CloudFormation apply its default".
        if associate_public_ip_address is not None: self._values["associate_public_ip_address"] = associate_public_ip_address
        if block_device_mappings is not None: self._values["block_device_mappings"] = block_device_mappings
        if classic_link_vpc_id is not None: self._values["classic_link_vpc_id"] = classic_link_vpc_id
        if classic_link_vpc_security_groups is not None: self._values["classic_link_vpc_security_groups"] = classic_link_vpc_security_groups
        if ebs_optimized is not None: self._values["ebs_optimized"] = ebs_optimized
        if iam_instance_profile is not None: self._values["iam_instance_profile"] = iam_instance_profile
        if instance_id is not None: self._values["instance_id"] = instance_id
        if instance_monitoring is not None: self._values["instance_monitoring"] = instance_monitoring
        if kernel_id is not None: self._values["kernel_id"] = kernel_id
        if key_name is not None: self._values["key_name"] = key_name
        if launch_configuration_name is not None: self._values["launch_configuration_name"] = launch_configuration_name
        if placement_tenancy is not None: self._values["placement_tenancy"] = placement_tenancy
        if ram_disk_id is not None: self._values["ram_disk_id"] = ram_disk_id
        if security_groups is not None: self._values["security_groups"] = security_groups
        if spot_price is not None: self._values["spot_price"] = spot_price
        if user_data is not None: self._values["user_data"] = user_data

    @builtins.property
    def image_id(self) -> str:
        """``AWS::AutoScaling::LaunchConfiguration.ImageId``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-imageid
        """
        return self._values.get('image_id')

    @builtins.property
    def instance_type(self) -> str:
        """``AWS::AutoScaling::LaunchConfiguration.InstanceType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancetype
        """
        return self._values.get('instance_type')

    @builtins.property
    def associate_public_ip_address(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cf-as-launchconfig-associatepubip
        """
        return self._values.get('associate_public_ip_address')

    @builtins.property
    def block_device_mappings(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnLaunchConfiguration.BlockDeviceMappingProperty"]]]]]:
        """``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-blockdevicemappings
        """
        return self._values.get('block_device_mappings')

    @builtins.property
    def classic_link_vpc_id(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcid
        """
        return self._values.get('classic_link_vpc_id')

    @builtins.property
    def classic_link_vpc_security_groups(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcsecuritygroups
        """
        return self._values.get('classic_link_vpc_security_groups')

    @builtins.property
    def ebs_optimized(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ebsoptimized
        """
        return self._values.get('ebs_optimized')

    @builtins.property
    def iam_instance_profile(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-iaminstanceprofile
        """
        return self._values.get('iam_instance_profile')

    @builtins.property
    def instance_id(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.InstanceId``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instanceid
        """
        return self._values.get('instance_id')

    @builtins.property
    def instance_monitoring(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancemonitoring
        """
        return self._values.get('instance_monitoring')

    @builtins.property
    def kernel_id(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.KernelId``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-kernelid
        """
        return self._values.get('kernel_id')

    @builtins.property
    def key_name(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.KeyName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-keyname
        """
        return self._values.get('key_name')

    @builtins.property
    def launch_configuration_name(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-autoscaling-launchconfig-launchconfigurationname
        """
        return self._values.get('launch_configuration_name')

    @builtins.property
    def placement_tenancy(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-placementtenancy
        """
        return self._values.get('placement_tenancy')

    @builtins.property
    def ram_disk_id(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ramdiskid
        """
        return self._values.get('ram_disk_id')

    @builtins.property
    def security_groups(self) -> typing.Optional[typing.List[str]]:
        """``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-securitygroups
        """
        return self._values.get('security_groups')

    @builtins.property
    def spot_price(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-spotprice
        """
        return self._values.get('spot_price')

    @builtins.property
    def user_data(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::LaunchConfiguration.UserData``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-userdata
        """
        return self._values.get('user_data')

    # Struct semantics: equality by stored values, not identity.
    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return 'CfnLaunchConfigurationProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.implements(aws_cdk.core.IInspectable)
class CfnLifecycleHook(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnLifecycleHook"):
"""A CloudFormation ``AWS::AutoScaling::LifecycleHook``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::LifecycleHook
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group_name: str, lifecycle_transition: str, default_result: typing.Optional[str]=None, heartbeat_timeout: typing.Optional[jsii.Number]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, notification_target_arn: typing.Optional[str]=None, role_arn: typing.Optional[str]=None) -> None:
"""Create a new ``AWS::AutoScaling::LifecycleHook``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param auto_scaling_group_name: ``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
:param lifecycle_transition: ``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
:param default_result: ``AWS::AutoScaling::LifecycleHook.DefaultResult``.
:param heartbeat_timeout: ``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
:param lifecycle_hook_name: ``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
:param notification_metadata: ``AWS::AutoScaling::LifecycleHook.NotificationMetadata``.
:param notification_target_arn: ``AWS::AutoScaling::LifecycleHook.NotificationTargetARN``.
:param role_arn: ``AWS::AutoScaling::LifecycleHook.RoleARN``.
"""
props = CfnLifecycleHookProps(auto_scaling_group_name=auto_scaling_group_name, lifecycle_transition=lifecycle_transition, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, notification_target_arn=notification_target_arn, role_arn=role_arn)
jsii.create(CfnLifecycleHook, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-autoscalinggroupname
"""
return jsii.get(self, "autoScalingGroupName")
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, value: str):
jsii.set(self, "autoScalingGroupName", value)
@builtins.property
@jsii.member(jsii_name="lifecycleTransition")
def lifecycle_transition(self) -> str:
"""``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-lifecycletransition
"""
return jsii.get(self, "lifecycleTransition")
@lifecycle_transition.setter
def lifecycle_transition(self, value: str):
jsii.set(self, "lifecycleTransition", value)
@builtins.property
@jsii.member(jsii_name="defaultResult")
def default_result(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.DefaultResult``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-defaultresult
"""
return jsii.get(self, "defaultResult")
@default_result.setter
def default_result(self, value: typing.Optional[str]):
jsii.set(self, "defaultResult", value)
@builtins.property
@jsii.member(jsii_name="heartbeatTimeout")
def heartbeat_timeout(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-heartbeattimeout
"""
return jsii.get(self, "heartbeatTimeout")
@heartbeat_timeout.setter
def heartbeat_timeout(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "heartbeatTimeout", value)
@builtins.property
@jsii.member(jsii_name="lifecycleHookName")
def lifecycle_hook_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-autoscaling-lifecyclehook-lifecyclehookname
"""
return jsii.get(self, "lifecycleHookName")
@lifecycle_hook_name.setter
def lifecycle_hook_name(self, value: typing.Optional[str]):
jsii.set(self, "lifecycleHookName", value)
@builtins.property
@jsii.member(jsii_name="notificationMetadata")
def notification_metadata(self) -> typing.Optional[str]:
    """The optional ``NotificationMetadata`` property of ``AWS::AutoScaling::LifecycleHook``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationmetadata
    """
    # Read the current value through the jsii kernel.
    return jsii.get(self, "notificationMetadata")

@notification_metadata.setter
def notification_metadata(self, val: typing.Optional[str]):
    # Forward the assignment to the jsii kernel under the CloudFormation name.
    jsii.set(self, "notificationMetadata", val)
@builtins.property
@jsii.member(jsii_name="notificationTargetArn")
def notification_target_arn(self) -> typing.Optional[str]:
    """The optional ``NotificationTargetARN`` property of ``AWS::AutoScaling::LifecycleHook``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationtargetarn
    """
    # Read the current value through the jsii kernel.
    return jsii.get(self, "notificationTargetArn")

@notification_target_arn.setter
def notification_target_arn(self, val: typing.Optional[str]):
    # Forward the assignment to the jsii kernel under the CloudFormation name.
    jsii.set(self, "notificationTargetArn", val)
@builtins.property
@jsii.member(jsii_name="roleArn")
def role_arn(self) -> typing.Optional[str]:
    """The optional ``RoleARN`` property of ``AWS::AutoScaling::LifecycleHook``.

    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-rolearn
    """
    # Read the current value through the jsii kernel.
    return jsii.get(self, "roleArn")

@role_arn.setter
def role_arn(self, val: typing.Optional[str]):
    # Forward the assignment to the jsii kernel under the CloudFormation name.
    jsii.set(self, "roleArn", val)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLifecycleHookProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group_name': 'autoScalingGroupName', 'lifecycle_transition': 'lifecycleTransition', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'lifecycle_hook_name': 'lifecycleHookName', 'notification_metadata': 'notificationMetadata', 'notification_target_arn': 'notificationTargetArn', 'role_arn': 'roleArn'})
class CfnLifecycleHookProps():
    def __init__(self, *, auto_scaling_group_name: str, lifecycle_transition: str, default_result: typing.Optional[str]=None, heartbeat_timeout: typing.Optional[jsii.Number]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, notification_target_arn: typing.Optional[str]=None, role_arn: typing.Optional[str]=None):
        """Properties for defining a ``AWS::AutoScaling::LifecycleHook``.

        :param auto_scaling_group_name: ``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
        :param lifecycle_transition: ``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
        :param default_result: ``AWS::AutoScaling::LifecycleHook.DefaultResult``.
        :param heartbeat_timeout: ``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
        :param lifecycle_hook_name: ``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
        :param notification_metadata: ``AWS::AutoScaling::LifecycleHook.NotificationMetadata``.
        :param notification_target_arn: ``AWS::AutoScaling::LifecycleHook.NotificationTargetARN``.
        :param role_arn: ``AWS::AutoScaling::LifecycleHook.RoleARN``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html
        """
        # Required properties are always present in the backing value map.
        self._values = {
            'auto_scaling_group_name': auto_scaling_group_name,
            'lifecycle_transition': lifecycle_transition,
        }
        # Optional properties are recorded only when the caller supplied them,
        # so absent keys mean "use the CloudFormation default".
        optionals = (
            ('default_result', default_result),
            ('heartbeat_timeout', heartbeat_timeout),
            ('lifecycle_hook_name', lifecycle_hook_name),
            ('notification_metadata', notification_metadata),
            ('notification_target_arn', notification_target_arn),
            ('role_arn', role_arn),
        )
        self._values.update((key, supplied) for key, supplied in optionals if supplied is not None)

    @builtins.property
    def auto_scaling_group_name(self) -> str:
        """The ``AutoScalingGroupName`` property (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-autoscalinggroupname
        """
        return self._values.get('auto_scaling_group_name')

    @builtins.property
    def lifecycle_transition(self) -> str:
        """The ``LifecycleTransition`` property (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-lifecycletransition
        """
        return self._values.get('lifecycle_transition')

    @builtins.property
    def default_result(self) -> typing.Optional[str]:
        """The ``DefaultResult`` property (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-defaultresult
        """
        return self._values.get('default_result')

    @builtins.property
    def heartbeat_timeout(self) -> typing.Optional[jsii.Number]:
        """The ``HeartbeatTimeout`` property (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-heartbeattimeout
        """
        return self._values.get('heartbeat_timeout')

    @builtins.property
    def lifecycle_hook_name(self) -> typing.Optional[str]:
        """The ``LifecycleHookName`` property (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-autoscaling-lifecyclehook-lifecyclehookname
        """
        return self._values.get('lifecycle_hook_name')

    @builtins.property
    def notification_metadata(self) -> typing.Optional[str]:
        """The ``NotificationMetadata`` property (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationmetadata
        """
        return self._values.get('notification_metadata')

    @builtins.property
    def notification_target_arn(self) -> typing.Optional[str]:
        """The ``NotificationTargetARN`` property (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationtargetarn
        """
        return self._values.get('notification_target_arn')

    @builtins.property
    def role_arn(self) -> typing.Optional[str]:
        """The ``RoleARN`` property (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-rolearn
        """
        return self._values.get('role_arn')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete type and identical value maps.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'CfnLifecycleHookProps({})'.format(rendered)
@jsii.implements(aws_cdk.core.IInspectable)
class CfnScalingPolicy(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy"):
    """A CloudFormation ``AWS::AutoScaling::ScalingPolicy``.

    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html
    cloudformationResource:
    :cloudformationResource:: AWS::AutoScaling::ScalingPolicy
    """
    # NOTE(review): generated jsii binding. Every member delegates to the jsii
    # kernel under the camelCase jsii_name string; the strings and signatures
    # must stay in sync with the CloudFormation resource specification, so the
    # code below should not be hand-edited.
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group_name: str, adjustment_type: typing.Optional[str]=None, cooldown: typing.Optional[str]=None, estimated_instance_warmup: typing.Optional[jsii.Number]=None, metric_aggregation_type: typing.Optional[str]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None, policy_type: typing.Optional[str]=None, scaling_adjustment: typing.Optional[jsii.Number]=None, step_adjustments: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "StepAdjustmentProperty"]]]]]=None, target_tracking_configuration: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["TargetTrackingConfigurationProperty"]]]=None) -> None:
        """Create a new ``AWS::AutoScaling::ScalingPolicy``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param auto_scaling_group_name: ``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.
        :param adjustment_type: ``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.
        :param cooldown: ``AWS::AutoScaling::ScalingPolicy.Cooldown``.
        :param estimated_instance_warmup: ``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.
        :param metric_aggregation_type: ``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.
        :param min_adjustment_magnitude: ``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.
        :param policy_type: ``AWS::AutoScaling::ScalingPolicy.PolicyType``.
        :param scaling_adjustment: ``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.
        :param step_adjustments: ``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.
        :param target_tracking_configuration: ``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.
        """
        # Bundle the keyword arguments into a typed props struct, then hand
        # construction off to the jsii kernel.
        props = CfnScalingPolicyProps(auto_scaling_group_name=auto_scaling_group_name, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, metric_aggregation_type=metric_aggregation_type, min_adjustment_magnitude=min_adjustment_magnitude, policy_type=policy_type, scaling_adjustment=scaling_adjustment, step_adjustments=step_adjustments, target_tracking_configuration=target_tracking_configuration)
        jsii.create(CfnScalingPolicy, self, [scope, id, props])

    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        """Examines the CloudFormation resource and discloses attributes.

        :param inspector: - tree inspector to collect and process attributes.

        stability
        :stability: experimental
        """
        return jsii.invoke(self, "inspect", [inspector])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
        """Render the given property bag to CloudFormation-shaped JSON.

        :param props: -
        """
        return jsii.invoke(self, "renderProperties", [props])

    @jsii.python.classproperty
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> str:
        """The CloudFormation resource type name for this resource class."""
        return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")

    @builtins.property
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
        # Raw property bag used by CfnResource when synthesizing the template.
        return jsii.get(self, "cfnProperties")

    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupName")
    def auto_scaling_group_name(self) -> str:
        """``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-autoscalinggroupname
        """
        return jsii.get(self, "autoScalingGroupName")

    @auto_scaling_group_name.setter
    def auto_scaling_group_name(self, value: str):
        jsii.set(self, "autoScalingGroupName", value)

    @builtins.property
    @jsii.member(jsii_name="adjustmentType")
    def adjustment_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-adjustmenttype
        """
        return jsii.get(self, "adjustmentType")

    @adjustment_type.setter
    def adjustment_type(self, value: typing.Optional[str]):
        jsii.set(self, "adjustmentType", value)

    @builtins.property
    @jsii.member(jsii_name="cooldown")
    def cooldown(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.Cooldown``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-cooldown
        """
        return jsii.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: typing.Optional[str]):
        jsii.set(self, "cooldown", value)

    @builtins.property
    @jsii.member(jsii_name="estimatedInstanceWarmup")
    def estimated_instance_warmup(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-estimatedinstancewarmup
        """
        return jsii.get(self, "estimatedInstanceWarmup")

    @estimated_instance_warmup.setter
    def estimated_instance_warmup(self, value: typing.Optional[jsii.Number]):
        jsii.set(self, "estimatedInstanceWarmup", value)

    @builtins.property
    @jsii.member(jsii_name="metricAggregationType")
    def metric_aggregation_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-metricaggregationtype
        """
        return jsii.get(self, "metricAggregationType")

    @metric_aggregation_type.setter
    def metric_aggregation_type(self, value: typing.Optional[str]):
        jsii.set(self, "metricAggregationType", value)

    @builtins.property
    @jsii.member(jsii_name="minAdjustmentMagnitude")
    def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-minadjustmentmagnitude
        """
        return jsii.get(self, "minAdjustmentMagnitude")

    @min_adjustment_magnitude.setter
    def min_adjustment_magnitude(self, value: typing.Optional[jsii.Number]):
        jsii.set(self, "minAdjustmentMagnitude", value)

    @builtins.property
    @jsii.member(jsii_name="policyType")
    def policy_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.PolicyType``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-policytype
        """
        return jsii.get(self, "policyType")

    @policy_type.setter
    def policy_type(self, value: typing.Optional[str]):
        jsii.set(self, "policyType", value)

    @builtins.property
    @jsii.member(jsii_name="scalingAdjustment")
    def scaling_adjustment(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-scalingadjustment
        """
        return jsii.get(self, "scalingAdjustment")

    @scaling_adjustment.setter
    def scaling_adjustment(self, value: typing.Optional[jsii.Number]):
        jsii.set(self, "scalingAdjustment", value)

    @builtins.property
    @jsii.member(jsii_name="stepAdjustments")
    def step_adjustments(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "StepAdjustmentProperty"]]]]]:
        """``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-stepadjustments
        """
        return jsii.get(self, "stepAdjustments")

    @step_adjustments.setter
    def step_adjustments(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "StepAdjustmentProperty"]]]]]):
        jsii.set(self, "stepAdjustments", value)

    @builtins.property
    @jsii.member(jsii_name="targetTrackingConfiguration")
    def target_tracking_configuration(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["TargetTrackingConfigurationProperty"]]]:
        """``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration
        """
        return jsii.get(self, "targetTrackingConfiguration")

    @target_tracking_configuration.setter
    def target_tracking_configuration(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["TargetTrackingConfigurationProperty"]]]):
        jsii.set(self, "targetTrackingConfiguration", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.CustomizedMetricSpecificationProperty", jsii_struct_bases=[], name_mapping={'metric_name': 'metricName', 'namespace': 'namespace', 'statistic': 'statistic', 'dimensions': 'dimensions', 'unit': 'unit'})
class CustomizedMetricSpecificationProperty():
    def __init__(self, *, metric_name: str, namespace: str, statistic: str, dimensions: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.MetricDimensionProperty"]]]]]=None, unit: typing.Optional[str]=None):
        """A customized CloudWatch metric specification for target tracking.

        :param metric_name: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.MetricName``.
        :param namespace: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Namespace``.
        :param statistic: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Statistic``.
        :param dimensions: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Dimensions``.
        :param unit: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Unit``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html
        """
        # Required fields are always stored in the backing value map.
        self._values = {
            'metric_name': metric_name,
            'namespace': namespace,
            'statistic': statistic,
        }
        # Optional fields are stored only when explicitly supplied.
        for key, supplied in (('dimensions', dimensions), ('unit', unit)):
            if supplied is not None:
                self._values[key] = supplied

    @builtins.property
    def metric_name(self) -> str:
        """The ``MetricName`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-metricname
        """
        return self._values.get('metric_name')

    @builtins.property
    def namespace(self) -> str:
        """The ``Namespace`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-namespace
        """
        return self._values.get('namespace')

    @builtins.property
    def statistic(self) -> str:
        """The ``Statistic`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-statistic
        """
        return self._values.get('statistic')

    @builtins.property
    def dimensions(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.MetricDimensionProperty"]]]]]:
        """The ``Dimensions`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-dimensions
        """
        return self._values.get('dimensions')

    @builtins.property
    def unit(self) -> typing.Optional[str]:
        """The ``Unit`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-unit
        """
        return self._values.get('unit')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete type and identical value maps.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'CustomizedMetricSpecificationProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.MetricDimensionProperty", jsii_struct_bases=[], name_mapping={'name': 'name', 'value': 'value'})
class MetricDimensionProperty():
    def __init__(self, *, name: str, value: str):
        """A single name/value dimension of a CloudWatch metric.

        :param name: ``CfnScalingPolicy.MetricDimensionProperty.Name``.
        :param value: ``CfnScalingPolicy.MetricDimensionProperty.Value``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html
        """
        # Both fields are required, so the value map is fully populated.
        self._values = {
            'name': name,
            'value': value,
        }

    @builtins.property
    def name(self) -> str:
        """The ``Name`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html#cfn-autoscaling-scalingpolicy-metricdimension-name
        """
        return self._values.get('name')

    @builtins.property
    def value(self) -> str:
        """The ``Value`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html#cfn-autoscaling-scalingpolicy-metricdimension-value
        """
        return self._values.get('value')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete type and identical value maps.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'MetricDimensionProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.PredefinedMetricSpecificationProperty", jsii_struct_bases=[], name_mapping={'predefined_metric_type': 'predefinedMetricType', 'resource_label': 'resourceLabel'})
class PredefinedMetricSpecificationProperty():
    def __init__(self, *, predefined_metric_type: str, resource_label: typing.Optional[str]=None):
        """A predefined metric specification for target tracking.

        :param predefined_metric_type: ``CfnScalingPolicy.PredefinedMetricSpecificationProperty.PredefinedMetricType``.
        :param resource_label: ``CfnScalingPolicy.PredefinedMetricSpecificationProperty.ResourceLabel``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html
        """
        # The metric type is required; the resource label is stored only when
        # explicitly supplied.
        self._values = {
            'predefined_metric_type': predefined_metric_type,
        }
        if resource_label is not None:
            self._values['resource_label'] = resource_label

    @builtins.property
    def predefined_metric_type(self) -> str:
        """The ``PredefinedMetricType`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html#cfn-autoscaling-scalingpolicy-predefinedmetricspecification-predefinedmetrictype
        """
        return self._values.get('predefined_metric_type')

    @builtins.property
    def resource_label(self) -> typing.Optional[str]:
        """The ``ResourceLabel`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html#cfn-autoscaling-scalingpolicy-predefinedmetricspecification-resourcelabel
        """
        return self._values.get('resource_label')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete type and identical value maps.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'PredefinedMetricSpecificationProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.StepAdjustmentProperty", jsii_struct_bases=[], name_mapping={'scaling_adjustment': 'scalingAdjustment', 'metric_interval_lower_bound': 'metricIntervalLowerBound', 'metric_interval_upper_bound': 'metricIntervalUpperBound'})
class StepAdjustmentProperty():
    def __init__(self, *, scaling_adjustment: jsii.Number, metric_interval_lower_bound: typing.Optional[jsii.Number]=None, metric_interval_upper_bound: typing.Optional[jsii.Number]=None):
        """One step of a step-scaling policy: an adjustment and its metric interval.

        :param scaling_adjustment: ``CfnScalingPolicy.StepAdjustmentProperty.ScalingAdjustment``.
        :param metric_interval_lower_bound: ``CfnScalingPolicy.StepAdjustmentProperty.MetricIntervalLowerBound``.
        :param metric_interval_upper_bound: ``CfnScalingPolicy.StepAdjustmentProperty.MetricIntervalUpperBound``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html
        """
        # The adjustment amount is required; interval bounds are stored only
        # when explicitly supplied.
        self._values = {
            'scaling_adjustment': scaling_adjustment,
        }
        bounds = (
            ('metric_interval_lower_bound', metric_interval_lower_bound),
            ('metric_interval_upper_bound', metric_interval_upper_bound),
        )
        self._values.update((key, supplied) for key, supplied in bounds if supplied is not None)

    @builtins.property
    def scaling_adjustment(self) -> jsii.Number:
        """The ``ScalingAdjustment`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html#cfn-autoscaling-scalingpolicy-stepadjustment-scalingadjustment
        """
        return self._values.get('scaling_adjustment')

    @builtins.property
    def metric_interval_lower_bound(self) -> typing.Optional[jsii.Number]:
        """The ``MetricIntervalLowerBound`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html#cfn-autoscaling-scalingpolicy-stepadjustment-metricintervallowerbound
        """
        return self._values.get('metric_interval_lower_bound')

    @builtins.property
    def metric_interval_upper_bound(self) -> typing.Optional[jsii.Number]:
        """The ``MetricIntervalUpperBound`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html#cfn-autoscaling-scalingpolicy-stepadjustment-metricintervalupperbound
        """
        return self._values.get('metric_interval_upper_bound')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete type and identical value maps.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'StepAdjustmentProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.TargetTrackingConfigurationProperty", jsii_struct_bases=[], name_mapping={'target_value': 'targetValue', 'customized_metric_specification': 'customizedMetricSpecification', 'disable_scale_in': 'disableScaleIn', 'predefined_metric_specification': 'predefinedMetricSpecification'})
class TargetTrackingConfigurationProperty():
    def __init__(self, *, target_value: jsii.Number, customized_metric_specification: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.CustomizedMetricSpecificationProperty"]]]=None, disable_scale_in: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, predefined_metric_specification: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.PredefinedMetricSpecificationProperty"]]]=None):
        """Target-tracking configuration for a scaling policy.

        :param target_value: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.TargetValue``.
        :param customized_metric_specification: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.CustomizedMetricSpecification``.
        :param disable_scale_in: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.DisableScaleIn``.
        :param predefined_metric_specification: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.PredefinedMetricSpecification``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html
        """
        # The target value is required; all other fields are stored only when
        # explicitly supplied.
        self._values = {
            'target_value': target_value,
        }
        optionals = (
            ('customized_metric_specification', customized_metric_specification),
            ('disable_scale_in', disable_scale_in),
            ('predefined_metric_specification', predefined_metric_specification),
        )
        self._values.update((key, supplied) for key, supplied in optionals if supplied is not None)

    @builtins.property
    def target_value(self) -> jsii.Number:
        """The ``TargetValue`` field (required).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-targetvalue
        """
        return self._values.get('target_value')

    @builtins.property
    def customized_metric_specification(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.CustomizedMetricSpecificationProperty"]]]:
        """The ``CustomizedMetricSpecification`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-customizedmetricspecification
        """
        return self._values.get('customized_metric_specification')

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
        """The ``DisableScaleIn`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-disablescalein
        """
        return self._values.get('disable_scale_in')

    @builtins.property
    def predefined_metric_specification(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.PredefinedMetricSpecificationProperty"]]]:
        """The ``PredefinedMetricSpecification`` field (optional).

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-predefinedmetricspecification
        """
        return self._values.get('predefined_metric_specification')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete type and identical value maps.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={!r}'.format(key, val) for key, val in self._values.items())
        return 'TargetTrackingConfigurationProperty({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicyProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group_name': 'autoScalingGroupName', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'metric_aggregation_type': 'metricAggregationType', 'min_adjustment_magnitude': 'minAdjustmentMagnitude', 'policy_type': 'policyType', 'scaling_adjustment': 'scalingAdjustment', 'step_adjustments': 'stepAdjustments', 'target_tracking_configuration': 'targetTrackingConfiguration'})
class CfnScalingPolicyProps():
    def __init__(self, *, auto_scaling_group_name: str, adjustment_type: typing.Optional[str]=None, cooldown: typing.Optional[str]=None, estimated_instance_warmup: typing.Optional[jsii.Number]=None, metric_aggregation_type: typing.Optional[str]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None, policy_type: typing.Optional[str]=None, scaling_adjustment: typing.Optional[jsii.Number]=None, step_adjustments: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.StepAdjustmentProperty"]]]]]=None, target_tracking_configuration: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.TargetTrackingConfigurationProperty"]]]=None):
        """Properties for defining a ``AWS::AutoScaling::ScalingPolicy``.

        :param auto_scaling_group_name: ``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.
        :param adjustment_type: ``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.
        :param cooldown: ``AWS::AutoScaling::ScalingPolicy.Cooldown``.
        :param estimated_instance_warmup: ``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.
        :param metric_aggregation_type: ``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.
        :param min_adjustment_magnitude: ``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.
        :param policy_type: ``AWS::AutoScaling::ScalingPolicy.PolicyType``.
        :param scaling_adjustment: ``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.
        :param step_adjustments: ``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.
        :param target_tracking_configuration: ``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html
        """
        # Only the required key is stored unconditionally; optional settings
        # are recorded solely when the caller supplied them, so __repr__ and
        # equality reflect exactly what was configured.  The tuple preserves
        # the original insertion order of the optional keys.
        self._values = {'auto_scaling_group_name': auto_scaling_group_name}
        _optionals = (
            ('adjustment_type', adjustment_type),
            ('cooldown', cooldown),
            ('estimated_instance_warmup', estimated_instance_warmup),
            ('metric_aggregation_type', metric_aggregation_type),
            ('min_adjustment_magnitude', min_adjustment_magnitude),
            ('policy_type', policy_type),
            ('scaling_adjustment', scaling_adjustment),
            ('step_adjustments', step_adjustments),
            ('target_tracking_configuration', target_tracking_configuration),
        )
        for _key, _value in _optionals:
            if _value is not None:
                self._values[_key] = _value

    @builtins.property
    def auto_scaling_group_name(self) -> str:
        """``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-autoscalinggroupname
        """
        return self._values.get('auto_scaling_group_name')

    @builtins.property
    def adjustment_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-adjustmenttype
        """
        return self._values.get('adjustment_type')

    @builtins.property
    def cooldown(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.Cooldown``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-cooldown
        """
        return self._values.get('cooldown')

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-estimatedinstancewarmup
        """
        return self._values.get('estimated_instance_warmup')

    @builtins.property
    def metric_aggregation_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-metricaggregationtype
        """
        return self._values.get('metric_aggregation_type')

    @builtins.property
    def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-minadjustmentmagnitude
        """
        return self._values.get('min_adjustment_magnitude')

    @builtins.property
    def policy_type(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScalingPolicy.PolicyType``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-policytype
        """
        return self._values.get('policy_type')

    @builtins.property
    def scaling_adjustment(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-scalingadjustment
        """
        return self._values.get('scaling_adjustment')

    @builtins.property
    def step_adjustments(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.StepAdjustmentProperty"]]]]]:
        """``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-stepadjustments
        """
        return self._values.get('step_adjustments')

    @builtins.property
    def target_tracking_configuration(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.TargetTrackingConfigurationProperty"]]]:
        """``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration
        """
        return self._values.get('target_tracking_configuration')

    def __eq__(self, rhs) -> bool:
        """Structural equality: same concrete class and identical stored values."""
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        """Logical inverse of ``==``."""
        equal = (rhs == self)
        return not equal

    def __repr__(self) -> str:
        """Render as ``CfnScalingPolicyProps(key=value, ...)``."""
        rendered = ('{}={}'.format(key, repr(value)) for key, value in self._values.items())
        return 'CfnScalingPolicyProps(%s)' % ', '.join(rendered)
@jsii.implements(aws_cdk.core.IInspectable)
class CfnScheduledAction(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnScheduledAction"):
    """A CloudFormation ``AWS::AutoScaling::ScheduledAction``.

    see
    :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html
    cloudformationResource:
    :cloudformationResource:: AWS::AutoScaling::ScheduledAction
    """

    # NOTE: appears to be jsii-generated binding code (JSIIMeta metaclass,
    # jsii.get/set kernel calls).  Python objects of this class are proxies
    # for constructs hosted by the jsii runtime, so all state access goes
    # through jsii rather than instance attributes.

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group_name: str, desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[str]=None, max_size: typing.Optional[jsii.Number]=None, min_size: typing.Optional[jsii.Number]=None, recurrence: typing.Optional[str]=None, start_time: typing.Optional[str]=None) -> None:
        """Create a new ``AWS::AutoScaling::ScheduledAction``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param auto_scaling_group_name: ``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.
        :param desired_capacity: ``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.
        :param end_time: ``AWS::AutoScaling::ScheduledAction.EndTime``.
        :param max_size: ``AWS::AutoScaling::ScheduledAction.MaxSize``.
        :param min_size: ``AWS::AutoScaling::ScheduledAction.MinSize``.
        :param recurrence: ``AWS::AutoScaling::ScheduledAction.Recurrence``.
        :param start_time: ``AWS::AutoScaling::ScheduledAction.StartTime``.
        """
        # Bundle the keyword arguments into the generated props struct, then
        # delegate construction to the jsii kernel.  (``id`` shadows the
        # builtin of the same name; the generated signature cannot be
        # changed without breaking callers.)
        props = CfnScheduledActionProps(auto_scaling_group_name=auto_scaling_group_name, desired_capacity=desired_capacity, end_time=end_time, max_size=max_size, min_size=min_size, recurrence=recurrence, start_time=start_time)
        jsii.create(CfnScheduledAction, self, [scope, id, props])

    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        """Examines the CloudFormation resource and discloses attributes.

        :param inspector: - tree inspector to collect and process attributes.

        stability
        :stability: experimental
        """
        # Forwarded to the underlying jsii object's ``inspect`` member.
        return jsii.invoke(self, "inspect", [inspector])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
        """Render ``props`` for CloudFormation via the jsii-side implementation.

        :param props: -
        """
        return jsii.invoke(self, "renderProperties", [props])

    @jsii.python.classproperty
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> str:
        """The CloudFormation resource type name for this resource class."""
        # Static (class-level) read from the jsii kernel.
        return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")

    @builtins.property
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
        # Raw property bag backing this resource, read from the jsii object.
        return jsii.get(self, "cfnProperties")

    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupName")
    def auto_scaling_group_name(self) -> str:
        """``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-asgname
        """
        return jsii.get(self, "autoScalingGroupName")

    @auto_scaling_group_name.setter
    def auto_scaling_group_name(self, value: str):
        jsii.set(self, "autoScalingGroupName", value)

    @builtins.property
    @jsii.member(jsii_name="desiredCapacity")
    def desired_capacity(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-desiredcapacity
        """
        return jsii.get(self, "desiredCapacity")

    @desired_capacity.setter
    def desired_capacity(self, value: typing.Optional[jsii.Number]):
        jsii.set(self, "desiredCapacity", value)

    @builtins.property
    @jsii.member(jsii_name="endTime")
    def end_time(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScheduledAction.EndTime``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-endtime
        """
        return jsii.get(self, "endTime")

    @end_time.setter
    def end_time(self, value: typing.Optional[str]):
        jsii.set(self, "endTime", value)

    @builtins.property
    @jsii.member(jsii_name="maxSize")
    def max_size(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScheduledAction.MaxSize``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-maxsize
        """
        return jsii.get(self, "maxSize")

    @max_size.setter
    def max_size(self, value: typing.Optional[jsii.Number]):
        jsii.set(self, "maxSize", value)

    @builtins.property
    @jsii.member(jsii_name="minSize")
    def min_size(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScheduledAction.MinSize``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-minsize
        """
        return jsii.get(self, "minSize")

    @min_size.setter
    def min_size(self, value: typing.Optional[jsii.Number]):
        jsii.set(self, "minSize", value)

    @builtins.property
    @jsii.member(jsii_name="recurrence")
    def recurrence(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScheduledAction.Recurrence``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-recurrence
        """
        return jsii.get(self, "recurrence")

    @recurrence.setter
    def recurrence(self, value: typing.Optional[str]):
        jsii.set(self, "recurrence", value)

    @builtins.property
    @jsii.member(jsii_name="startTime")
    def start_time(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScheduledAction.StartTime``.

        see
        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-starttime
        """
        return jsii.get(self, "startTime")

    @start_time.setter
    def start_time(self, value: typing.Optional[str]):
        jsii.set(self, "startTime", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScheduledActionProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group_name': 'autoScalingGroupName', 'desired_capacity': 'desiredCapacity', 'end_time': 'endTime', 'max_size': 'maxSize', 'min_size': 'minSize', 'recurrence': 'recurrence', 'start_time': 'startTime'})
class CfnScheduledActionProps():
    def __init__(self, *, auto_scaling_group_name: str, desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[str]=None, max_size: typing.Optional[jsii.Number]=None, min_size: typing.Optional[jsii.Number]=None, recurrence: typing.Optional[str]=None, start_time: typing.Optional[str]=None):
        """Properties for defining a ``AWS::AutoScaling::ScheduledAction``.

        :param auto_scaling_group_name: ``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.
        :param desired_capacity: ``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.
        :param end_time: ``AWS::AutoScaling::ScheduledAction.EndTime``.
        :param max_size: ``AWS::AutoScaling::ScheduledAction.MaxSize``.
        :param min_size: ``AWS::AutoScaling::ScheduledAction.MinSize``.
        :param recurrence: ``AWS::AutoScaling::ScheduledAction.Recurrence``.
        :param start_time: ``AWS::AutoScaling::ScheduledAction.StartTime``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html
        """
        # The required key is stored unconditionally; optional settings are
        # recorded only when supplied, preserving the original key order.
        self._values = {'auto_scaling_group_name': auto_scaling_group_name}
        _optionals = (
            ('desired_capacity', desired_capacity),
            ('end_time', end_time),
            ('max_size', max_size),
            ('min_size', min_size),
            ('recurrence', recurrence),
            ('start_time', start_time),
        )
        for _key, _value in _optionals:
            if _value is not None:
                self._values[_key] = _value

    @builtins.property
    def auto_scaling_group_name(self) -> str:
        """``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-asgname
        """
        return self._values.get('auto_scaling_group_name')

    @builtins.property
    def desired_capacity(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-desiredcapacity
        """
        return self._values.get('desired_capacity')

    @builtins.property
    def end_time(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScheduledAction.EndTime``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-endtime
        """
        return self._values.get('end_time')

    @builtins.property
    def max_size(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScheduledAction.MaxSize``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-maxsize
        """
        return self._values.get('max_size')

    @builtins.property
    def min_size(self) -> typing.Optional[jsii.Number]:
        """``AWS::AutoScaling::ScheduledAction.MinSize``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-minsize
        """
        return self._values.get('min_size')

    @builtins.property
    def recurrence(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScheduledAction.Recurrence``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-recurrence
        """
        return self._values.get('recurrence')

    @builtins.property
    def start_time(self) -> typing.Optional[str]:
        """``AWS::AutoScaling::ScheduledAction.StartTime``.

        :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-starttime
        """
        return self._values.get('start_time')

    def __eq__(self, rhs) -> bool:
        """Structural equality: same concrete class and identical stored values."""
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        """Logical inverse of ``==``."""
        equal = (rhs == self)
        return not equal

    def __repr__(self) -> str:
        """Render as ``CfnScheduledActionProps(key=value, ...)``."""
        rendered = ('{}={}'.format(key, repr(value)) for key, value in self._values.items())
        return 'CfnScheduledActionProps(%s)' % ', '.join(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CommonAutoScalingGroupProps", jsii_struct_bases=[], name_mapping={'allow_all_outbound': 'allowAllOutbound', 'associate_public_ip_address': 'associatePublicIpAddress', 'cooldown': 'cooldown', 'desired_capacity': 'desiredCapacity', 'health_check': 'healthCheck', 'ignore_unmodified_size_properties': 'ignoreUnmodifiedSizeProperties', 'key_name': 'keyName', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'notifications_topic': 'notificationsTopic', 'replacing_update_min_successful_instances_percent': 'replacingUpdateMinSuccessfulInstancesPercent', 'resource_signal_count': 'resourceSignalCount', 'resource_signal_timeout': 'resourceSignalTimeout', 'rolling_update_configuration': 'rollingUpdateConfiguration', 'spot_price': 'spotPrice', 'update_type': 'updateType', 'vpc_subnets': 'vpcSubnets'})
class CommonAutoScalingGroupProps():
    # NOTE: appears to be jsii-generated struct code.  All values live in the
    # ``_values`` dict (only keys the caller actually supplied are present);
    # the properties below are read-only views over that dict.
    def __init__(self, *, allow_all_outbound: typing.Optional[bool]=None, associate_public_ip_address: typing.Optional[bool]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, desired_capacity: typing.Optional[jsii.Number]=None, health_check: typing.Optional["HealthCheck"]=None, ignore_unmodified_size_properties: typing.Optional[bool]=None, key_name: typing.Optional[str]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, notifications_topic: typing.Optional[aws_cdk.aws_sns.ITopic]=None, replacing_update_min_successful_instances_percent: typing.Optional[jsii.Number]=None, resource_signal_count: typing.Optional[jsii.Number]=None, resource_signal_timeout: typing.Optional[aws_cdk.core.Duration]=None, rolling_update_configuration: typing.Optional["RollingUpdateConfiguration"]=None, spot_price: typing.Optional[str]=None, update_type: typing.Optional["UpdateType"]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None):
        """Basic properties of an AutoScalingGroup, except the exact machines to run and where they should run.

        Constructs that want to create AutoScalingGroups can inherit
        this interface and specialize the essential parts in various ways.

        :param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
        :param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. Default: - Use subnet setting.
        :param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
        :param desired_capacity: Initial amount of instances in the fleet. If this is set to a number, every deployment will reset the amount of instances to this number. It is recommended to leave this value blank. Default: minCapacity, and leave unchanged during deployment
        :param health_check: Configuration for health checks. Default: - HealthCheck.ec2 with no grace period
        :param ignore_unmodified_size_properties: If the ASG has scheduled actions, don't reset unchanged group sizes. Only used if the ASG has scheduled actions (which may scale your ASG up or down regardless of cdk deployments). If true, the size of the group will only be reset if it has been changed in the CDK app. If false, the sizes will always be changed back to what they were in the CDK app on deployment. Default: true
        :param key_name: Name of SSH keypair to grant access to instances. Default: - No SSH access will be possible.
        :param max_capacity: Maximum number of instances in the fleet. Default: desiredCapacity
        :param min_capacity: Minimum number of instances in the fleet. Default: 1
        :param notifications_topic: SNS topic to send notifications about fleet changes. Default: - No fleet change notifications will be sent.
        :param replacing_update_min_successful_instances_percent: Configuration for replacing updates. Only used if updateType == UpdateType.ReplacingUpdate. Specifies how many instances must signal success for the update to succeed. Default: minSuccessfulInstancesPercent
        :param resource_signal_count: How many ResourceSignal calls CloudFormation expects before the resource is considered created. Default: 1
        :param resource_signal_timeout: The length of time to wait for the resourceSignalCount. The maximum value is 43200 (12 hours). Default: Duration.minutes(5)
        :param rolling_update_configuration: Configuration for rolling updates. Only used if updateType == UpdateType.RollingUpdate. Default: - RollingUpdateConfiguration with defaults.
        :param spot_price: The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. Default: none
        :param update_type: What to do when an AutoScalingGroup's instance configuration is changed. This is applied when any of the settings on the ASG are changed that affect how the instances should be created (VPC, instance type, startup scripts, etc.). It indicates how the existing instances should be replaced with new instances matching the new config. By default, nothing is done and only new instances are launched with the new config. Default: UpdateType.None
        :param vpc_subnets: Where to place instances within the VPC. Default: - All Private subnets.
        """
        # Accept plain dicts for the nested struct parameters and coerce
        # them to the corresponding generated types.
        if isinstance(rolling_update_configuration, dict): rolling_update_configuration = RollingUpdateConfiguration(**rolling_update_configuration)
        if isinstance(vpc_subnets, dict): vpc_subnets = aws_cdk.aws_ec2.SubnetSelection(**vpc_subnets)
        # Every parameter is optional: start from an empty dict and record
        # only the settings the caller actually provided.
        self._values = {
        }
        if allow_all_outbound is not None: self._values["allow_all_outbound"] = allow_all_outbound
        if associate_public_ip_address is not None: self._values["associate_public_ip_address"] = associate_public_ip_address
        if cooldown is not None: self._values["cooldown"] = cooldown
        if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
        if health_check is not None: self._values["health_check"] = health_check
        if ignore_unmodified_size_properties is not None: self._values["ignore_unmodified_size_properties"] = ignore_unmodified_size_properties
        if key_name is not None: self._values["key_name"] = key_name
        if max_capacity is not None: self._values["max_capacity"] = max_capacity
        if min_capacity is not None: self._values["min_capacity"] = min_capacity
        if notifications_topic is not None: self._values["notifications_topic"] = notifications_topic
        if replacing_update_min_successful_instances_percent is not None: self._values["replacing_update_min_successful_instances_percent"] = replacing_update_min_successful_instances_percent
        if resource_signal_count is not None: self._values["resource_signal_count"] = resource_signal_count
        if resource_signal_timeout is not None: self._values["resource_signal_timeout"] = resource_signal_timeout
        if rolling_update_configuration is not None: self._values["rolling_update_configuration"] = rolling_update_configuration
        if spot_price is not None: self._values["spot_price"] = spot_price
        if update_type is not None: self._values["update_type"] = update_type
        if vpc_subnets is not None: self._values["vpc_subnets"] = vpc_subnets

    @builtins.property
    def allow_all_outbound(self) -> typing.Optional[bool]:
        """Whether the instances can initiate connections to anywhere by default.

        default
        :default: true
        """
        return self._values.get('allow_all_outbound')

    @builtins.property
    def associate_public_ip_address(self) -> typing.Optional[bool]:
        """Whether instances in the Auto Scaling Group should have public IP addresses associated with them.

        default
        :default: - Use subnet setting.
        """
        return self._values.get('associate_public_ip_address')

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Default scaling cooldown for this AutoScalingGroup.

        default
        :default: Duration.minutes(5)
        """
        return self._values.get('cooldown')

    @builtins.property
    def desired_capacity(self) -> typing.Optional[jsii.Number]:
        """Initial amount of instances in the fleet.

        If this is set to a number, every deployment will reset the amount of
        instances to this number. It is recommended to leave this value blank.

        default
        :default: minCapacity, and leave unchanged during deployment

        see
        :see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
        """
        return self._values.get('desired_capacity')

    @builtins.property
    def health_check(self) -> typing.Optional["HealthCheck"]:
        """Configuration for health checks.

        default
        :default: - HealthCheck.ec2 with no grace period
        """
        return self._values.get('health_check')

    @builtins.property
    def ignore_unmodified_size_properties(self) -> typing.Optional[bool]:
        """If the ASG has scheduled actions, don't reset unchanged group sizes.

        Only used if the ASG has scheduled actions (which may scale your ASG up
        or down regardless of cdk deployments). If true, the size of the group
        will only be reset if it has been changed in the CDK app. If false, the
        sizes will always be changed back to what they were in the CDK app
        on deployment.

        default
        :default: true
        """
        return self._values.get('ignore_unmodified_size_properties')

    @builtins.property
    def key_name(self) -> typing.Optional[str]:
        """Name of SSH keypair to grant access to instances.

        default
        :default: - No SSH access will be possible.
        """
        return self._values.get('key_name')

    @builtins.property
    def max_capacity(self) -> typing.Optional[jsii.Number]:
        """Maximum number of instances in the fleet.

        default
        :default: desiredCapacity
        """
        return self._values.get('max_capacity')

    @builtins.property
    def min_capacity(self) -> typing.Optional[jsii.Number]:
        """Minimum number of instances in the fleet.

        default
        :default: 1
        """
        return self._values.get('min_capacity')

    @builtins.property
    def notifications_topic(self) -> typing.Optional[aws_cdk.aws_sns.ITopic]:
        """SNS topic to send notifications about fleet changes.

        default
        :default: - No fleet change notifications will be sent.
        """
        return self._values.get('notifications_topic')

    @builtins.property
    def replacing_update_min_successful_instances_percent(self) -> typing.Optional[jsii.Number]:
        """Configuration for replacing updates.

        Only used if updateType == UpdateType.ReplacingUpdate. Specifies how
        many instances must signal success for the update to succeed.

        default
        :default: minSuccessfulInstancesPercent
        """
        return self._values.get('replacing_update_min_successful_instances_percent')

    @builtins.property
    def resource_signal_count(self) -> typing.Optional[jsii.Number]:
        """How many ResourceSignal calls CloudFormation expects before the resource is considered created.

        default
        :default: 1
        """
        return self._values.get('resource_signal_count')

    @builtins.property
    def resource_signal_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """The length of time to wait for the resourceSignalCount.

        The maximum value is 43200 (12 hours).

        default
        :default: Duration.minutes(5)
        """
        return self._values.get('resource_signal_timeout')

    @builtins.property
    def rolling_update_configuration(self) -> typing.Optional["RollingUpdateConfiguration"]:
        """Configuration for rolling updates.

        Only used if updateType == UpdateType.RollingUpdate.

        default
        :default: - RollingUpdateConfiguration with defaults.
        """
        return self._values.get('rolling_update_configuration')

    @builtins.property
    def spot_price(self) -> typing.Optional[str]:
        """The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request.

        Spot Instances are
        launched when the price you specify exceeds the current Spot market price.

        default
        :default: none
        """
        return self._values.get('spot_price')

    @builtins.property
    def update_type(self) -> typing.Optional["UpdateType"]:
        """What to do when an AutoScalingGroup's instance configuration is changed.

        This is applied when any of the settings on the ASG are changed that
        affect how the instances should be created (VPC, instance type, startup
        scripts, etc.). It indicates how the existing instances should be
        replaced with new instances matching the new config. By default, nothing
        is done and only new instances are launched with the new config.

        default
        :default: UpdateType.None
        """
        return self._values.get('update_type')

    @builtins.property
    def vpc_subnets(self) -> typing.Optional[aws_cdk.aws_ec2.SubnetSelection]:
        """Where to place instances within the VPC.

        default
        :default: - All Private subnets.
        """
        return self._values.get('vpc_subnets')

    def __eq__(self, rhs) -> bool:
        # Structural equality: same concrete class and identical stored values.
        # (No __hash__ is defined alongside __eq__, matching the generator's
        # convention for these structs.)
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        # Inverse of ==; delegates to the equality protocol.
        return not (rhs == self)

    def __repr__(self) -> str:
        # Render only the keys that were actually supplied.
        return 'CommonAutoScalingGroupProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.AutoScalingGroupProps", jsii_struct_bases=[CommonAutoScalingGroupProps], name_mapping={'allow_all_outbound': 'allowAllOutbound', 'associate_public_ip_address': 'associatePublicIpAddress', 'cooldown': 'cooldown', 'desired_capacity': 'desiredCapacity', 'health_check': 'healthCheck', 'ignore_unmodified_size_properties': 'ignoreUnmodifiedSizeProperties', 'key_name': 'keyName', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'notifications_topic': 'notificationsTopic', 'replacing_update_min_successful_instances_percent': 'replacingUpdateMinSuccessfulInstancesPercent', 'resource_signal_count': 'resourceSignalCount', 'resource_signal_timeout': 'resourceSignalTimeout', 'rolling_update_configuration': 'rollingUpdateConfiguration', 'spot_price': 'spotPrice', 'update_type': 'updateType', 'vpc_subnets': 'vpcSubnets', 'instance_type': 'instanceType', 'machine_image': 'machineImage', 'vpc': 'vpc', 'block_devices': 'blockDevices', 'role': 'role', 'user_data': 'userData'})
class AutoScalingGroupProps(CommonAutoScalingGroupProps):
    """Properties of a Fleet (an AutoScalingGroup)."""

    def __init__(self, *, allow_all_outbound: typing.Optional[bool]=None, associate_public_ip_address: typing.Optional[bool]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, desired_capacity: typing.Optional[jsii.Number]=None, health_check: typing.Optional["HealthCheck"]=None, ignore_unmodified_size_properties: typing.Optional[bool]=None, key_name: typing.Optional[str]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, notifications_topic: typing.Optional[aws_cdk.aws_sns.ITopic]=None, replacing_update_min_successful_instances_percent: typing.Optional[jsii.Number]=None, resource_signal_count: typing.Optional[jsii.Number]=None, resource_signal_timeout: typing.Optional[aws_cdk.core.Duration]=None, rolling_update_configuration: typing.Optional["RollingUpdateConfiguration"]=None, spot_price: typing.Optional[str]=None, update_type: typing.Optional["UpdateType"]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None, instance_type: aws_cdk.aws_ec2.InstanceType, machine_image: aws_cdk.aws_ec2.IMachineImage, vpc: aws_cdk.aws_ec2.IVpc, block_devices: typing.Optional[typing.List["BlockDevice"]]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, user_data: typing.Optional[aws_cdk.aws_ec2.UserData]=None):
        """Properties of a Fleet.

        ``instance_type``, ``machine_image`` and ``vpc`` are required; every
        other keyword is optional and, when omitted, falls back to the default
        documented on the corresponding property of this struct.
        """
        # jsii may hand us plain dicts for nested structs; promote them.
        if isinstance(rolling_update_configuration, dict):
            rolling_update_configuration = RollingUpdateConfiguration(**rolling_update_configuration)
        if isinstance(vpc_subnets, dict):
            vpc_subnets = aws_cdk.aws_ec2.SubnetSelection(**vpc_subnets)
        self._values = {
            'instance_type': instance_type,
            'machine_image': machine_image,
            'vpc': vpc,
        }
        # Record only the optional settings the caller actually supplied,
        # preserving declaration order for a stable __repr__.
        for key, value in (
            ('allow_all_outbound', allow_all_outbound),
            ('associate_public_ip_address', associate_public_ip_address),
            ('cooldown', cooldown),
            ('desired_capacity', desired_capacity),
            ('health_check', health_check),
            ('ignore_unmodified_size_properties', ignore_unmodified_size_properties),
            ('key_name', key_name),
            ('max_capacity', max_capacity),
            ('min_capacity', min_capacity),
            ('notifications_topic', notifications_topic),
            ('replacing_update_min_successful_instances_percent', replacing_update_min_successful_instances_percent),
            ('resource_signal_count', resource_signal_count),
            ('resource_signal_timeout', resource_signal_timeout),
            ('rolling_update_configuration', rolling_update_configuration),
            ('spot_price', spot_price),
            ('update_type', update_type),
            ('vpc_subnets', vpc_subnets),
            ('block_devices', block_devices),
            ('role', role),
            ('user_data', user_data),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def allow_all_outbound(self) -> typing.Optional[bool]:
        """Whether the instances can initiate connections to anywhere by default.

        :default: true
        """
        return self._values.get("allow_all_outbound")

    @builtins.property
    def associate_public_ip_address(self) -> typing.Optional[bool]:
        """Whether instances in the Auto Scaling Group should have public IP addresses associated with them.

        :default: - Use subnet setting.
        """
        return self._values.get("associate_public_ip_address")

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Default scaling cooldown for this AutoScalingGroup.

        :default: Duration.minutes(5)
        """
        return self._values.get("cooldown")

    @builtins.property
    def desired_capacity(self) -> typing.Optional[jsii.Number]:
        """Initial amount of instances in the fleet.

        If set, every deployment resets the instance count to this number, so
        it is recommended to leave it blank.

        :default: minCapacity, and leave unchanged during deployment
        :see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
        """
        return self._values.get("desired_capacity")

    @builtins.property
    def health_check(self) -> typing.Optional["HealthCheck"]:
        """Configuration for health checks.

        :default: - HealthCheck.ec2 with no grace period
        """
        return self._values.get("health_check")

    @builtins.property
    def ignore_unmodified_size_properties(self) -> typing.Optional[bool]:
        """If the ASG has scheduled actions, don't reset unchanged group sizes.

        Only used when scheduled actions may scale the group outside of CDK
        deployments; if true, sizes are reset on deploy only when changed in
        the CDK app.

        :default: true
        """
        return self._values.get("ignore_unmodified_size_properties")

    @builtins.property
    def key_name(self) -> typing.Optional[str]:
        """Name of SSH keypair to grant access to instances.

        :default: - No SSH access will be possible.
        """
        return self._values.get("key_name")

    @builtins.property
    def max_capacity(self) -> typing.Optional[jsii.Number]:
        """Maximum number of instances in the fleet.

        :default: desiredCapacity
        """
        return self._values.get("max_capacity")

    @builtins.property
    def min_capacity(self) -> typing.Optional[jsii.Number]:
        """Minimum number of instances in the fleet.

        :default: 1
        """
        return self._values.get("min_capacity")

    @builtins.property
    def notifications_topic(self) -> typing.Optional[aws_cdk.aws_sns.ITopic]:
        """SNS topic to send notifications about fleet changes.

        :default: - No fleet change notifications will be sent.
        """
        return self._values.get("notifications_topic")

    @builtins.property
    def replacing_update_min_successful_instances_percent(self) -> typing.Optional[jsii.Number]:
        """Configuration for replacing updates.

        Only used if updateType == UpdateType.ReplacingUpdate; specifies how
        many instances must signal success for the update to succeed.

        :default: minSuccessfulInstancesPercent
        """
        return self._values.get("replacing_update_min_successful_instances_percent")

    @builtins.property
    def resource_signal_count(self) -> typing.Optional[jsii.Number]:
        """How many ResourceSignal calls CloudFormation expects before the resource is considered created.

        :default: 1
        """
        return self._values.get("resource_signal_count")

    @builtins.property
    def resource_signal_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """The length of time to wait for the resourceSignalCount (maximum 43200, i.e. 12 hours).

        :default: Duration.minutes(5)
        """
        return self._values.get("resource_signal_timeout")

    @builtins.property
    def rolling_update_configuration(self) -> typing.Optional["RollingUpdateConfiguration"]:
        """Configuration for rolling updates; only used if updateType == UpdateType.RollingUpdate.

        :default: - RollingUpdateConfiguration with defaults.
        """
        return self._values.get("rolling_update_configuration")

    @builtins.property
    def spot_price(self) -> typing.Optional[str]:
        """The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request.

        Spot Instances are launched when the price you specify exceeds the
        current Spot market price.

        :default: none
        """
        return self._values.get("spot_price")

    @builtins.property
    def update_type(self) -> typing.Optional["UpdateType"]:
        """What to do when an AutoScalingGroup's instance configuration is changed.

        Applied when any setting affecting instance creation changes (VPC,
        instance type, startup scripts, etc.); controls how existing instances
        are replaced. By default nothing is done and only new instances use
        the new config.

        :default: UpdateType.None
        """
        return self._values.get("update_type")

    @builtins.property
    def vpc_subnets(self) -> typing.Optional[aws_cdk.aws_ec2.SubnetSelection]:
        """Where to place instances within the VPC.

        :default: - All Private subnets.
        """
        return self._values.get("vpc_subnets")

    @builtins.property
    def instance_type(self) -> aws_cdk.aws_ec2.InstanceType:
        """Type of instance to launch."""
        return self._values.get("instance_type")

    @builtins.property
    def machine_image(self) -> aws_cdk.aws_ec2.IMachineImage:
        """AMI to launch."""
        return self._values.get("machine_image")

    @builtins.property
    def vpc(self) -> aws_cdk.aws_ec2.IVpc:
        """VPC to launch these instances in."""
        return self._values.get("vpc")

    @builtins.property
    def block_devices(self) -> typing.Optional[typing.List["BlockDevice"]]:
        """Specifies how block devices are exposed to the instance.

        You can specify virtual devices and EBS volumes; block device mappings
        attach additional EBS or instance store volumes at launch.

        :default: - Uses the block device mapping of the AMI
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
        """
        return self._values.get("block_devices")

    @builtins.property
    def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
        """An IAM role to associate with the instance profile assigned to this Auto Scaling Group.

        The role must be assumable by the service principal ``ec2.amazonaws.com``:

        :default: A role will automatically be created, it can be accessed via the ``role`` property

        Example::

            # Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
            role = iam.Role(self, "MyRole",
                assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")
            )
        """
        return self._values.get("role")

    @builtins.property
    def user_data(self) -> typing.Optional[aws_cdk.aws_ec2.UserData]:
        """Specific UserData to use; it may still be mutated after creation.

        :default: - A UserData object appropriate for the MachineImage's Operating System is created.
        """
        return self._values.get("user_data")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'AutoScalingGroupProps(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CpuUtilizationScalingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_utilization_percent': 'targetUtilizationPercent'})
class CpuUtilizationScalingProps(BaseTargetTrackingProps):
    """Properties for enabling scaling based on CPU utilization."""

    def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_utilization_percent: jsii.Number):
        """Properties for enabling scaling based on CPU utilization.

        ``target_utilization_percent`` is required; the optional keywords fall
        back to the defaults documented on the corresponding properties.
        """
        self._values = {'target_utilization_percent': target_utilization_percent}
        # Only record optional settings the caller actually supplied.
        for key, value in (
            ('cooldown', cooldown),
            ('disable_scale_in', disable_scale_in),
            ('estimated_instance_warmup', estimated_instance_warmup),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        :default: - The default cooldown configured on the AutoScalingGroup.
        """
        return self._values.get("cooldown")

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[bool]:
        """Indicates whether scale in by the target tracking policy is disabled.

        If true, the target tracking policy never removes capacity from the
        autoscaling group; otherwise scale in is enabled.

        :default: false
        """
        return self._values.get("disable_scale_in")

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        :default: - Same as the cooldown.
        """
        return self._values.get("estimated_instance_warmup")

    @builtins.property
    def target_utilization_percent(self) -> jsii.Number:
        """Target average CPU utilization across the task."""
        return self._values.get("target_utilization_percent")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'CpuUtilizationScalingProps(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CronOptions", jsii_struct_bases=[], name_mapping={'day': 'day', 'hour': 'hour', 'minute': 'minute', 'month': 'month', 'week_day': 'weekDay'})
class CronOptions():
    """Options to configure a cron expression.

    All fields are strings so you can use complex expresions. Absence of
    a field implies '*' or '?', whichever one is appropriate.

    :see: http://crontab.org/
    """

    def __init__(self, *, day: typing.Optional[str]=None, hour: typing.Optional[str]=None, minute: typing.Optional[str]=None, month: typing.Optional[str]=None, week_day: typing.Optional[str]=None):
        """Options to configure a cron expression.

        Each field defaults to "every" (day / hour / minute / month / weekday)
        when omitted; see the corresponding properties.
        """
        self._values = {}
        # Store only the fields the caller actually supplied.
        for key, value in (
            ('day', day),
            ('hour', hour),
            ('minute', minute),
            ('month', month),
            ('week_day', week_day),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def day(self) -> typing.Optional[str]:
        """The day of the month to run this rule at.

        :default: - Every day of the month
        """
        return self._values.get("day")

    @builtins.property
    def hour(self) -> typing.Optional[str]:
        """The hour to run this rule at.

        :default: - Every hour
        """
        return self._values.get("hour")

    @builtins.property
    def minute(self) -> typing.Optional[str]:
        """The minute to run this rule at.

        :default: - Every minute
        """
        return self._values.get("minute")

    @builtins.property
    def month(self) -> typing.Optional[str]:
        """The month to run this rule at.

        :default: - Every month
        """
        return self._values.get("month")

    @builtins.property
    def week_day(self) -> typing.Optional[str]:
        """The day of the week to run this rule at.

        :default: - Any day of the week
        """
        return self._values.get("week_day")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'CronOptions(%s)' % rendered
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.DefaultResult")
class DefaultResult(enum.Enum):
    # Two-valued result enum exposed to jsii. NOTE(review): presumably the
    # default outcome applied by an Auto Scaling lifecycle hook when the
    # action times out — confirm against the AWS Auto Scaling docs.
    CONTINUE = "CONTINUE"
    ABANDON = "ABANDON"
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceOptionsBase", jsii_struct_bases=[], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType'})
class EbsDeviceOptionsBase():
    """Base block device options for an EBS volume."""

    def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None):
        """Base block device options for an EBS volume.

        All keywords are optional; omitted values fall back to the defaults
        documented on the corresponding properties.
        """
        self._values = {}
        # Store only the fields the caller actually supplied.
        for key, value in (
            ('delete_on_termination', delete_on_termination),
            ('iops', iops),
            ('volume_type', volume_type),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def delete_on_termination(self) -> typing.Optional[bool]:
        """Indicates whether to delete the volume when the instance is terminated.

        :default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
        """
        return self._values.get("delete_on_termination")

    @builtins.property
    def iops(self) -> typing.Optional[jsii.Number]:
        """The number of I/O operations per second (IOPS) to provision for the volume.

        Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}.
        The maximum IOPS-to-size ratio is 50:1, so 5,000 provisioned IOPS
        require at least 100 GiB of storage.

        :default: - none, required for {@link EbsDeviceVolumeType.IO1}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("iops")

    @builtins.property
    def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
        """The EBS volume type.

        :default: {@link EbsDeviceVolumeType.GP2}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("volume_type")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'EbsDeviceOptionsBase(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceOptions", jsii_struct_bases=[EbsDeviceOptionsBase], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType', 'encrypted': 'encrypted'})
class EbsDeviceOptions(EbsDeviceOptionsBase):
    """Block device options for an EBS volume."""

    def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None, encrypted: typing.Optional[bool]=None):
        """Block device options for an EBS volume.

        Extends :class:`EbsDeviceOptionsBase` with an ``encrypted`` flag; all
        keywords are optional and default as documented on the properties.
        """
        self._values = {}
        # Store only the fields the caller actually supplied.
        for key, value in (
            ('delete_on_termination', delete_on_termination),
            ('iops', iops),
            ('volume_type', volume_type),
            ('encrypted', encrypted),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def delete_on_termination(self) -> typing.Optional[bool]:
        """Indicates whether to delete the volume when the instance is terminated.

        :default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
        """
        return self._values.get("delete_on_termination")

    @builtins.property
    def iops(self) -> typing.Optional[jsii.Number]:
        """The number of I/O operations per second (IOPS) to provision for the volume.

        Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}.
        The maximum IOPS-to-size ratio is 50:1, so 5,000 provisioned IOPS
        require at least 100 GiB of storage.

        :default: - none, required for {@link EbsDeviceVolumeType.IO1}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("iops")

    @builtins.property
    def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
        """The EBS volume type.

        :default: {@link EbsDeviceVolumeType.GP2}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("volume_type")

    @builtins.property
    def encrypted(self) -> typing.Optional[bool]:
        """Specifies whether the EBS volume is encrypted.

        Encrypted EBS volumes can only be attached to instances that support
        Amazon EBS encryption.

        :default: false
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances
        """
        return self._values.get("encrypted")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'EbsDeviceOptions(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceSnapshotOptions", jsii_struct_bases=[EbsDeviceOptionsBase], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType', 'volume_size': 'volumeSize'})
class EbsDeviceSnapshotOptions(EbsDeviceOptionsBase):
    """Block device options for an EBS volume created from a snapshot."""

    def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None, volume_size: typing.Optional[jsii.Number]=None):
        """Block device options for an EBS volume created from a snapshot.

        Extends :class:`EbsDeviceOptionsBase` with a ``volume_size``; all
        keywords are optional and default as documented on the properties.
        """
        self._values = {}
        # Store only the fields the caller actually supplied.
        for key, value in (
            ('delete_on_termination', delete_on_termination),
            ('iops', iops),
            ('volume_type', volume_type),
            ('volume_size', volume_size),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def delete_on_termination(self) -> typing.Optional[bool]:
        """Indicates whether to delete the volume when the instance is terminated.

        :default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
        """
        return self._values.get("delete_on_termination")

    @builtins.property
    def iops(self) -> typing.Optional[jsii.Number]:
        """The number of I/O operations per second (IOPS) to provision for the volume.

        Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}.
        The maximum IOPS-to-size ratio is 50:1, so 5,000 provisioned IOPS
        require at least 100 GiB of storage.

        :default: - none, required for {@link EbsDeviceVolumeType.IO1}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("iops")

    @builtins.property
    def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
        """The EBS volume type.

        :default: {@link EbsDeviceVolumeType.GP2}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("volume_type")

    @builtins.property
    def volume_size(self) -> typing.Optional[jsii.Number]:
        """The volume size, in Gibibytes (GiB).

        If specified, must be equal to or greater than the snapshot size.

        :default: - The snapshot size
        """
        return self._values.get("volume_size")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'EbsDeviceSnapshotOptions(%s)' % rendered
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceProps", jsii_struct_bases=[EbsDeviceSnapshotOptions], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType', 'volume_size': 'volumeSize', 'snapshot_id': 'snapshotId'})
class EbsDeviceProps(EbsDeviceSnapshotOptions):
    """Properties of an EBS block device."""

    def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None, volume_size: typing.Optional[jsii.Number]=None, snapshot_id: typing.Optional[str]=None):
        """Properties of an EBS block device.

        Extends :class:`EbsDeviceSnapshotOptions` with a ``snapshot_id``; all
        keywords are optional and default as documented on the properties.
        """
        self._values = {}
        # Store only the fields the caller actually supplied.
        for key, value in (
            ('delete_on_termination', delete_on_termination),
            ('iops', iops),
            ('volume_type', volume_type),
            ('volume_size', volume_size),
            ('snapshot_id', snapshot_id),
        ):
            if value is not None:
                self._values[key] = value

    @builtins.property
    def delete_on_termination(self) -> typing.Optional[bool]:
        """Indicates whether to delete the volume when the instance is terminated.

        :default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
        """
        return self._values.get("delete_on_termination")

    @builtins.property
    def iops(self) -> typing.Optional[jsii.Number]:
        """The number of I/O operations per second (IOPS) to provision for the volume.

        Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}.
        The maximum IOPS-to-size ratio is 50:1, so 5,000 provisioned IOPS
        require at least 100 GiB of storage.

        :default: - none, required for {@link EbsDeviceVolumeType.IO1}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("iops")

    @builtins.property
    def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
        """The EBS volume type.

        :default: {@link EbsDeviceVolumeType.GP2}
        :see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
        """
        return self._values.get("volume_type")

    @builtins.property
    def volume_size(self) -> typing.Optional[jsii.Number]:
        """The volume size, in Gibibytes (GiB).

        If specified, must be equal to or greater than the snapshot size.

        :default: - The snapshot size
        """
        return self._values.get("volume_size")

    @builtins.property
    def snapshot_id(self) -> typing.Optional[str]:
        """The snapshot ID of the volume to use.

        :default: - No snapshot will be used
        """
        return self._values.get("snapshot_id")

    def __eq__(self, rhs) -> bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'EbsDeviceProps(%s)' % rendered
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceVolumeType")
class EbsDeviceVolumeType(enum.Enum):
    """Supported EBS volume types for blockDevices.

    Each member's value is the volume type's string identifier as exposed
    through the jsii layer.
    """
    STANDARD = "STANDARD"
    """Magnetic."""
    IO1 = "IO1"
    """Provisioned IOPS SSD."""
    GP2 = "GP2"
    """General Purpose SSD."""
    ST1 = "ST1"
    """Throughput Optimized HDD."""
    SC1 = "SC1"
    """Cold HDD."""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.Ec2HealthCheckOptions", jsii_struct_bases=[], name_mapping={'grace': 'grace'})
class Ec2HealthCheckOptions():
    def __init__(self, *, grace: typing.Optional[aws_cdk.core.Duration]=None):
        """EC2 health check options.

        :param grace: Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. Default: Duration.seconds(0)
        """
        # An omitted (None) grace period is simply not recorded.
        self._values = {} if grace is None else {"grace": grace}

    @builtins.property
    def grace(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service.

        default
        :default: Duration.seconds(0)
        """
        return self._values.get('grace')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(name, value) for name, value in self._values.items())
        return 'Ec2HealthCheckOptions(%s)' % fields
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.ElbHealthCheckOptions", jsii_struct_bases=[], name_mapping={'grace': 'grace'})
class ElbHealthCheckOptions():
    def __init__(self, *, grace: aws_cdk.core.Duration):
        """ELB health check options.

        :param grace: Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. This option is required for ELB health checks.
        """
        # grace is mandatory here, so it is always stored.
        self._values = {'grace': grace}

    @builtins.property
    def grace(self) -> aws_cdk.core.Duration:
        """Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service.

        This option is required for ELB health checks.
        """
        return self._values.get('grace')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(name, value) for name, value in self._values.items())
        return 'ElbHealthCheckOptions(%s)' % fields
class HealthCheck(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.HealthCheck"):
    """Health check settings."""

    @jsii.member(jsii_name="ec2")
    @builtins.classmethod
    def ec2(cls, *, grace: typing.Optional[aws_cdk.core.Duration]=None) -> "HealthCheck":
        """Use EC2 for health checks.

        :param grace: Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. Default: Duration.seconds(0)
        """
        # Bundle the keyword arguments into the jsii struct and dispatch.
        return jsii.sinvoke(cls, "ec2", [Ec2HealthCheckOptions(grace=grace)])

    @jsii.member(jsii_name="elb")
    @builtins.classmethod
    def elb(cls, *, grace: aws_cdk.core.Duration) -> "HealthCheck":
        """Use ELB for health checks.

        It considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.

        :param grace: Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. This option is required for ELB health checks.
        """
        return jsii.sinvoke(cls, "elb", [ElbHealthCheckOptions(grace=grace)])

    @builtins.property
    @jsii.member(jsii_name="type")
    def type(self) -> str:
        # Delegates to the jsii kernel for the underlying "type" attribute.
        return jsii.get(self, "type")

    @builtins.property
    @jsii.member(jsii_name="gracePeriod")
    def grace_period(self) -> typing.Optional[aws_cdk.core.Duration]:
        # Delegates to the jsii kernel for the underlying "gracePeriod" attribute.
        return jsii.get(self, "gracePeriod")
@jsii.interface(jsii_type="@aws-cdk/aws-autoscaling.IAutoScalingGroup")
class IAutoScalingGroup(aws_cdk.core.IResource, jsii.compat.Protocol):
    """An AutoScalingGroup.

    Structural (jsii) interface; method bodies are intentionally empty and
    concrete access happens through the proxy class returned by
    ``__jsii_proxy_class__``.
    """
    @builtins.staticmethod
    def __jsii_proxy_class__():
        # Hook the jsii runtime uses to locate the concrete proxy implementation.
        return _IAutoScalingGroupProxy

    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupArn")
    def auto_scaling_group_arn(self) -> str:
        """The arn of the AutoScalingGroup.

        attribute:
        :attribute:: true
        """
        ...

    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupName")
    def auto_scaling_group_name(self) -> str:
        """The name of the AutoScalingGroup.

        attribute:
        :attribute:: true
        """
        ...

    @jsii.member(jsii_name="addLifecycleHook")
    def add_lifecycle_hook(self, id: str, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> "LifecycleHook":
        """Send a message to either an SQS queue or SNS topic when instances launch or terminate.

        :param id: -
        :param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
        :param notification_target: The target of the lifecycle hook.
        :param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
        :param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
        :param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
        :param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
        :param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
        """
        ...

    @jsii.member(jsii_name="scaleOnCpuUtilization")
    def scale_on_cpu_utilization(self, id: str, *, target_utilization_percent: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in to achieve a target CPU utilization.

        :param id: -
        :param target_utilization_percent: Target average CPU utilization across the task.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        ...

    @jsii.member(jsii_name="scaleOnIncomingBytes")
    def scale_on_incoming_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in to achieve a target network ingress rate.

        :param id: -
        :param target_bytes_per_second: Target average bytes/seconds on each instance.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        ...

    @jsii.member(jsii_name="scaleOnMetric")
    def scale_on_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> "StepScalingPolicy":
        """Scale out or in, in response to a metric.

        :param id: -
        :param metric: Metric to scale on.
        :param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
        :param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
        :param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
        :param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
        """
        ...

    @jsii.member(jsii_name="scaleOnOutgoingBytes")
    def scale_on_outgoing_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in to achieve a target network egress rate.

        :param id: -
        :param target_bytes_per_second: Target average bytes/seconds on each instance.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        ...

    @jsii.member(jsii_name="scaleOnSchedule")
    def scale_on_schedule(self, id: str, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> "ScheduledAction":
        """Scale out or in based on time.

        :param id: -
        :param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
        :param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
        :param end_time: When this scheduled action expires. Default: - The rule never expires.
        :param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
        :param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
        """
        ...

    @jsii.member(jsii_name="scaleToTrackMetric")
    def scale_to_track_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in in order to keep a metric around a target value.

        :param id: -
        :param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
        :param target_value: Value to keep the metric around.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        ...
class _IAutoScalingGroupProxy(jsii.proxy_for(aws_cdk.core.IResource)):
    """An AutoScalingGroup.

    Concrete jsii proxy for the ``IAutoScalingGroup`` protocol: every member
    forwards to the jsii kernel via ``jsii.get`` / ``jsii.invoke``.
    """
    __jsii_type__ = "@aws-cdk/aws-autoscaling.IAutoScalingGroup"
    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupArn")
    def auto_scaling_group_arn(self) -> str:
        """The arn of the AutoScalingGroup.

        attribute:
        :attribute:: true
        """
        return jsii.get(self, "autoScalingGroupArn")

    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupName")
    def auto_scaling_group_name(self) -> str:
        """The name of the AutoScalingGroup.

        attribute:
        :attribute:: true
        """
        return jsii.get(self, "autoScalingGroupName")

    @jsii.member(jsii_name="addLifecycleHook")
    def add_lifecycle_hook(self, id: str, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> "LifecycleHook":
        """Send a message to either an SQS queue or SNS topic when instances launch or terminate.

        :param id: -
        :param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
        :param notification_target: The target of the lifecycle hook.
        :param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
        :param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
        :param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
        :param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
        :param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
        """
        # Keyword arguments are packed into the generated struct before the jsii call.
        props = BasicLifecycleHookProps(lifecycle_transition=lifecycle_transition, notification_target=notification_target, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, role=role)
        return jsii.invoke(self, "addLifecycleHook", [id, props])

    @jsii.member(jsii_name="scaleOnCpuUtilization")
    def scale_on_cpu_utilization(self, id: str, *, target_utilization_percent: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in to achieve a target CPU utilization.

        :param id: -
        :param target_utilization_percent: Target average CPU utilization across the task.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        props = CpuUtilizationScalingProps(target_utilization_percent=target_utilization_percent, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
        return jsii.invoke(self, "scaleOnCpuUtilization", [id, props])

    @jsii.member(jsii_name="scaleOnIncomingBytes")
    def scale_on_incoming_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in to achieve a target network ingress rate.

        :param id: -
        :param target_bytes_per_second: Target average bytes/seconds on each instance.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
        return jsii.invoke(self, "scaleOnIncomingBytes", [id, props])

    @jsii.member(jsii_name="scaleOnMetric")
    def scale_on_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> "StepScalingPolicy":
        """Scale out or in, in response to a metric.

        :param id: -
        :param metric: Metric to scale on.
        :param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
        :param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
        :param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
        :param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
        """
        props = BasicStepScalingPolicyProps(metric=metric, scaling_steps=scaling_steps, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, min_adjustment_magnitude=min_adjustment_magnitude)
        return jsii.invoke(self, "scaleOnMetric", [id, props])

    @jsii.member(jsii_name="scaleOnOutgoingBytes")
    def scale_on_outgoing_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in to achieve a target network egress rate.

        :param id: -
        :param target_bytes_per_second: Target average bytes/seconds on each instance.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
        return jsii.invoke(self, "scaleOnOutgoingBytes", [id, props])

    @jsii.member(jsii_name="scaleOnSchedule")
    def scale_on_schedule(self, id: str, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> "ScheduledAction":
        """Scale out or in based on time.

        :param id: -
        :param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
        :param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
        :param end_time: When this scheduled action expires. Default: - The rule never expires.
        :param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
        :param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
        """
        props = BasicScheduledActionProps(schedule=schedule, desired_capacity=desired_capacity, end_time=end_time, max_capacity=max_capacity, min_capacity=min_capacity, start_time=start_time)
        return jsii.invoke(self, "scaleOnSchedule", [id, props])

    @jsii.member(jsii_name="scaleToTrackMetric")
    def scale_to_track_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
        """Scale out or in in order to keep a metric around a target value.

        :param id: -
        :param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
        :param target_value: Value to keep the metric around.
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        """
        props = MetricTargetTrackingProps(metric=metric, target_value=target_value, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
        return jsii.invoke(self, "scaleToTrackMetric", [id, props])
@jsii.implements(aws_cdk.aws_elasticloadbalancing.ILoadBalancerTarget, aws_cdk.aws_ec2.IConnectable, aws_cdk.aws_elasticloadbalancingv2.IApplicationLoadBalancerTarget, aws_cdk.aws_elasticloadbalancingv2.INetworkLoadBalancerTarget, aws_cdk.aws_iam.IGrantable, IAutoScalingGroup)
class AutoScalingGroup(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.AutoScalingGroup"):
"""A Fleet represents a managed set of EC2 instances.
The Fleet models a number of AutoScalingGroups, a launch configuration, a
security group and an instance role.
It allows adding arbitrary commands to the startup scripts of the instances
in the fleet.
The ASG spans all availability zones.
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, instance_type: aws_cdk.aws_ec2.InstanceType, machine_image: aws_cdk.aws_ec2.IMachineImage, vpc: aws_cdk.aws_ec2.IVpc, block_devices: typing.Optional[typing.List["BlockDevice"]]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, user_data: typing.Optional[aws_cdk.aws_ec2.UserData]=None, allow_all_outbound: typing.Optional[bool]=None, associate_public_ip_address: typing.Optional[bool]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, desired_capacity: typing.Optional[jsii.Number]=None, health_check: typing.Optional["HealthCheck"]=None, ignore_unmodified_size_properties: typing.Optional[bool]=None, key_name: typing.Optional[str]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, notifications_topic: typing.Optional[aws_cdk.aws_sns.ITopic]=None, replacing_update_min_successful_instances_percent: typing.Optional[jsii.Number]=None, resource_signal_count: typing.Optional[jsii.Number]=None, resource_signal_timeout: typing.Optional[aws_cdk.core.Duration]=None, rolling_update_configuration: typing.Optional["RollingUpdateConfiguration"]=None, spot_price: typing.Optional[str]=None, update_type: typing.Optional["UpdateType"]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None) -> None:
    """Create a new AutoScalingGroup fleet in the given scope.

    :param scope: -
    :param id: -
    :param instance_type: Type of instance to launch.
    :param machine_image: AMI to launch.
    :param vpc: VPC to launch these instances in.
    :param block_devices: Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes. Each instance that is launched has an associated root device volume, either an Amazon EBS volume or an instance store volume. You can use block device mappings to specify additional EBS volumes or instance store volumes to attach to an instance when it is launched. Default: - Uses the block device mapping of the AMI
    :param role: An IAM role to associate with the instance profile assigned to this Auto Scaling Group. The role must be assumable by the service principal ``ec2.amazonaws.com``: Default: A role will automatically be created, it can be accessed via the ``role`` property
    :param user_data: Specific UserData to use. The UserData may still be mutated after creation. Default: - A UserData object appropriate for the MachineImage's Operating System is created.
    :param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
    :param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. Default: - Use subnet setting.
    :param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
    :param desired_capacity: Initial amount of instances in the fleet. If this is set to a number, every deployment will reset the amount of instances to this number. It is recommended to leave this value blank. Default: minCapacity, and leave unchanged during deployment
    :param health_check: Configuration for health checks. Default: - HealthCheck.ec2 with no grace period
    :param ignore_unmodified_size_properties: If the ASG has scheduled actions, don't reset unchanged group sizes. Only used if the ASG has scheduled actions (which may scale your ASG up or down regardless of cdk deployments). If true, the size of the group will only be reset if it has been changed in the CDK app. If false, the sizes will always be changed back to what they were in the CDK app on deployment. Default: true
    :param key_name: Name of SSH keypair to grant access to instances. Default: - No SSH access will be possible.
    :param max_capacity: Maximum number of instances in the fleet. Default: desiredCapacity
    :param min_capacity: Minimum number of instances in the fleet. Default: 1
    :param notifications_topic: SNS topic to send notifications about fleet changes. Default: - No fleet change notifications will be sent.
    :param replacing_update_min_successful_instances_percent: Configuration for replacing updates. Only used if updateType == UpdateType.ReplacingUpdate. Specifies how many instances must signal success for the update to succeed. Default: minSuccessfulInstancesPercent
    :param resource_signal_count: How many ResourceSignal calls CloudFormation expects before the resource is considered created. Default: 1
    :param resource_signal_timeout: The length of time to wait for the resourceSignalCount. The maximum value is 43200 (12 hours). Default: Duration.minutes(5)
    :param rolling_update_configuration: Configuration for rolling updates. Only used if updateType == UpdateType.RollingUpdate. Default: - RollingUpdateConfiguration with defaults.
    :param spot_price: The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. Default: none
    :param update_type: What to do when an AutoScalingGroup's instance configuration is changed. This is applied when any of the settings on the ASG are changed that affect how the instances should be created (VPC, instance type, startup scripts, etc.). It indicates how the existing instances should be replaced with new instances matching the new config. By default, nothing is done and only new instances are launched with the new config. Default: UpdateType.None
    :param vpc_subnets: Where to place instances within the VPC. Default: - All Private subnets.
    """
    # All keyword arguments are bundled into the generated props struct, then
    # construction is delegated to the jsii kernel.
    props = AutoScalingGroupProps(instance_type=instance_type, machine_image=machine_image, vpc=vpc, block_devices=block_devices, role=role, user_data=user_data, allow_all_outbound=allow_all_outbound, associate_public_ip_address=associate_public_ip_address, cooldown=cooldown, desired_capacity=desired_capacity, health_check=health_check, ignore_unmodified_size_properties=ignore_unmodified_size_properties, key_name=key_name, max_capacity=max_capacity, min_capacity=min_capacity, notifications_topic=notifications_topic, replacing_update_min_successful_instances_percent=replacing_update_min_successful_instances_percent, resource_signal_count=resource_signal_count, resource_signal_timeout=resource_signal_timeout, rolling_update_configuration=rolling_update_configuration, spot_price=spot_price, update_type=update_type, vpc_subnets=vpc_subnets)
    jsii.create(AutoScalingGroup, self, [scope, id, props])
@jsii.member(jsii_name="fromAutoScalingGroupName")
@builtins.classmethod
def from_auto_scaling_group_name(cls, scope: aws_cdk.core.Construct, id: str, auto_scaling_group_name: str) -> "IAutoScalingGroup":
"""
:param scope: -
:param id: -
:param auto_scaling_group_name: -
"""
return jsii.sinvoke(cls, "fromAutoScalingGroupName", [scope, id, auto_scaling_group_name])
@jsii.member(jsii_name="addLifecycleHook")
def add_lifecycle_hook(self, id: str, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> "LifecycleHook":
"""Send a message to either an SQS queue or SNS topic when instances launch or terminate.
:param id: -
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
"""
props = BasicLifecycleHookProps(lifecycle_transition=lifecycle_transition, notification_target=notification_target, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, role=role)
return jsii.invoke(self, "addLifecycleHook", [id, props])
@jsii.member(jsii_name="addSecurityGroup")
def add_security_group(self, security_group: aws_cdk.aws_ec2.ISecurityGroup) -> None:
"""Add the security group to all instances via the launch configuration security groups array.
:param security_group: : The security group to add.
"""
return jsii.invoke(self, "addSecurityGroup", [security_group])
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""Adds a statement to the IAM role assumed by instances of this fleet.
:param statement: -
"""
return jsii.invoke(self, "addToRolePolicy", [statement])
@jsii.member(jsii_name="addUserData")
def add_user_data(self, *commands: str) -> None:
"""Add command to the startup script of fleet instances.
The command must be in the scripting language supported by the fleet's OS (i.e. Linux/Windows).
:param commands: -
"""
return jsii.invoke(self, "addUserData", [*commands])
@jsii.member(jsii_name="attachToApplicationTargetGroup")
def attach_to_application_target_group(self, target_group: aws_cdk.aws_elasticloadbalancingv2.IApplicationTargetGroup) -> aws_cdk.aws_elasticloadbalancingv2.LoadBalancerTargetProps:
"""Attach to ELBv2 Application Target Group.
:param target_group: -
"""
return jsii.invoke(self, "attachToApplicationTargetGroup", [target_group])
@jsii.member(jsii_name="attachToClassicLB")
def attach_to_classic_lb(self, load_balancer: aws_cdk.aws_elasticloadbalancing.LoadBalancer) -> None:
"""Attach to a classic load balancer.
:param load_balancer: -
"""
return jsii.invoke(self, "attachToClassicLB", [load_balancer])
    @jsii.member(jsii_name="attachToNetworkTargetGroup")
    def attach_to_network_target_group(self, target_group: aws_cdk.aws_elasticloadbalancingv2.INetworkTargetGroup) -> aws_cdk.aws_elasticloadbalancingv2.LoadBalancerTargetProps:
        """Attach to ELBv2 Network Target Group.

        :param target_group: -
        """
        # NOTE(review): the generated docstring said "Application Target Group",
        # but both the jsii member name and the INetworkTargetGroup parameter type
        # show this attaches to a *network* target group; docstring corrected.
        return jsii.invoke(self, "attachToNetworkTargetGroup", [target_group])
@jsii.member(jsii_name="scaleOnCpuUtilization")
def scale_on_cpu_utilization(self, id: str, *, target_utilization_percent: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target CPU utilization.
:param id: -
:param target_utilization_percent: Target average CPU utilization across the task.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = CpuUtilizationScalingProps(target_utilization_percent=target_utilization_percent, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnCpuUtilization", [id, props])
@jsii.member(jsii_name="scaleOnIncomingBytes")
def scale_on_incoming_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network ingress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/seconds on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnIncomingBytes", [id, props])
@jsii.member(jsii_name="scaleOnMetric")
def scale_on_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> "StepScalingPolicy":
"""Scale out or in, in response to a metric.
:param id: -
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
props = BasicStepScalingPolicyProps(metric=metric, scaling_steps=scaling_steps, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, min_adjustment_magnitude=min_adjustment_magnitude)
return jsii.invoke(self, "scaleOnMetric", [id, props])
@jsii.member(jsii_name="scaleOnOutgoingBytes")
def scale_on_outgoing_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network egress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/seconds on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnOutgoingBytes", [id, props])
@jsii.member(jsii_name="scaleOnRequestCount")
def scale_on_request_count(self, id: str, *, target_requests_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target request handling rate.
The AutoScalingGroup must have been attached to an Application Load Balancer
in order to be able to call this.
:param id: -
:param target_requests_per_second: Target average requests/seconds on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = RequestCountScalingProps(target_requests_per_second=target_requests_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnRequestCount", [id, props])
@jsii.member(jsii_name="scaleOnSchedule")
def scale_on_schedule(self, id: str, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> "ScheduledAction":
"""Scale out or in based on time.
:param id: -
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
:param start_time: When this scheduled action becomes active. Default: - The rule is activate immediately.
"""
props = BasicScheduledActionProps(schedule=schedule, desired_capacity=desired_capacity, end_time=end_time, max_capacity=max_capacity, min_capacity=min_capacity, start_time=start_time)
return jsii.invoke(self, "scaleOnSchedule", [id, props])
@jsii.member(jsii_name="scaleToTrackMetric")
def scale_to_track_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in in order to keep a metric around a target value.
:param id: -
:param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
:param target_value: Value to keep the metric around.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = MetricTargetTrackingProps(metric=metric, target_value=target_value, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleToTrackMetric", [id, props])
    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupArn")
    def auto_scaling_group_arn(self) -> str:
        """The ARN of this AutoScalingGroup (read-only, resolved on the jsii side)."""
        return jsii.get(self, "autoScalingGroupArn")
    @builtins.property
    @jsii.member(jsii_name="autoScalingGroupName")
    def auto_scaling_group_name(self) -> str:
        """The name of this AutoScalingGroup (read-only, resolved on the jsii side)."""
        return jsii.get(self, "autoScalingGroupName")
    @builtins.property
    @jsii.member(jsii_name="connections")
    def connections(self) -> aws_cdk.aws_ec2.Connections:
        """The network connections object used to specify security group rules for instances of this fleet."""
        return jsii.get(self, "connections")
    @builtins.property
    @jsii.member(jsii_name="grantPrincipal")
    def grant_principal(self) -> aws_cdk.aws_iam.IPrincipal:
        """The IAM principal to grant permissions to (backs ``grant*`` calls on this construct)."""
        return jsii.get(self, "grantPrincipal")
    @builtins.property
    @jsii.member(jsii_name="osType")
    def os_type(self) -> aws_cdk.aws_ec2.OperatingSystemType:
        """The type of OS (Linux/Windows) that instances of this fleet are running."""
        return jsii.get(self, "osType")
    @builtins.property
    @jsii.member(jsii_name="role")
    def role(self) -> aws_cdk.aws_iam.IRole:
        """The IAM role assumed by instances of this fleet."""
        return jsii.get(self, "role")
    @builtins.property
    @jsii.member(jsii_name="userData")
    def user_data(self) -> aws_cdk.aws_ec2.UserData:
        """The UserData (startup script) applied to instances of this fleet."""
        return jsii.get(self, "userData")
    @builtins.property
    @jsii.member(jsii_name="spotPrice")
    def spot_price(self) -> typing.Optional[str]:
        """The maximum spot price configured for the autoscaling group.

        ``undefined``
        indicates that this group uses on-demand capacity.
        """
        return jsii.get(self, "spotPrice")
    @builtins.property
    @jsii.member(jsii_name="albTargetGroup")
    def _alb_target_group(self) -> typing.Optional[aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup]:
        # Internal accessor (leading underscore): presumably set when the group is
        # attached to an Application Load Balancer target group — state lives on
        # the jsii side, so this is just a pass-through. TODO confirm against
        # attachToApplicationTargetGroup on the JS implementation.
        return jsii.get(self, "albTargetGroup")
    @_alb_target_group.setter
    def _alb_target_group(self, value: typing.Optional[aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup]):
        # Pass-through setter for the jsii-managed albTargetGroup attribute.
        jsii.set(self, "albTargetGroup", value)
@jsii.interface(jsii_type="@aws-cdk/aws-autoscaling.ILifecycleHook")
class ILifecycleHook(aws_cdk.core.IResource, jsii.compat.Protocol):
    """A basic lifecycle hook object.

    jsii interface declaration: concrete behavior is provided by implementing
    classes (e.g. ``LifecycleHook``) or by the ``_ILifecycleHookProxy``.
    """
    @builtins.staticmethod
    def __jsii_proxy_class__():
        # Tells the jsii runtime which proxy class to instantiate for objects
        # that only satisfy this interface.
        return _ILifecycleHookProxy
    @builtins.property
    @jsii.member(jsii_name="role")
    def role(self) -> aws_cdk.aws_iam.IRole:
        """The role for the lifecycle hook to execute."""
        ...
class _ILifecycleHookProxy(jsii.proxy_for(aws_cdk.core.IResource)):
    """A basic lifecycle hook object.

    Runtime proxy generated for ``ILifecycleHook``: forwards each member
    access to the underlying jsii object.
    """
    __jsii_type__ = "@aws-cdk/aws-autoscaling.ILifecycleHook"
    @builtins.property
    @jsii.member(jsii_name="role")
    def role(self) -> aws_cdk.aws_iam.IRole:
        """The role for the lifecycle hook to execute."""
        return jsii.get(self, "role")
@jsii.interface(jsii_type="@aws-cdk/aws-autoscaling.ILifecycleHookTarget")
class ILifecycleHookTarget(jsii.compat.Protocol):
    """Interface for autoscaling lifecycle hook targets.

    Implementations provide a ``bind`` that returns the notification target
    configuration (an ARN) used by the hook.
    """
    @builtins.staticmethod
    def __jsii_proxy_class__():
        # Tells the jsii runtime which proxy class to instantiate for objects
        # that only satisfy this interface.
        return _ILifecycleHookTargetProxy
    @jsii.member(jsii_name="bind")
    def bind(self, scope: aws_cdk.core.Construct, lifecycle_hook: "ILifecycleHook") -> "LifecycleHookTargetConfig":
        """Called when this object is used as the target of a lifecycle hook.

        :param scope: -
        :param lifecycle_hook: -
        """
        ...
class _ILifecycleHookTargetProxy():
    """Interface for autoscaling lifecycle hook targets.

    Runtime proxy generated for ``ILifecycleHookTarget``: forwards ``bind``
    to the underlying jsii object.
    """
    __jsii_type__ = "@aws-cdk/aws-autoscaling.ILifecycleHookTarget"
    @jsii.member(jsii_name="bind")
    def bind(self, scope: aws_cdk.core.Construct, lifecycle_hook: "ILifecycleHook") -> "LifecycleHookTargetConfig":
        """Called when this object is used as the target of a lifecycle hook.

        :param scope: -
        :param lifecycle_hook: -
        """
        return jsii.invoke(self, "bind", [scope, lifecycle_hook])
@jsii.implements(ILifecycleHook)
class LifecycleHook(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.LifecycleHook"):
    """Define a life cycle hook."""

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param auto_scaling_group: The AutoScalingGroup to add the lifecycle hook to.
        :param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
        :param notification_target: The target of the lifecycle hook.
        :param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
        :param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
        :param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
        :param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
        :param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
        """
        # Collect keyword options into the jsii props struct, then hand over
        # to the jsii runtime to construct the underlying resource.
        hook_props = LifecycleHookProps(
            auto_scaling_group=auto_scaling_group,
            lifecycle_transition=lifecycle_transition,
            notification_target=notification_target,
            default_result=default_result,
            heartbeat_timeout=heartbeat_timeout,
            lifecycle_hook_name=lifecycle_hook_name,
            notification_metadata=notification_metadata,
            role=role,
        )
        jsii.create(LifecycleHook, self, [scope, id, hook_props])

    @builtins.property
    @jsii.member(jsii_name="lifecycleHookName")
    def lifecycle_hook_name(self) -> str:
        """The name of this lifecycle hook.

        attribute:
        :attribute:: true
        """
        return jsii.get(self, "lifecycleHookName")

    @builtins.property
    @jsii.member(jsii_name="role")
    def role(self) -> aws_cdk.aws_iam.IRole:
        """The role that allows the ASG to publish to the notification target."""
        return jsii.get(self, "role")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.LifecycleHookProps", jsii_struct_bases=[BasicLifecycleHookProps], name_mapping={'lifecycle_transition': 'lifecycleTransition', 'notification_target': 'notificationTarget', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'lifecycle_hook_name': 'lifecycleHookName', 'notification_metadata': 'notificationMetadata', 'role': 'role', 'auto_scaling_group': 'autoScalingGroup'})
class LifecycleHookProps(BasicLifecycleHookProps):
    def __init__(self, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, auto_scaling_group: "IAutoScalingGroup"):
        """Properties for a Lifecycle hook.

        :param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
        :param notification_target: The target of the lifecycle hook.
        :param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
        :param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
        :param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
        :param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
        :param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
        :param auto_scaling_group: The AutoScalingGroup to add the lifecycle hook to.
        """
        # Required entries first, then only the optional entries that were
        # actually supplied (order matters for __repr__ stability).
        self._values = {
            'lifecycle_transition': lifecycle_transition,
            'notification_target': notification_target,
            'auto_scaling_group': auto_scaling_group,
        }
        optional_entries = {
            'default_result': default_result,
            'heartbeat_timeout': heartbeat_timeout,
            'lifecycle_hook_name': lifecycle_hook_name,
            'notification_metadata': notification_metadata,
            'role': role,
        }
        self._values.update({key: val for key, val in optional_entries.items() if val is not None})

    @builtins.property
    def lifecycle_transition(self) -> "LifecycleTransition":
        """The state of the Amazon EC2 instance to which you want to attach the lifecycle hook."""
        return self._values.get('lifecycle_transition')

    @builtins.property
    def notification_target(self) -> "ILifecycleHookTarget":
        """The target of the lifecycle hook."""
        return self._values.get('notification_target')

    @builtins.property
    def default_result(self) -> typing.Optional["DefaultResult"]:
        """The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs.

        default
        :default: Continue
        """
        return self._values.get('default_result')

    @builtins.property
    def heartbeat_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Maximum time between calls to RecordLifecycleActionHeartbeat for the hook.

        If the lifecycle hook times out, perform the action in DefaultResult.

        default
        :default: - No heartbeat timeout.
        """
        return self._values.get('heartbeat_timeout')

    @builtins.property
    def lifecycle_hook_name(self) -> typing.Optional[str]:
        """Name of the lifecycle hook.

        default
        :default: - Automatically generated name.
        """
        return self._values.get('lifecycle_hook_name')

    @builtins.property
    def notification_metadata(self) -> typing.Optional[str]:
        """Additional data to pass to the lifecycle hook target.

        default
        :default: - No metadata.
        """
        return self._values.get('notification_metadata')

    @builtins.property
    def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
        """The role that allows publishing to the notification target.

        default
        :default: - A role is automatically created.
        """
        return self._values.get('role')

    @builtins.property
    def auto_scaling_group(self) -> "IAutoScalingGroup":
        """The AutoScalingGroup to add the lifecycle hook to."""
        return self._values.get('auto_scaling_group')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'LifecycleHookProps({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.LifecycleHookTargetConfig", jsii_struct_bases=[], name_mapping={'notification_target_arn': 'notificationTargetArn'})
class LifecycleHookTargetConfig():
    def __init__(self, *, notification_target_arn: str):
        """Properties to add the target to a lifecycle hook.

        :param notification_target_arn: The ARN to use as the notification target.
        """
        self._values = {'notification_target_arn': notification_target_arn}

    @builtins.property
    def notification_target_arn(self) -> str:
        """The ARN to use as the notification target."""
        return self._values.get('notification_target_arn')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'LifecycleHookTargetConfig({})'.format(rendered)
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.LifecycleTransition")
class LifecycleTransition(enum.Enum):
    """What instance transition to attach the hook to."""
    # Member values are the wire-format strings used by the jsii runtime —
    # they must not be changed.
    INSTANCE_LAUNCHING = "INSTANCE_LAUNCHING"
    """Execute the hook when an instance is about to be added."""
    INSTANCE_TERMINATING = "INSTANCE_TERMINATING"
    """Execute the hook when an instance is about to be terminated."""
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.MetricAggregationType")
class MetricAggregationType(enum.Enum):
    """How the scaling metric is going to be aggregated."""
    # Member values are the wire-format strings used by the jsii runtime —
    # they must not be changed.
    AVERAGE = "AVERAGE"
    """Average."""
    MINIMUM = "MINIMUM"
    """Minimum."""
    MAXIMUM = "MAXIMUM"
    """Maximum."""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.MetricTargetTrackingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'metric': 'metric', 'target_value': 'targetValue'})
class MetricTargetTrackingProps(BaseTargetTrackingProps):
    def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number):
        """Properties for enabling tracking of an arbitrary metric.

        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        :param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
        :param target_value: Value to keep the metric around.
        """
        # Required entries first, then only the optional entries that were
        # actually supplied (order matters for __repr__ stability).
        self._values = {
            'metric': metric,
            'target_value': target_value,
        }
        optional_entries = {
            'cooldown': cooldown,
            'disable_scale_in': disable_scale_in,
            'estimated_instance_warmup': estimated_instance_warmup,
        }
        self._values.update({key: val for key, val in optional_entries.items() if val is not None})

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        default
        :default: - The default cooldown configured on the AutoScalingGroup.
        """
        return self._values.get('cooldown')

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[bool]:
        """Indicates whether scale in by the target tracking policy is disabled.

        If the value is true, scale in is disabled and the target tracking policy
        won't remove capacity from the autoscaling group. Otherwise, scale in is
        enabled and the target tracking policy can remove capacity from the
        group.

        default
        :default: false
        """
        return self._values.get('disable_scale_in')

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        default
        :default: - Same as the cooldown.
        """
        return self._values.get('estimated_instance_warmup')

    @builtins.property
    def metric(self) -> aws_cdk.aws_cloudwatch.IMetric:
        """Metric to track.

        The metric must represent a utilization, so that if it's higher than the
        target value, your ASG should scale out, and if it's lower it should
        scale in.
        """
        return self._values.get('metric')

    @builtins.property
    def target_value(self) -> jsii.Number:
        """Value to keep the metric around."""
        return self._values.get('target_value')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'MetricTargetTrackingProps({})'.format(rendered)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.NetworkUtilizationScalingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_bytes_per_second': 'targetBytesPerSecond'})
class NetworkUtilizationScalingProps(BaseTargetTrackingProps):
    def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_bytes_per_second: jsii.Number):
        """Properties for enabling scaling based on network utilization.

        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        :param target_bytes_per_second: Target average bytes/seconds on each instance.
        """
        # Required entry first, then only the optional entries that were
        # actually supplied (order matters for __repr__ stability).
        self._values = {
            'target_bytes_per_second': target_bytes_per_second,
        }
        optional_entries = {
            'cooldown': cooldown,
            'disable_scale_in': disable_scale_in,
            'estimated_instance_warmup': estimated_instance_warmup,
        }
        self._values.update({key: val for key, val in optional_entries.items() if val is not None})

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        default
        :default: - The default cooldown configured on the AutoScalingGroup.
        """
        return self._values.get('cooldown')

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[bool]:
        """Indicates whether scale in by the target tracking policy is disabled.

        If the value is true, scale in is disabled and the target tracking policy
        won't remove capacity from the autoscaling group. Otherwise, scale in is
        enabled and the target tracking policy can remove capacity from the
        group.

        default
        :default: false
        """
        return self._values.get('disable_scale_in')

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        default
        :default: - Same as the cooldown.
        """
        return self._values.get('estimated_instance_warmup')

    @builtins.property
    def target_bytes_per_second(self) -> jsii.Number:
        """Target average bytes/seconds on each instance."""
        return self._values.get('target_bytes_per_second')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs) -> bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        rendered = ', '.join('{}={}'.format(k, repr(v)) for k, v in self._values.items())
        return 'NetworkUtilizationScalingProps({})'.format(rendered)
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.PredefinedMetric")
class PredefinedMetric(enum.Enum):
    """One of the predefined autoscaling metrics.

    The string values are the identifiers the scaling service expects for
    predefined metric types.
    """

    ASG_AVERAGE_CPU_UTILIZATION = "ASG_AVERAGE_CPU_UTILIZATION"
    """Average CPU utilization of the Auto Scaling group."""

    ASG_AVERAGE_NETWORK_IN = "ASG_AVERAGE_NETWORK_IN"
    """Average number of bytes received on all network interfaces by the Auto Scaling group."""

    ASG_AVERAGE_NETWORK_OUT = "ASG_AVERAGE_NETWORK_OUT"
    """Average number of bytes sent out on all network interfaces by the Auto Scaling group."""

    ALB_REQUEST_COUNT_PER_TARGET = "ALB_REQUEST_COUNT_PER_TARGET"
    """Number of requests completed per target in an Application Load Balancer target group.

    Specify the ALB to look at in the ``resourceLabel`` field.
    """
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.RequestCountScalingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_requests_per_second': 'targetRequestsPerSecond'})
class RequestCountScalingProps(BaseTargetTrackingProps):
    def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_requests_per_second: jsii.Number):
        """Properties for target-tracking scaling based on request rate.

        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
        :param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If true, the policy can only add capacity to the autoscaling group and never remove it. Default: false
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
        :param target_requests_per_second: Target average requests/seconds on each instance.
        """
        # The required field is always stored; optional fields are recorded
        # only when supplied, so __eq__/__repr__ reflect exactly the values
        # the caller actually set.
        values = {'target_requests_per_second': target_requests_per_second}
        for key, value in (
            ('cooldown', cooldown),
            ('disable_scale_in', disable_scale_in),
            ('estimated_instance_warmup', estimated_instance_warmup),
        ):
            if value is not None:
                values[key] = value
        self._values = values

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        :default: - The default cooldown configured on the AutoScalingGroup.
        """
        return self._values.get('cooldown', None)

    @builtins.property
    def disable_scale_in(self) -> typing.Optional[bool]:
        """Indicates whether scale in by the target tracking policy is disabled.

        When true, the target tracking policy will never remove capacity from
        the autoscaling group; when false, it may both add and remove capacity.

        :default: false
        """
        return self._values.get('disable_scale_in', None)

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        :default: - Same as the cooldown.
        """
        return self._values.get('estimated_instance_warmup', None)

    @builtins.property
    def target_requests_per_second(self) -> jsii.Number:
        """Target average requests/seconds on each instance."""
        return self._values.get('target_requests_per_second')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not rhs == self

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'RequestCountScalingProps({})'.format(fields)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.RollingUpdateConfiguration", jsii_struct_bases=[], name_mapping={'max_batch_size': 'maxBatchSize', 'min_instances_in_service': 'minInstancesInService', 'min_successful_instances_percent': 'minSuccessfulInstancesPercent', 'pause_time': 'pauseTime', 'suspend_processes': 'suspendProcesses', 'wait_on_resource_signals': 'waitOnResourceSignals'})
class RollingUpdateConfiguration():
    def __init__(self, *, max_batch_size: typing.Optional[jsii.Number]=None, min_instances_in_service: typing.Optional[jsii.Number]=None, min_successful_instances_percent: typing.Optional[jsii.Number]=None, pause_time: typing.Optional[aws_cdk.core.Duration]=None, suspend_processes: typing.Optional[typing.List["ScalingProcess"]]=None, wait_on_resource_signals: typing.Optional[bool]=None):
        """Additional settings when a rolling update is selected.

        :param max_batch_size: The maximum number of instances that AWS CloudFormation updates at once. Default: 1
        :param min_instances_in_service: The minimum number of instances that must be in service before more instances are replaced; affects the speed of the replacement. Default: 0
        :param min_successful_instances_percent: The percentage of instances that must signal success for an update to succeed. Instances that do not signal within ``pause_time`` are assumed not updated. If you specify this property, also enable ``wait_on_resource_signals`` and ``pause_time``. Default: 100
        :param pause_time: The pause time after making a change to a batch of instances, intended to give them time to start software applications. ISO8601 duration format (PT#H#M#S); the maximum is one hour (PT1H). Default: Duration.minutes(5) if the waitOnResourceSignals property is true, otherwise 0
        :param suspend_processes: Specifies the Auto Scaling processes to suspend during a stack update, preventing Auto Scaling from interfering with it. Default: HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions.
        :param wait_on_resource_signals: Specifies whether the Auto Scaling group waits on signals from new instances during an update; CloudFormation must receive a signal from each new instance within ``pause_time`` before continuing. Default: true if you specified the minSuccessfulInstancesPercent property, false otherwise
        """
        # All fields are optional; only the ones actually supplied are stored,
        # so __eq__/__repr__ reflect exactly what the caller set.
        values = {}
        for key, value in (
            ('max_batch_size', max_batch_size),
            ('min_instances_in_service', min_instances_in_service),
            ('min_successful_instances_percent', min_successful_instances_percent),
            ('pause_time', pause_time),
            ('suspend_processes', suspend_processes),
            ('wait_on_resource_signals', wait_on_resource_signals),
        ):
            if value is not None:
                values[key] = value
        self._values = values

    @builtins.property
    def max_batch_size(self) -> typing.Optional[jsii.Number]:
        """The maximum number of instances that AWS CloudFormation updates at once.

        :default: 1
        """
        return self._values.get('max_batch_size', None)

    @builtins.property
    def min_instances_in_service(self) -> typing.Optional[jsii.Number]:
        """The minimum number of instances that must be in service before more instances are replaced.

        This number affects the speed of the replacement.

        :default: 0
        """
        return self._values.get('min_instances_in_service', None)

    @builtins.property
    def min_successful_instances_percent(self) -> typing.Optional[jsii.Number]:
        """The percentage of instances that must signal success for an update to succeed.

        If an instance doesn't send a signal within the time specified in the
        pauseTime property, AWS CloudFormation assumes that the instance wasn't
        updated. This number affects the success of the replacement. If you
        specify this property, you must also enable the waitOnResourceSignals
        and pauseTime properties.

        :default: 100
        """
        return self._values.get('min_successful_instances_percent', None)

    @builtins.property
    def pause_time(self) -> typing.Optional[aws_cdk.core.Duration]:
        """The pause time after making a change to a batch of instances.

        Intended to give those instances time to start software applications.
        Specify PauseTime in the ISO8601 duration format (PT#H#M#S, where each
        # is the number of hours, minutes, and seconds, respectively). The
        maximum PauseTime is one hour (PT1H).

        :default: Duration.minutes(5) if the waitOnResourceSignals property is true, otherwise 0
        """
        return self._values.get('pause_time', None)

    @builtins.property
    def suspend_processes(self) -> typing.Optional[typing.List["ScalingProcess"]]:
        """Specifies the Auto Scaling processes to suspend during a stack update.

        Suspending processes prevents Auto Scaling from interfering with a
        stack update.

        :default: HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions.
        """
        return self._values.get('suspend_processes', None)

    @builtins.property
    def wait_on_resource_signals(self) -> typing.Optional[bool]:
        """Specifies whether the Auto Scaling group waits on signals from new instances during an update.

        AWS CloudFormation must receive a signal from each new instance within
        the specified PauseTime before continuing the update. To have instances
        wait for an Elastic Load Balancing health check before they signal
        success, add a health-check verification by using the cfn-init helper
        script (see the verify_instance_health command in the Auto Scaling
        rolling updates sample template).

        :default: true if you specified the minSuccessfulInstancesPercent property, false otherwise
        """
        return self._values.get('wait_on_resource_signals', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not rhs == self

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'RollingUpdateConfiguration({})'.format(fields)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.ScalingInterval", jsii_struct_bases=[], name_mapping={'change': 'change', 'lower': 'lower', 'upper': 'upper'})
class ScalingInterval():
    def __init__(self, *, change: jsii.Number, lower: typing.Optional[jsii.Number]=None, upper: typing.Optional[jsii.Number]=None):
        """A range of metric values in which to apply a certain scaling operation.

        :param change: The capacity adjustment to apply in this interval. Interpreted per AdjustmentType: ChangeInCapacity adds the (possibly negative) number to the current capacity; PercentChangeInCapacity adds/removes the given percentage of current capacity, in the range [-100..100]; ExactCapacity sets the capacity to this (positive) number.
        :param lower: The lower bound of the interval; the scaling adjustment is applied if the metric is higher than this value. Default: Threshold automatically derived from neighbouring intervals
        :param upper: The upper bound of the interval; the scaling adjustment is applied if the metric is lower than this value. Default: Threshold automatically derived from neighbouring intervals
        """
        # 'change' is required; bounds are stored only when the caller set
        # them, so __eq__/__repr__ reflect exactly the supplied values.
        values = {'change': change}
        for key, value in (('lower', lower), ('upper', upper)):
            if value is not None:
                values[key] = value
        self._values = values

    @builtins.property
    def change(self) -> jsii.Number:
        """The capacity adjustment to apply in this interval.

        The number is interpreted differently based on AdjustmentType:

        - ChangeInCapacity: add the adjustment to the current capacity.
          The number can be positive or negative.
        - PercentChangeInCapacity: add or remove the given percentage of the
          current capacity to itself. The number can be in the range [-100..100].
        - ExactCapacity: set the capacity to this number. The number must
          be positive.
        """
        return self._values.get('change')

    @builtins.property
    def lower(self) -> typing.Optional[jsii.Number]:
        """The lower bound of the interval.

        The scaling adjustment will be applied if the metric is higher than
        this value.

        :default: Threshold automatically derived from neighbouring intervals
        """
        return self._values.get('lower', None)

    @builtins.property
    def upper(self) -> typing.Optional[jsii.Number]:
        """The upper bound of the interval.

        The scaling adjustment will be applied if the metric is lower than
        this value.

        :default: Threshold automatically derived from neighbouring intervals
        """
        return self._values.get('upper', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not rhs == self

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'ScalingInterval({})'.format(fields)
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.ScalingProcess")
class ScalingProcess(enum.Enum):
    """An Auto Scaling process.

    Used in ``RollingUpdateConfiguration.suspend_processes`` to name the
    processes that should be suspended during a stack update.
    """
    LAUNCH = "LAUNCH"
    TERMINATE = "TERMINATE"
    HEALTH_CHECK = "HEALTH_CHECK"
    REPLACE_UNHEALTHY = "REPLACE_UNHEALTHY"
    AZ_REBALANCE = "AZ_REBALANCE"
    ALARM_NOTIFICATION = "ALARM_NOTIFICATION"
    SCHEDULED_ACTIONS = "SCHEDULED_ACTIONS"
    ADD_TO_LOAD_BALANCER = "ADD_TO_LOAD_BALANCER"
class Schedule(metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-autoscaling.Schedule"):
    """Schedule for scheduled scaling actions.

    Abstract base: concrete schedules are obtained through the ``cron`` and
    ``expression`` factory classmethods.
    """

    @builtins.staticmethod
    def __jsii_proxy_class__():
        # Concrete proxy class the jsii runtime instantiates for this
        # abstract class.
        return _ScheduleProxy

    def __init__(self) -> None:
        jsii.create(Schedule, self, [])

    @jsii.member(jsii_name="cron")
    @builtins.classmethod
    def cron(cls, *, day: typing.Optional[str]=None, hour: typing.Optional[str]=None, minute: typing.Optional[str]=None, month: typing.Optional[str]=None, week_day: typing.Optional[str]=None) -> "Schedule":
        """Create a schedule from a set of cron fields.

        Each field that is left unset falls back to its "every" default.

        :param day: The day of the month to run this rule at. Default: - Every day of the month
        :param hour: The hour to run this rule at. Default: - Every hour
        :param minute: The minute to run this rule at. Default: - Every minute
        :param month: The month to run this rule at. Default: - Every month
        :param week_day: The day of the week to run this rule at. Default: - Any day of the week
        """
        cron_options = CronOptions(day=day, hour=hour, minute=minute, month=month, week_day=week_day)
        return jsii.sinvoke(cls, "cron", [cron_options])

    @jsii.member(jsii_name="expression")
    @builtins.classmethod
    def expression(cls, expression: str) -> "Schedule":
        """Construct a schedule from a literal schedule expression.

        :param expression: The expression to use. Must be in a format that AutoScaling will recognize

        :see: http://crontab.org/
        """
        return jsii.sinvoke(cls, "expression", [expression])

    @builtins.property
    @jsii.member(jsii_name="expressionString")
    @abc.abstractmethod
    def expression_string(self) -> str:
        """Retrieve the expression for this schedule."""
        ...
class _ScheduleProxy(Schedule):
    # Concrete jsii proxy for the abstract ``Schedule`` class; returned by
    # ``Schedule.__jsii_proxy_class__``.
    @builtins.property
    @jsii.member(jsii_name="expressionString")
    def expression_string(self) -> str:
        """Retrieve the expression for this schedule."""
        return jsii.get(self, "expressionString")
class ScheduledAction(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.ScheduledAction"):
    """Define a scheduled scaling action.

    At the scheduled time, sets the group's minimum, maximum and/or desired
    capacity to the configured values.
    """

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param auto_scaling_group: The AutoScalingGroup to apply the scheduled actions to.
        :param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
        :param desired_capacity: The new desired capacity to set at the scheduled time. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
        :param end_time: When this scheduled action expires. Default: - The rule never expires.
        :param max_capacity: The new maximum capacity to set at the scheduled time. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
        :param min_capacity: The new minimum capacity to set at the scheduled time. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activate immediately.
        """
        # Bundle the keyword arguments into the struct the jsii runtime expects.
        action_props = ScheduledActionProps(
            auto_scaling_group=auto_scaling_group,
            schedule=schedule,
            desired_capacity=desired_capacity,
            end_time=end_time,
            max_capacity=max_capacity,
            min_capacity=min_capacity,
            start_time=start_time,
        )
        jsii.create(ScheduledAction, self, [scope, id, action_props])
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.ScheduledActionProps", jsii_struct_bases=[BasicScheduledActionProps], name_mapping={'schedule': 'schedule', 'desired_capacity': 'desiredCapacity', 'end_time': 'endTime', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'start_time': 'startTime', 'auto_scaling_group': 'autoScalingGroup'})
class ScheduledActionProps(BasicScheduledActionProps):
    def __init__(self, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None, auto_scaling_group: "IAutoScalingGroup"):
        """Properties for a scheduled action on an AutoScalingGroup.

        :param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
        :param desired_capacity: The new desired capacity to set at the scheduled time. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
        :param end_time: When this scheduled action expires. Default: - The rule never expires.
        :param max_capacity: The new maximum capacity to set at the scheduled time. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
        :param min_capacity: The new minimum capacity to set at the scheduled time. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activate immediately.
        :param auto_scaling_group: The AutoScalingGroup to apply the scheduled actions to.
        """
        # Required fields first (in declaration order, which __repr__ follows);
        # optional fields are stored only when the caller supplied them.
        values = {
            'schedule': schedule,
            'auto_scaling_group': auto_scaling_group,
        }
        for key, value in (
            ('desired_capacity', desired_capacity),
            ('end_time', end_time),
            ('max_capacity', max_capacity),
            ('min_capacity', min_capacity),
            ('start_time', start_time),
        ):
            if value is not None:
                values[key] = value
        self._values = values

    @builtins.property
    def schedule(self) -> "Schedule":
        """When to perform this action.

        Supports cron expressions; see https://en.wikipedia.org/wiki/Cron.
        """
        return self._values.get('schedule')

    @builtins.property
    def desired_capacity(self) -> typing.Optional[jsii.Number]:
        """The new desired capacity.

        At the scheduled time, set the desired capacity to the given capacity.
        At least one of maxCapacity, minCapacity, or desiredCapacity must be
        supplied.

        :default: - No new desired capacity.
        """
        return self._values.get('desired_capacity', None)

    @builtins.property
    def end_time(self) -> typing.Optional[datetime.datetime]:
        """When this scheduled action expires.

        :default: - The rule never expires.
        """
        return self._values.get('end_time', None)

    @builtins.property
    def max_capacity(self) -> typing.Optional[jsii.Number]:
        """The new maximum capacity.

        At the scheduled time, set the maximum capacity to the given capacity.
        At least one of maxCapacity, minCapacity, or desiredCapacity must be
        supplied.

        :default: - No new maximum capacity.
        """
        return self._values.get('max_capacity', None)

    @builtins.property
    def min_capacity(self) -> typing.Optional[jsii.Number]:
        """The new minimum capacity.

        At the scheduled time, set the minimum capacity to the given capacity.
        At least one of maxCapacity, minCapacity, or desiredCapacity must be
        supplied.

        :default: - No new minimum capacity.
        """
        return self._values.get('min_capacity', None)

    @builtins.property
    def start_time(self) -> typing.Optional[datetime.datetime]:
        """When this scheduled action becomes active.

        :default: - The rule is activate immediately.
        """
        return self._values.get('start_time', None)

    @builtins.property
    def auto_scaling_group(self) -> "IAutoScalingGroup":
        """The AutoScalingGroup to apply the scheduled actions to."""
        return self._values.get('auto_scaling_group')

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not rhs == self

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'ScheduledActionProps({})'.format(fields)
class StepScalingAction(aws_cdk.core.Construct, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.StepScalingAction"):
    """Define a step scaling action.

    This kind of scaling policy adjusts the target capacity in configurable
    steps, where the step size depends on how far the metric is from its alarm
    threshold. The action only takes effect when used as the target of a
    CloudWatch alarm.
    """

    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, metric_aggregation_type: typing.Optional["MetricAggregationType"]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param auto_scaling_group: The auto scaling group.
        :param adjustment_type: How the adjustment numbers are interpreted. Default: ChangeInCapacity
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: The default cooldown configured on the AutoScalingGroup
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
        :param metric_aggregation_type: The aggregation type for the CloudWatch metrics. Default: Average
        :param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only applies when AdjustmentType = PercentChangeInCapacity. Default: No minimum scaling effect
        """
        # Bundle the keyword arguments into the struct the jsii runtime expects.
        action_props = StepScalingActionProps(
            auto_scaling_group=auto_scaling_group,
            adjustment_type=adjustment_type,
            cooldown=cooldown,
            estimated_instance_warmup=estimated_instance_warmup,
            metric_aggregation_type=metric_aggregation_type,
            min_adjustment_magnitude=min_adjustment_magnitude,
        )
        jsii.create(StepScalingAction, self, [scope, id, action_props])

    @jsii.member(jsii_name="addAdjustment")
    def add_adjustment(self, *, adjustment: jsii.Number, lower_bound: typing.Optional[jsii.Number]=None, upper_bound: typing.Optional[jsii.Number]=None) -> None:
        """Add an adjustment interval to the ScalingAction.

        :param adjustment: What number to adjust the capacity with. Interpreted as an added capacity, a new fixed capacity or an added percentage depending on the AdjustmentType of the StepScalingPolicy. Can be positive or negative.
        :param lower_bound: Lower bound where this scaling tier applies: the tier applies if the difference between the metric value and its alarm threshold is higher than this value. Default: -Infinity if this is the first tier, otherwise the upperBound of the previous tier
        :param upper_bound: Upper bound where this scaling tier applies: the tier applies if the difference between the metric value and its alarm threshold is lower than this value. Default: +Infinity
        """
        tier = AdjustmentTier(adjustment=adjustment, lower_bound=lower_bound, upper_bound=upper_bound)
        return jsii.invoke(self, "addAdjustment", [tier])

    @builtins.property
    @jsii.member(jsii_name="scalingPolicyArn")
    def scaling_policy_arn(self) -> str:
        """ARN of the scaling policy."""
        return jsii.get(self, "scalingPolicyArn")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.StepScalingActionProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group': 'autoScalingGroup', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'metric_aggregation_type': 'metricAggregationType', 'min_adjustment_magnitude': 'minAdjustmentMagnitude'})
class StepScalingActionProps():
    def __init__(self, *, auto_scaling_group: "IAutoScalingGroup", adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, metric_aggregation_type: typing.Optional["MetricAggregationType"]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None):
        """Properties for a scaling policy.

        :param auto_scaling_group: The auto scaling group.
        :param adjustment_type: How the adjustment numbers are interpreted. Default: ChangeInCapacity
        :param cooldown: Period after a scaling completes before another scaling activity can start. Default: The default cooldown configured on the AutoScalingGroup
        :param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
        :param metric_aggregation_type: The aggregation type for the CloudWatch metrics. Default: Average
        :param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only applies when AdjustmentType = PercentChangeInCapacity. Default: No minimum scaling effect
        """
        # The required field is always stored; optional fields are recorded
        # only when supplied, so __eq__/__repr__ reflect exactly the values
        # the caller actually set.
        values = {'auto_scaling_group': auto_scaling_group}
        for key, value in (
            ('adjustment_type', adjustment_type),
            ('cooldown', cooldown),
            ('estimated_instance_warmup', estimated_instance_warmup),
            ('metric_aggregation_type', metric_aggregation_type),
            ('min_adjustment_magnitude', min_adjustment_magnitude),
        ):
            if value is not None:
                values[key] = value
        self._values = values

    @builtins.property
    def auto_scaling_group(self) -> "IAutoScalingGroup":
        """The auto scaling group."""
        return self._values.get('auto_scaling_group')

    @builtins.property
    def adjustment_type(self) -> typing.Optional["AdjustmentType"]:
        """How the adjustment numbers are interpreted.

        :default: ChangeInCapacity
        """
        return self._values.get('adjustment_type', None)

    @builtins.property
    def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Period after a scaling completes before another scaling activity can start.

        :default: The default cooldown configured on the AutoScalingGroup
        """
        return self._values.get('cooldown', None)

    @builtins.property
    def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
        """Estimated time until a newly launched instance can send metrics to CloudWatch.

        :default: Same as the cooldown
        """
        return self._values.get('estimated_instance_warmup', None)

    @builtins.property
    def metric_aggregation_type(self) -> typing.Optional["MetricAggregationType"]:
        """The aggregation type for the CloudWatch metrics.

        :default: Average
        """
        return self._values.get('metric_aggregation_type', None)

    @builtins.property
    def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
        """Minimum absolute number to adjust capacity with as result of percentage scaling.

        Only when using AdjustmentType = PercentChangeInCapacity, this number
        controls the minimum absolute effect size.

        :default: No minimum scaling effect
        """
        return self._values.get('min_adjustment_magnitude', None)

    def __eq__(self, rhs) -> bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs) -> bool:
        return not rhs == self

    def __repr__(self) -> str:
        fields = ', '.join('{}={!r}'.format(k, v) for k, v in self._values.items())
        return 'StepScalingActionProps({})'.format(fields)
class StepScalingPolicy(aws_cdk.core.Construct, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.StepScalingPolicy"):
"""Define a acaling strategy which scales depending on absolute values of some metric.
You can specify the scaling behavior for various values of the metric.
Implemented using one or more CloudWatch alarms and Step Scaling Policies.
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group: The auto scaling group.
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
props = StepScalingPolicyProps(auto_scaling_group=auto_scaling_group, metric=metric, scaling_steps=scaling_steps, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, min_adjustment_magnitude=min_adjustment_magnitude)
jsii.create(StepScalingPolicy, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="lowerAction")
def lower_action(self) -> typing.Optional["StepScalingAction"]:
return jsii.get(self, "lowerAction")
@builtins.property
@jsii.member(jsii_name="lowerAlarm")
def lower_alarm(self) -> typing.Optional[aws_cdk.aws_cloudwatch.Alarm]:
return jsii.get(self, "lowerAlarm")
@builtins.property
@jsii.member(jsii_name="upperAction")
def upper_action(self) -> typing.Optional["StepScalingAction"]:
return jsii.get(self, "upperAction")
@builtins.property
@jsii.member(jsii_name="upperAlarm")
def upper_alarm(self) -> typing.Optional[aws_cdk.aws_cloudwatch.Alarm]:
return jsii.get(self, "upperAlarm")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.StepScalingPolicyProps", jsii_struct_bases=[BasicStepScalingPolicyProps], name_mapping={'metric': 'metric', 'scaling_steps': 'scalingSteps', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'min_adjustment_magnitude': 'minAdjustmentMagnitude', 'auto_scaling_group': 'autoScalingGroup'})
class StepScalingPolicyProps(BasicStepScalingPolicyProps):
def __init__(self, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None, auto_scaling_group: "IAutoScalingGroup"):
"""
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
:param auto_scaling_group: The auto scaling group.
"""
self._values = {
'metric': metric,
'scaling_steps': scaling_steps,
'auto_scaling_group': auto_scaling_group,
}
if adjustment_type is not None: self._values["adjustment_type"] = adjustment_type
if cooldown is not None: self._values["cooldown"] = cooldown
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if min_adjustment_magnitude is not None: self._values["min_adjustment_magnitude"] = min_adjustment_magnitude
@builtins.property
def metric(self) -> aws_cdk.aws_cloudwatch.IMetric:
"""Metric to scale on."""
return self._values.get('metric')
@builtins.property
def scaling_steps(self) -> typing.List["ScalingInterval"]:
"""The intervals for scaling.
Maps a range of metric values to a particular scaling behavior.
"""
return self._values.get('scaling_steps')
@builtins.property
def adjustment_type(self) -> typing.Optional["AdjustmentType"]:
"""How the adjustment numbers inside 'intervals' are interpreted.
default
:default: ChangeInCapacity
"""
return self._values.get('adjustment_type')
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Grace period after scaling activity.
default
:default: Default cooldown period on your AutoScalingGroup
"""
return self._values.get('cooldown')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: Same as the cooldown
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
"""Minimum absolute number to adjust capacity with as result of percentage scaling.
Only when using AdjustmentType = PercentChangeInCapacity, this number controls
the minimum absolute effect size.
default
:default: No minimum scaling effect
"""
return self._values.get('min_adjustment_magnitude')
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
"""The auto scaling group."""
return self._values.get('auto_scaling_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'StepScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class TargetTrackingScalingPolicy(aws_cdk.core.Construct, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.TargetTrackingScalingPolicy"):
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", target_value: jsii.Number, custom_metric: typing.Optional[aws_cdk.aws_cloudwatch.IMetric]=None, predefined_metric: typing.Optional["PredefinedMetric"]=None, resource_label: typing.Optional[str]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group:
:param target_value: The target value for the metric.
:param custom_metric: A custom metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen in the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No custom metric.
:param predefined_metric: A predefined metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen in the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No predefined metric.
:param resource_label: The resource label associated with the predefined metric. Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the format should be: app///targetgroup// Default: - No resource label.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = TargetTrackingScalingPolicyProps(auto_scaling_group=auto_scaling_group, target_value=target_value, custom_metric=custom_metric, predefined_metric=predefined_metric, resource_label=resource_label, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
jsii.create(TargetTrackingScalingPolicy, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="scalingPolicyArn")
def scaling_policy_arn(self) -> str:
"""ARN of the scaling policy."""
return jsii.get(self, "scalingPolicyArn")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.TargetTrackingScalingPolicyProps", jsii_struct_bases=[BasicTargetTrackingScalingPolicyProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_value': 'targetValue', 'custom_metric': 'customMetric', 'predefined_metric': 'predefinedMetric', 'resource_label': 'resourceLabel', 'auto_scaling_group': 'autoScalingGroup'})
class TargetTrackingScalingPolicyProps(BasicTargetTrackingScalingPolicyProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_value: jsii.Number, custom_metric: typing.Optional[aws_cdk.aws_cloudwatch.IMetric]=None, predefined_metric: typing.Optional["PredefinedMetric"]=None, resource_label: typing.Optional[str]=None, auto_scaling_group: "IAutoScalingGroup"):
"""Properties for a concrete TargetTrackingPolicy.
Adds the scalingTarget.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param target_value: The target value for the metric.
:param custom_metric: A custom metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen in the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No custom metric.
:param predefined_metric: A predefined metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen in the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No predefined metric.
:param resource_label: The resource label associated with the predefined metric. Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the format should be: app///targetgroup// Default: - No resource label.
:param auto_scaling_group:
"""
self._values = {
'target_value': target_value,
'auto_scaling_group': auto_scaling_group,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if custom_metric is not None: self._values["custom_metric"] = custom_metric
if predefined_metric is not None: self._values["predefined_metric"] = predefined_metric
if resource_label is not None: self._values["resource_label"] = resource_label
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def target_value(self) -> jsii.Number:
"""The target value for the metric."""
return self._values.get('target_value')
@builtins.property
def custom_metric(self) -> typing.Optional[aws_cdk.aws_cloudwatch.IMetric]:
"""A custom metric for application autoscaling.
The metric must track utilization. Scaling out will happen if the metric is higher than
the target value, scaling in will happen in the metric is lower than the target value.
Exactly one of customMetric or predefinedMetric must be specified.
default
:default: - No custom metric.
"""
return self._values.get('custom_metric')
@builtins.property
def predefined_metric(self) -> typing.Optional["PredefinedMetric"]:
"""A predefined metric for application autoscaling.
The metric must track utilization. Scaling out will happen if the metric is higher than
the target value, scaling in will happen in the metric is lower than the target value.
Exactly one of customMetric or predefinedMetric must be specified.
default
:default: - No predefined metric.
"""
return self._values.get('predefined_metric')
@builtins.property
def resource_label(self) -> typing.Optional[str]:
"""The resource label associated with the predefined metric.
Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the
format should be:
app///targetgroup//
default
:default: - No resource label.
"""
return self._values.get('resource_label')
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
return self._values.get('auto_scaling_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'TargetTrackingScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.UpdateType")
class UpdateType(enum.Enum):
"""The type of update to perform on instances in this AutoScalingGroup."""
NONE = "NONE"
"""Don't do anything."""
REPLACING_UPDATE = "REPLACING_UPDATE"
"""Replace the entire AutoScalingGroup.
Builds a new AutoScalingGroup first, then delete the old one.
"""
ROLLING_UPDATE = "ROLLING_UPDATE"
"""Replace the instances in the AutoScalingGroup."""
__all__ = ["AdjustmentTier", "AdjustmentType", "AutoScalingGroup", "AutoScalingGroupProps", "BaseTargetTrackingProps", "BasicLifecycleHookProps", "BasicScheduledActionProps", "BasicStepScalingPolicyProps", "BasicTargetTrackingScalingPolicyProps", "BlockDevice", "BlockDeviceVolume", "CfnAutoScalingGroup", "CfnAutoScalingGroupProps", "CfnLaunchConfiguration", "CfnLaunchConfigurationProps", "CfnLifecycleHook", "CfnLifecycleHookProps", "CfnScalingPolicy", "CfnScalingPolicyProps", "CfnScheduledAction", "CfnScheduledActionProps", "CommonAutoScalingGroupProps", "CpuUtilizationScalingProps", "CronOptions", "DefaultResult", "EbsDeviceOptions", "EbsDeviceOptionsBase", "EbsDeviceProps", "EbsDeviceSnapshotOptions", "EbsDeviceVolumeType", "Ec2HealthCheckOptions", "ElbHealthCheckOptions", "HealthCheck", "IAutoScalingGroup", "ILifecycleHook", "ILifecycleHookTarget", "LifecycleHook", "LifecycleHookProps", "LifecycleHookTargetConfig", "LifecycleTransition", "MetricAggregationType", "MetricTargetTrackingProps", "NetworkUtilizationScalingProps", "PredefinedMetric", "RequestCountScalingProps", "RollingUpdateConfiguration", "ScalingInterval", "ScalingProcess", "Schedule", "ScheduledAction", "ScheduledActionProps", "StepScalingAction", "StepScalingActionProps", "StepScalingPolicy", "StepScalingPolicyProps", "TargetTrackingScalingPolicy", "TargetTrackingScalingPolicyProps", "UpdateType", "__jsii_assembly__"]
publication.publish()
| [
"vbloise3@gmail.com"
] | vbloise3@gmail.com |
96f26415c59c3f5d4cc5ad55a367af99e8cd7b23 | 75569ed16c90c7e4081e4ef3e5caafe8a622830f | /tests/basics/class-super.py | 6a87b2fd00c817af2924ff7e056f1ce7aab3b18f | [
"MIT"
] | permissive | aitjcize/micropython | 22b153e88d2f2c8c44c92ac9b9eeee0396ed385a | 203bc98804dd8ad60476b531f29b6658dcffadcf | refs/heads/master | 2020-05-20T17:56:26.304832 | 2014-04-15T11:52:59 | 2014-04-15T11:52:59 | 18,765,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | class Base:
def meth(self):
print("in Base meth")
class Sub(Base):
def meth(self):
print("in Sub meth")
return super().meth()
a = Sub()
a.meth()
| [
"pfalcon@users.sourceforge.net"
] | pfalcon@users.sourceforge.net |
34934364d6e50c0f4baf9fbc1cb465d2decbba84 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/ast_coverage-249.py | 2b992ce35be39fd106d6df923513d9ec2dd95fb2 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | count:int = 0
def foo(s: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[$INT]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
c1847152d25bfcdf25efa91b2c9419a6b9daf9e7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03128/s188279652.py | ab5fe02baf01861864082c97175ad115cba641b4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | # coding: utf-8
import sys
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))
# まずは桁数を増やす→数字の大きい順に並べる
N, M = lr()
A = lr()
matches = [0, 2, 5, 5, 4, 5, 6, 3, 7, 6]
A = [(matches[a], a) for a in A]
A.sort(key = lambda x: x[1], reverse=True)
A.sort(key = lambda x: x[0])
top_match = A[0][0]
dp = [None] * (N+1)
dp[0] = []
used = set()
for match, num in A:
if match in used:
continue
used.add(match)
for x in range(N+1):
if x - match < 0:
continue
if dp[x] == None and dp[x-match] != None:
dp[x] = dp[x-match] + [num]
elif dp[x] != None and dp[x-match] != None:
if len(dp[x-match]) >= len(dp[x]):
dp[x] = dp[x-match] + [num]
elif len(dp[x-match]) >= 1 and len(dp[x-match]) == len(dp[x]) - 1:
y = list(map(str, dp[x][::-1])); y.sort(reverse=True)
z = [str(num)] + list(map(str, dp[x-match][::-1])); z.sort(reverse=True)
y = int(''.join(y))
z = int(''.join(z))
if z > y:
dp[x] = dp[x-match] + [num]
X = dp[N]
X.sort(reverse=True)
answer = ''.join(list(map(str, X)))
print(answer)
# 37
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2e0bd584fcf28fe9b5595a1f73d664b89e5ffe7b | 76a4ef48394cee091c00152de627342fa3b561a2 | /flask/login/bootstrap/wtforms_widget.py | 355554469b965e356923cf2fca2e4cd2a4d2cb26 | [] | no_license | fossabot/beecell | 4e6c80b1e53dcbe7c4eb7f4f8db47ecc89418818 | ca6326d7770c51c76df398e6e33a4a5508259bd6 | refs/heads/master | 2020-03-21T20:49:56.382728 | 2018-06-28T14:25:16 | 2018-06-28T14:25:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,870 | py | '''
Created on Jan 18, 2014
@author: darkbk
'''
from __future__ import unicode_literals
from cgi import escape
from wtforms.compat import text_type, string_types, iteritems
__all__ = (
'CheckboxInput', 'FileInput', 'HiddenInput', 'ListWidget', 'PasswordInput',
'RadioInput', 'Select', 'SubmitInput', 'TableWidget', 'TextArea',
'TextInput', 'Option'
)
def html_params(**kwargs):
"""
Generate HTML parameters from inputted keyword arguments.
The output value is sorted by the passed keys, to provide consistent output
each time this function is called with the same parameters. Because of the
frequent use of the normally reserved keywords `class` and `for`, suffixing
these with an underscore will allow them to be used.
>>> html_params(name='text1', id='f', class_='text') == 'class="text" id="f" name="text1"'
True
"""
params = []
for k,v in sorted(iteritems(kwargs)):
if k in ('class_', 'class__', 'for_'):
k = k[:-1]
if v is True:
params.append(k)
else:
params.append('%s="%s"' % (text_type(k), escape(text_type(v), quote=True)))
return ' '.join(params)
class HTMLString(text_type):
def __html__(self):
return self
class ListWidget(object):
"""
Renders a list of fields as a `ul` or `ol` list.
This is used for fields which encapsulate many inner fields as subfields.
The widget will try to iterate the field to get access to the subfields and
call them to render them.
If `prefix_label` is set, the subfield's label is printed before the field,
otherwise afterwards. The latter is useful for iterating radios or
checkboxes.
"""
def __init__(self, html_tag='ul', prefix_label=True):
assert html_tag in ('ol', 'ul')
self.html_tag = html_tag
self.prefix_label = prefix_label
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
html = ['<%s %s>' % (self.html_tag, html_params(**kwargs))]
for subfield in field:
if self.prefix_label:
html.append('<li>%s %s</li>' % (subfield.label, subfield()))
else:
html.append('<li>%s %s</li>' % (subfield(), subfield.label))
html.append('</%s>' % self.html_tag)
return HTMLString(''.join(html))
class TableWidget(object):
"""
Renders a list of fields as a set of table rows with th/td pairs.
If `with_table_tag` is True, then an enclosing <table> is placed around the
rows.
Hidden fields will not be displayed with a row, instead the field will be
pushed into a subsequent table row to ensure XHTML validity. Hidden fields
at the end of the field list will appear outside the table.
"""
def __init__(self, with_table_tag=True):
self.with_table_tag = with_table_tag
def __call__(self, field, **kwargs):
html = []
if self.with_table_tag:
kwargs.setdefault('id', field.id)
html.append('<table %s>' % html_params(**kwargs))
hidden = ''
for subfield in field:
if subfield.type == 'HiddenField':
hidden += text_type(subfield)
else:
html.append('<tr><th>%s</th><td>%s%s</td></tr>' % (text_type(subfield.label), hidden, text_type(subfield)))
hidden = ''
if self.with_table_tag:
html.append('</table>')
if hidden:
html.append(hidden)
return HTMLString(''.join(html))
class Input(object):
"""
Render a basic ``<input>`` field.
This is used as the basis for most of the other input fields.
By default, the `_value()` method will be called upon the associated field
to provide the ``value=`` HTML attribute.
"""
html_params = staticmethod(html_params)
def __init__(self, input_type=None):
if input_type is not None:
self.input_type = input_type
def __call__(self, field, **kwargs):
#kwargs.setdefault('id', field.id)
#kwargs.setdefault('type', self.input_type)
#kwargs.setdefault('placeholder', field.placeholder)
if 'value' not in kwargs:
value = field._value()
else:
value = kwargs['value']
res = ['<div class=""><div>',
#'<label for="%s" class="col-sm-2 control-label">%s</label>' % (
# field.id, field.label),
#'<div class="col-sm-10">',
'<input type="%s" class="form-control" id="%s" name="%s" value="%s" placeholder="%s">' % (
self.input_type, field.id, field.id, field.default, field.label),
'</div></div>']
return HTMLString(''.join(res))
class TextInput(Input):
"""
Render a single-line text input.
"""
input_type = 'text'
class PasswordInput(Input):
"""
Render a password input.
For security purposes, this field will not reproduce the value on a form
submit by default. To have the value filled in, set `hide_value` to
`False`.
"""
input_type = 'password'
def __init__(self, hide_value=True):
self.hide_value = hide_value
def __call__(self, field, **kwargs):
if self.hide_value:
kwargs['value'] = ''
return super(PasswordInput, self).__call__(field, **kwargs)
class HiddenInput(Input):
"""
Render a hidden input.
"""
input_type = 'hidden'
def __call__(self, field, **kwargs):
res = ['<input type="%s" class="form-control" id="%s" name="%s" value="%s">' % (
self.input_type, field.id, field.id, field.default)]
return HTMLString(''.join(res))
class CheckboxInput(Input):
"""
Render a checkbox.
The ``checked`` HTML attribute is set if the field's data is a non-false value.
"""
input_type = 'checkbox'
def __call__(self, field, **kwargs):
res = ['<div class=""><div>',
#'<div class="col-sm-offset-2 col-sm-10">',
'<div class="checkbox">',
'<label></label>',
'<input type="checkbox">%s' % (field.label),
'<label>',
'</div></div></div>']
return HTMLString(''.join(res))
class RadioInput(Input):
"""
Render a single radio button.
This widget is most commonly used in conjunction with ListWidget or some
other listing, as singular radio buttons are not very useful.
"""
input_type = 'radio'
def __call__(self, field, **kwargs):
if field.checked:
kwargs['checked'] = True
return super(RadioInput, self).__call__(field, **kwargs)
class FileInput(object):
"""
Renders a file input chooser field.
"""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
return HTMLString('<input %s>' % html_params(name=field.name, type='file', **kwargs))
class SubmitInput(Input):
"""
Renders a submit button.
The field's label is used as the text of the submit button instead of the
data on the field.
"""
input_type = 'submit'
def __call__(self, field, **kwargs):
res = ['<div class=""><div>',
#'<div class="col-sm-offset-2 col-sm-10">',
'<button type="submit" id="%s" class="btn btn-lg btn-primary btn-block">%s</button>' % (field.id, field.label),
'</div></div>']
return HTMLString(''.join(res))
class TextArea(object):
"""
Renders a multi-line text area.
`rows` and `cols` ought to be passed as keyword args when rendering.
"""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
return HTMLString('<textarea %s>%s</textarea>' % (html_params(name=field.name, **kwargs), escape(text_type(field._value()))))
class Select(object):
"""
Renders a select field.
If `multiple` is True, then the `size` property should be specified on
rendering to make the field useful.
The field must provide an `iter_choices()` method which the widget will
call on rendering; this method must yield tuples of
`(value, label, selected)`.
"""
def __init__(self, multiple=False):
self.multiple = multiple
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
if self.multiple:
kwargs['multiple'] = True
#html = ['<select %s>' % html_params(name=field.name, **kwargs)]
#for val, label, selected in field.iter_choices():
# html.append(self.render_option(val, label, selected))
#html.append('</select>')
html = [#'<div class="form-group">',
#'<label class="col-sm-2 control-label">%s</label>' % field.label,
#'<div class="col-sm-6">',
'<select class="form-control" id="%s" name="%s">' % (field.id, field.name)]
for val, label, selected in field.iter_choices():
html.append(self.render_option(val, label, selected))
html.append('</select>')
#html.append('</div>')
#html.append('</div>')
return HTMLString(''.join(html))
@classmethod
def render_option(cls, value, label, selected, **kwargs):
options = dict(kwargs, value=value)
if selected:
options['selected'] = True
return HTMLString('<option %s>%s</option>' % (html_params(**options), escape(text_type(label))))
class Option(object):
"""
Renders the individual option from a select field.
This is just a convenience for various custom rendering situations, and an
option by itself does not constitute an entire field.
"""
def __call__(self, field, **kwargs):
return Select.render_option(field._value(), field.label, field.checked, **kwargs) | [
"marco.panepinto@outlook.com"
] | marco.panepinto@outlook.com |
a38abf71e517cf272bf68a584acc8f5119e80b68 | ce083128fa87ca86c65059893aa8882d088461f5 | /python/python-salad-bdd/.venv/lib/python2.7/site-packages/zope/testbrowser/cookies.py | 9497507a71805ddce1f97e07dcf446042a5ad949 | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 13,917 | py | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import datetime
import time
from zope.testbrowser._compat import (httpcookies, urlparse, url_quote,
MutableMapping, urllib_request)
import six
import pytz
import zope.interface
from zope.testbrowser import interfaces, utils
# Cookies class helpers
class BrowserStateError(Exception):
    """Raised for operations that are invalid in the browser's current state."""
class _StubHTTPMessage(object):
def __init__(self, cookies):
self._cookies = cookies
def getheaders(self, name, default=[]):
if name.lower() != 'set-cookie':
return default
else:
return self._cookies
get_all = getheaders
class _StubResponse(object):
    """Minimal fake response wrapping raw ``Set-Cookie`` header values.

    Provides only ``info()`` -- presumably the piece of the urllib
    response protocol that cookie-jar extraction code needs; verify
    against the callers if extending.
    """

    def __init__(self, cookies):
        # *cookies*: list of raw Set-Cookie header value strings.
        self.message = _StubHTTPMessage(cookies)

    def info(self):
        """Return the stub header message object."""
        return self.message
def expiration_string(expires):  # deliberately public: handy in tests
    """Convert *expires* to a cookie-style GMT date string.

    datetime values are rendered as ``Wdy, DD Mon YYYY HH:MM:SS GMT``
    (timezone-aware values are converted to UTC first); any other value
    is returned untouched.
    """
    if not isinstance(expires, datetime.datetime):
        return expires
    stamp = expires
    if stamp.tzinfo is not None:
        stamp = stamp.astimezone(pytz.UTC)
    return stamp.strftime('%a, %d %b %Y %H:%M:%S GMT')
# end Cookies class helpers
@zope.interface.implementer(interfaces.ICookies)
class Cookies(MutableMapping):
    """Mapping-style view of the cookies held by a testbrowser app.

    Presents the cookies from ``testapp.cookiejar`` that apply to ``url``
    as a mutable mapping of cookie name -> value, plus helpers to create,
    change and inspect individual cookies.
    """
    def __init__(self, testapp, url=None, req_headers=None):
        self.testapp = testapp
        self._url = url
        self._jar = testapp.cookiejar
        self._req_headers = req_headers if req_headers is not None else {}
    @property
    def strict_domain_policy(self):
        """True when all three strict domain-matching policy flags are set."""
        policy = self._jar._policy
        flags = (policy.DomainStrictNoDots | policy.DomainRFC2965Match |
                 policy.DomainStrictNonDomain)
        return policy.strict_ns_domain & flags == flags
    @strict_domain_policy.setter
    def strict_domain_policy(self, value):
        jar = self._jar
        policy = jar._policy
        flags = (policy.DomainStrictNoDots | policy.DomainRFC2965Match |
                 policy.DomainStrictNonDomain)
        # Turn all three flags on, then XOR them back off when disabling.
        policy.strict_ns_domain |= flags
        if not value:
            policy.strict_ns_domain ^= flags
    def forURL(self, url):
        """Return a new Cookies view over the same jar for another URL."""
        return self.__class__(self.testapp, url)
    @property
    def url(self):
        return self._url
    @property
    def _request(self):
        # Throwaway request object used to query the cookiejar.
        return urllib_request.Request(self._url)
    @property
    def header(self):
        """The Cookie request-header value for ``url`` ('' when empty)."""
        request = self._request
        self._jar.add_cookie_header(request)
        if not request.has_header('Cookie'):
            return ''
        hdr = request.get_header('Cookie')
        # We need a predictable order of cookies for tests, so we reparse and
        # sort the header here.
        return '; '.join(sorted(hdr.split('; ')))
    def __str__(self):
        return self.header
    def __repr__(self):
        # include the cookies for the current url for easy debugging
        return '<%s.%s object at %r for %s (%s)>' % (
            self.__class__.__module__, self.__class__.__name__,
            id(self), self.url, self.header)
    def _raw_cookies(self):
        """All cookies applying to ``url``, longest path/domain first."""
        # We are using protected cookielib _cookies_for_request() here to avoid
        # code duplication.
        # Comply with cookielib internal protocol.
        self._jar._policy._now = self._jar._now = int(time.time())
        cookies = self._jar._cookies_for_request(self._request)
        # Sort cookies so that the longer paths would come first. This allows
        # masking parent cookies.
        cookies.sort(key=lambda c: (len(c.path), len(c.domain)), reverse=True)
        return cookies
    def _get_cookies(self, key=None):
        """Iterate cookies; without *key*, only the first of each name."""
        if key is None:
            seen = set()
            for ck in self._raw_cookies():
                if ck.name not in seen:
                    yield ck
                    seen.add(ck.name)
        else:
            for ck in self._raw_cookies():
                if ck.name == key:
                    yield ck
    _marker = object()  # sentinel: distinguishes "no default given" from None
    def _get(self, key, default=_marker):
        for ck in self._raw_cookies():
            if ck.name == key:
                return ck
        if default is self._marker:
            raise KeyError(key)
        return default
    def __getitem__(self, key):
        return self._get(key).value
    def getinfo(self, key):
        """Return a dict describing the cookie *key* (see ``_getinfo``)."""
        return self._getinfo(self._get(key))
    def _getinfo(self, ck):
        res = {'name': ck.name,
               'value': ck.value,
               'port': ck.port,
               'domain': ck.domain,
               'path': ck.path,
               'secure': ck.secure,
               'expires': None,
               'comment': ck.comment,
               'commenturl': ck.comment_url}
        if ck.expires is not None:
            # cookielib stores expirations as POSIX timestamps
            res['expires'] = datetime.datetime.fromtimestamp(
                ck.expires, pytz.UTC)
        return res
    def keys(self):
        return [ck.name for ck in self._get_cookies()]
    def __iter__(self):
        return (ck.name for ck in self._get_cookies())
    # NOTE: a dead ``iterkeys = __iter__`` alias used to live here; it was
    # always shadowed by the ``iterkeys`` method defined at the end of this
    # class, so it has been removed.
    def iterinfo(self, key=None):
        """Iterate info dicts for all cookies, or for all named *key*."""
        return (self._getinfo(ck) for ck in self._get_cookies(key))
    def iteritems(self):
        return ((ck.name, ck.value) for ck in self._get_cookies())
    def has_key(self, key):
        return self._get(key, None) is not None
    __contains__ = has_key
    def __len__(self):
        return len(list(self._get_cookies()))
    def __delitem__(self, key):
        ck = self._get(key)
        self._jar.clear(ck.domain, ck.path, ck.name)
    def create(self, name, value,
               domain=None, expires=None, path=None, secure=None, comment=None,
               commenturl=None, port=None):
        """Create a new cookie; raise ValueError if an equivalent exists."""
        if value is None:
            raise ValueError('must provide value')
        ck = self._get(name, None)
        # BUG FIX: the domain test used to repeat ``ck.domain == domain``
        # twice; the duplicate (dead) clause has been removed.
        if (ck is not None and
                (path is None or ck.path == path) and
                (domain is None or ck.domain == domain) and
                (port is None or ck.port == port)):
            # cookie already exists
            raise ValueError('cookie already exists')
        if domain is not None:
            self._verifyDomain(domain, ck)
        if path is not None:
            self._verifyPath(path, ck)
        now = int(time.time())
        if expires is not None and self._is_expired(expires, now):
            # ``interfaces`` is imported directly at the top of this module;
            # referencing it avoids relying on ``zope.testbrowser`` being
            # reachable as an attribute chain.
            raise interfaces.AlreadyExpiredError(
                'May not create a cookie that is immediately expired')
        self._setCookie(name, value, domain, expires, path, secure, comment,
                        commenturl, port, now=now)
    def change(self, name, value=None, domain=None, expires=None, path=None,
               secure=None, comment=None, commenturl=None, port=None):
        """Change attributes of the existing cookie *name*."""
        now = int(time.time())
        if expires is not None and self._is_expired(expires, now):
            # shortcut: an already-passed expiration simply deletes it
            del self[name]
        else:
            self._change(self._get(name), value, domain, expires, path, secure,
                         comment, commenturl, port, now)
    def _change(self, ck, value=None,
                domain=None, expires=None, path=None, secure=None,
                comment=None, commenturl=None, port=None, now=None):
        """Re-set cookie *ck*, keeping any attribute not explicitly given."""
        if value is None:
            value = ck.value
        if domain is None:
            domain = ck.domain
        else:
            self._verifyDomain(domain, None)
        if expires is None:
            expires = ck.expires
        if path is None:
            path = ck.path
        else:
            # BUG FIX: this used to validate ``domain`` instead of the newly
            # supplied ``path``.
            self._verifyPath(path, None)
        if secure is None:
            secure = ck.secure
        if comment is None:
            comment = ck.comment
        if commenturl is None:
            commenturl = ck.comment_url
        if port is None:
            port = ck.port
        self._setCookie(ck.name, value, domain, expires, path, secure, comment,
                        commenturl, port, ck.version, ck=ck, now=now)
    def _verifyDomain(self, domain, ck):
        """Check that *domain* matches ``url`` and hides no other cookie."""
        tmp_domain = domain
        if domain is not None and domain.startswith('.'):
            tmp_domain = domain[1:]
        self_host = utils.effective_request_host(self._request)
        if (self_host != tmp_domain and
                not self_host.endswith('.' + tmp_domain)):
            raise ValueError('current url must match given domain')
        if (ck is not None and ck.domain != tmp_domain and
                ck.domain.endswith(tmp_domain)):
            raise ValueError(
                'cannot set a cookie that will be hidden by another '
                'cookie for this url (%s)' % (self.url,))
    def _verifyPath(self, path, ck):
        """Check that *path* is a prefix of ``url`` and hides no cookie."""
        self_path = urlparse.urlparse(self.url)[2]
        if not self_path.startswith(path):
            raise ValueError('current url must start with path, if given')
        if ck is not None and ck.path != path and ck.path.startswith(path):
            raise ValueError(
                'cannot set a cookie that will be hidden by another '
                'cookie for this url (%s)' % (self.url,))
    def _setCookie(self, name, value, domain, expires, path, secure, comment,
                   commenturl, port, version=None, ck=None, now=None):
        """Build the cookie and push it through the jar's policy checks."""
        for nm, val in self._req_headers.items():
            if nm.lower() in ('cookie', 'cookie2'):
                raise ValueError('cookies are already set in `Cookie` header')
        if domain and not domain.startswith('.'):
            # we do a dance here so that we keep names that have been passed
            # in consistent (i.e., if we get an explicit 'example.com' it stays
            # 'example.com', rather than converting to '.example.com').
            tmp_domain = domain
            domain = None
            if secure:
                protocol = 'https'
            else:
                protocol = 'http'
            url = '%s://%s%s' % (protocol, tmp_domain, path or '/')
            request = urllib_request.Request(url)
        else:
            request = self._request
        if request is None:
            # TODO: fix exception
            raise BrowserStateError(
                'cannot create cookie without request or domain')
        c = httpcookies.SimpleCookie()
        name = str(name)
        # Cookie value must be a native string (bytes on Python 2).
        c[name] = value.encode('utf8') if not six.PY3 else value
        if secure:
            c[name]['secure'] = True
        if domain:
            c[name]['domain'] = domain
        if path:
            c[name]['path'] = path
        if expires:
            c[name]['expires'] = expiration_string(expires)
        if comment:
            c[name]['comment'] = url_quote(
                comment.encode('utf-8'), safe="/?:@&+")
        if port:
            c[name]['port'] = port
        if commenturl:
            c[name]['commenturl'] = commenturl
        if version:
            c[name]['version'] = version
        # this use of objects like _StubResponse and _StubHTTPMessage is in
        # fact supported by the documented client cookie API.
        cookies = self._jar.make_cookies(
            _StubResponse([c.output(header='').strip()]), request)
        assert len(cookies) == 1, (
            'programmer error: %d cookies made' % (len(cookies),))
        policy = self._jar._policy
        if now is None:
            now = int(time.time())
        policy._now = self._jar._now = now  # cookielib internal protocol
        if not policy.set_ok(cookies[0], request):
            raise ValueError('policy does not allow this cookie')
        if ck is not None:
            # replacing an existing cookie: drop the old one first
            self._jar.clear(ck.domain, ck.path, ck.name)
        self._jar.set_cookie(cookies[0])
    def __setitem__(self, key, value):
        ck = self._get(key, None)
        if ck is None:
            self.create(key, value)
        else:
            self._change(ck, value)
    def _is_expired(self, value, now):  # now = int(time.time())
        """True when *value* (datetime or HTTP date string) is <= *now*."""
        dnow = datetime.datetime.fromtimestamp(now, pytz.UTC)
        if isinstance(value, datetime.datetime):
            if value.tzinfo is None:
                # naive datetimes are compared against UTC wall-clock time
                if value <= dnow.replace(tzinfo=None):
                    return True
            elif value <= dnow:
                return True
        elif isinstance(value, six.string_types):
            if datetime.datetime.fromtimestamp(
                    utils.http2time(value), pytz.UTC) <= dnow:
                return True
        return False
    def clear(self):
        # to give expected mapping behavior of resulting in an empty dict,
        # we use _raw_cookies rather than _get_cookies.
        for ck in self._raw_cookies():
            self._jar.clear(ck.domain, ck.path, ck.name)
    def clearAllSession(self):
        self._jar.clear_session_cookies()
    def clearAll(self):
        self._jar.clear()
    def pop(self, k, *args):
        """See zope.interface.common.mapping.IExtendedWriteMapping
        """
        # Python3's MutableMapping doesn't offer pop() with variable
        # arguments, so we are reimplementing it here as defined in
        # IExtendedWriteMapping.
        # BUG FIX: the popped value used to be discarded; return it, as the
        # mapping contract requires.
        return super(Cookies, self).pop(k, *args)
    def itervalues(self):
        # Method missing in Py3's MutableMapping, but required by
        # IIterableMapping
        return self.values()
    def iterkeys(self):
        # Method missing in Py3's MutableMapping, but required by
        # IIterableMapping
        return self.keys()
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br |
3523e3922bad5ab5629c6103291d277ac1c2f3a6 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_11129.py | f39d83392e18135ef51184fa254691203b79401e | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | # Python Package Import Error--Python Doesn't Recognize Package
import parser
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
292d385070572bf7f16310dfa66e9e0cbca0ef5c | 59fb17c240b261040026d713a6ac9c97d6a9f265 | /gym/gym/utils/colorize.py | ac7ea10f6f91bb8e623c462fa4a32657fde70f0a | [
"MIT"
] | permissive | dmeger/TeachingImitation | 3fb97499e76929959913266f127154f6ae5a8e99 | 5f4dba7e49987924c3d55cd27579cad4c71ef7a4 | refs/heads/master | 2023-03-28T13:25:01.307382 | 2021-04-06T15:07:08 | 2021-04-06T15:07:08 | 355,223,500 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | """A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
# ANSI SGR foreground color codes (standard range is 30-37).
# NOTE(review): 38 is actually the extended-color introducer in SGR, so
# "crimson" may not render as a distinct color on all terminals — confirm.
color2num = dict(
    gray=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    magenta=35,
    cyan=36,
    white=37,
    crimson=38
)
def colorize(string, color, bold=False, highlight = False):
    """Wrap *string* in ANSI escape codes so it prints colorized.

    Valid colors: gray, red, green, yellow, blue, magenta, cyan, white,
    crimson.  ``highlight`` shifts the code by +10 (highlight variant)
    and ``bold`` adds the bold attribute.
    """
    code = color2num[color]
    if highlight:
        code += 10
    attributes = [str(code)]
    if bold:
        attributes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attributes), string)
| [
"david.meger@gmail.com"
] | david.meger@gmail.com |
a73ca2447eb10abc6d70d53b6c07766887780a87 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_navahos.py | 2d6837a91058d376c49ef6307b7b9b61348d2dda | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
# class header
class _NAVAHOS():
def __init__(self,):
self.name = "NAVAHOS"
self.definitions = navaho
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['navaho']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
08b66af75db565b43692361530227c0edc3a2a89 | 04b4198a006d4527432ca8de8bf92cc5f9ded3de | /logistic.py | 3a73d7e510fa01243e5e4603d0add8223f4868de | [] | no_license | chenzhengsi1988/try | 4986623077a1bed6f40c3ed0327f1e96eea4a6ef | 5c448a6da317cd0853ec24db108a3e7237a2153e | refs/heads/master | 2021-09-06T17:49:06.923387 | 2018-02-09T08:49:49 | 2018-02-09T08:49:49 | 111,104,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,716 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 11:20:58 2017
@author: zsc
"""
from __future__ import print_function, division
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
from sklearn.cross_validation import train_test_split
import random
# Load the data
data = pd.read_csv("~/ml/data/datatraining.txt")
print(type(data))
print(data)
# Split into train/test sets (features: 5 sensor columns; label: Occupancy)
X_train, X_test, y_train, y_test = train_test_split(
    data[["Temperature", "Humidity", "Light", "CO2", "HumidityRatio"]].values, data["Occupancy"].values.reshape(-1, 1),
    random_state=42)
# One-hot encode the binary labels as two columns [1-y, y].
# NOTE: y_train/y_test become TF tensors here, which is why batches are
# materialized with sess.run() in the training loop below.
# print(y_train.shape)
y_train = tf.concat([1 - y_train, y_train], 1)
y_test = tf.concat([1 - y_test, y_test], 1)
# print(y_train.shape)
# Model hyperparameters
learning_rate = 0.001
training_epoch = 5
batch_size = 100
display_step = 1
n_samples = X_train.shape[0]
# print(n_samples)
n_features = 5
n_class = 2
x = tf.placeholder(tf.float32, [None, n_features])
y = tf.placeholder(tf.float32, [None, n_class])
# Model parameters (weights and bias)
W = tf.Variable(tf.zeros([n_features, n_class]))
b = tf.Variable(tf.zeros([n_class]))
# W = tf.Variable(tf.truncated_normal([n_features, n_class-1]))
# b = tf.Variable(tf.truncated_normal([n_class]))
# Compute predictions with softmax
pred = tf.nn.softmax(tf.matmul(x, W) + b)
# Compute the loss: cross-entropy (relative entropy) between y and pred
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
# Define the optimizer
# optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Accuracy: fraction of argmax matches between prediction and label
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize all variables
init = tf.initialize_all_variables()
aa = list()
bb = list()
# Train the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epoch):
        avg_cost = 0.
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            batch_xs = X_train[i * batch_size: (i + 1) * batch_size]
            batch_ys = sess.run(y_train[i * batch_size: (i + 1) * batch_size])
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print("Epoch:", "%04d" % (epoch + 1), "cost=", avg_cost)
            aa.append(epoch + 1)
            bb.append(avg_cost)
    print("Optimization Finished!")
    print("Testing Accuracy:", accuracy.eval({x: X_train, y: y_train.eval()}))
plt.xlabel("Epoch")
plt.ylabel("Cost")
plt.plot(aa, bb)
plt.show()
| [
"you@example.com"
] | you@example.com |
13dffa6919ac9b8c70595168f8ee24846398cf7e | 48d30fa3d9806fee872e76393e77900c6aab8717 | /djangochat/settings.py | f38b35058f2c0fbb98e59b355a7123e25440709e | [] | no_license | olivx/django-vue-chat | dbaca42d8c4531d4d803e376839e8fb6fbad722b | f4100513a7fa1d170d7a4973b0edb456ade00c2d | refs/heads/master | 2020-04-24T01:07:10.055368 | 2019-02-26T14:52:28 | 2019-02-26T14:52:28 | 171,587,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,571 | py | """
Django settings for djangochat project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Loaded from the environment (.env via python-dotenv); never hard-code it.
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served host names before deploying with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party: auth endpoints (djoser), CORS and Django REST framework
    'djoser',
    'corsheaders',
    'rest_framework',
    'rest_framework.authtoken',
    # local apps
    'core',
]
# BUG FIX: django-cors-headers requires CorsMiddleware to be placed as high
# as possible — in particular *before* CommonMiddleware — otherwise CORS
# headers are not added to responses generated earlier in the chain.  It
# was previously (incorrectly) last in this list.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangochat.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangochat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# PostgreSQL; all credentials come from the environment (.env).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.getenv('DB_NAME'),
        'USER': os.getenv('DB_USER'),
        'PASSWORD': os.getenv('DB_PASSWORD'),
        'HOST': os.getenv('DB_HOST'),
        'PORT': os.getenv('DB_PORT'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# DRF: token authentication only (tokens issued via rest_framework.authtoken)
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
}
# NOTE(review): allowing all origins is fine for development, but should be
# narrowed (e.g. CORS_ORIGIN_WHITELIST) before production use.
CORS_ORIGIN_ALLOW_ALL = True
| [
"oliveiravicente.net@gmail.com"
] | oliveiravicente.net@gmail.com |
52b92cc0359020e208aa18e832e1ad7cec007240 | 51d602577affebc8d91ffe234f926469d389dc75 | /lis/specimen/lab_aliquot/models/base_aliquot.py | 881efd0a10e60181bdeffbc6009a840b110f194b | [] | no_license | botswana-harvard/lis | 5ac491373f74eaf3855f173580b000539d7f4740 | 48dc601ae05e420e8f3ebb5ea398f44f02b2e5e7 | refs/heads/master | 2020-12-29T01:31:07.821681 | 2018-06-24T06:06:57 | 2018-06-24T06:06:57 | 35,820,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | import datetime
from django.utils import timezone
from django.db import models
from django.core.urlresolvers import reverse
from ..choices import ALIQUOT_STATUS, SPECIMEN_MEASURE_UNITS, SPECIMEN_MEDIUM
class BaseAliquot (models.Model):
    """Abstract Django model for a specimen aliquot.

    Tracks identity, lineage (primary/source aliquot), medium, volume and
    status.  Concrete subclasses are expected to provide a ``receive``
    relation (used by save()/drawn()/to_receive() below) and a
    ``get_visit_model`` hook — neither is defined here; TODO confirm
    against the concrete models.
    """
    # Root aliquot of this specimen's lineage; set programmatically.
    primary_aliquot = models.ForeignKey('self',
        null=True,
        related_name='primary',
        editable=False)
    # Immediate parent aliquot; null for the primary tube.
    source_aliquot = models.ForeignKey('self',
        null=True,
        related_name='source',
        editable=False,
        help_text='Aliquot from which this aliquot was created, Leave blank if this is the primary tube')
    aliquot_identifier = models.CharField(
        verbose_name='Aliquot Identifier',
        max_length=25,
        unique=True,
        help_text="Aliquot identifier",
        editable=False)
    aliquot_datetime = models.DateTimeField(
        verbose_name="Date and time aliquot created",
        default=timezone.now)
    count = models.IntegerField(
        editable=False,
        null=True)
    medium = models.CharField(
        verbose_name='Medium',
        max_length=25,
        choices=SPECIMEN_MEDIUM,
        default='TUBE')
    # Volumes are stored as decimals; defaults given as strings are
    # coerced by DecimalField.
    original_measure = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default='5.00')
    current_measure = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default='5.00')
    measure_units = models.CharField(
        max_length=25,
        choices=SPECIMEN_MEASURE_UNITS,
        default='mL')
    status = models.CharField(
        max_length=25,
        choices=ALIQUOT_STATUS,
        default='available')
    comment = models.CharField(
        max_length=50,
        null=True,
        blank=True)
    subject_identifier = models.CharField(
        max_length=50,
        null=True,
        editable=False,
        help_text="non-user helper field to simplify search and filtering")
    is_packed = models.BooleanField(
        verbose_name='packed',
        default=False)
    # Denormalized copy of receive.receive_identifier, kept in sync by save().
    receive_identifier = models.CharField(
        max_length=25,
        null=True,
        editable=False,
        help_text="non-user helper field to simplify search and filter")
    def __str__(self):
        return '%s' % (self.aliquot_identifier)
    def save(self, *args, **kwargs):
        # keep the denormalized receive_identifier current on every save
        self.receive_identifier = self.receive.receive_identifier
        super(BaseAliquot, self).save(*args, **kwargs)
    def natural_key(self):
        """Natural key for serialization: the unique aliquot identifier."""
        return (self.aliquot_identifier,)
    def get_subject_identifier(self):
        return self.subject_identifier
    def get_visit(self):
        # get_visit_model() is presumably provided by a subclass/mixin —
        # TODO confirm; not defined on this abstract base.
        return self.get_visit_model().objects.get(subject_identifier=self.get_subject_identifier())
    def drawn(self):
        """Datetime the specimen was drawn (from the related receive)."""
        return self.receive.drawn_datetime
    def barcode_value(self):
        return self.aliquot_identifier
    def to_receive(self):
        """Admin changelist link (HTML) to the related receive record."""
        url = reverse('admin:{app_label}_receive_changelist'.format(app_label=self._meta.app_label,))
        return '<a href="{url}?q={receive_identifier}">{receive_identifier}</a>'.format(
            url=url, app_label=self._meta.app_label, receive_identifier=self.receive.receive_identifier)
    to_receive.allow_tags = True
    class Meta:
        abstract = True
| [
"ckgathi@gmail.com"
] | ckgathi@gmail.com |
173faf68071a21ad7c81089f2ddfb4d75bfbdac2 | 6930a434c0506d44bf8a8e81cb86e95c219c3a77 | /python/day09/code/dict2.py | ce9b66edc2c41b3b8bb0cbac196501f4f251a01f | [] | no_license | Conquerk/test | ed15d5603538340559556c9e0f20cc61ad3e4486 | 7ff42c99b8a2132c6dd1c73315ff95cfef63a8f6 | refs/heads/master | 2020-04-19T01:47:28.322929 | 2019-01-28T01:52:00 | 2019-01-28T01:52:00 | 167,882,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #字典行参
def fun(**kwargs):
    """Demo of keyword packing: report how many kwargs arrived, then show them."""
    count = len(kwargs)
    print("关键字传参个数是", count)
    print("kwargs", kwargs)
fun(a=1,b="BBBB",c=[2,3,4])  # keyword-argument call: a, b, c are packed into kwargs
fun(a=1,b=2,c=3,d=4) | [
"tarena@tedu.cn"
] | tarena@tedu.cn |
d86dc0cb296c60aa5c62e35bd80bec4fb557775d | 9787a86bd6721062a8cf7cc04c21c092a4aeb4a0 | /dapper/mods/Lorenz63/anderson2010rhf.py | 93c7177874a95116146c42bb3ff40c051691fcd3 | [
"MIT"
] | permissive | 1895-art/DAPPER | 09d8b6a88c8997ad7f190f96930be559b43ee143 | bfc4075782f6b247a70cd5d04d4308d135ea5379 | refs/heads/master | 2023-01-19T23:14:13.370754 | 2020-11-23T16:01:21 | 2020-11-23T16:01:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | """As in Anderson 2010 rank histogram filter"""
import numpy as np
import dapper as dpr
from dapper.mods.Lorenz63 import Tplot, dstep_dx, step, x0
# Experiment chronology: 0.01 integration step, an observation every 12
# steps, 1000 observation epochs.
t = dpr.Chronology(0.01, dkObs=12, KObs=1000, Tplot=Tplot, BurnIn=4*Tplot)
Nx = len(x0)  # state dimension (from the Lorenz-63 initial state)
# Dynamics operator: noise-free model with its tangent linear (dstep_dx)
Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0
}
# Initial ensemble distribution: Gaussian around x0
X0 = dpr.GaussRV(C=2, mu=x0)
# Observe every state component directly (identity observation operator)
Obs = dpr.partial_Id_Obs(Nx, np.arange(Nx))
Obs['noise'] = 8.0  # observation noise level used by Anderson (2010)
HMM = dpr.HiddenMarkovModel(Dyn, Obs, t, X0)
####################
# Suggested tuning
####################
# Compare with Anderson's figure 10.
# Benchmarks are fairly reliable (KObs=2000):
# from dapper.mods.Lorenz63.anderson2010rhf import HMM # rmse.a
# xps += SL_EAKF(N=20,infl=1.01,rot=True,loc_rad=np.inf) # 0.87
# xps += EnKF_N (N=20,rot=True) # 0.87
# xps += RHF (N=50,infl=1.10) # 1.28
# xps += RHF (N=50,infl=0.95,rot=True) # 0.94
# xps += RHF (N=20,infl=0.95,rot=True) # 1.07
| [
"patrick.n.raanes@gmail.com"
] | patrick.n.raanes@gmail.com |
086b12d7a882e9d4d5550998cef39d299a4472d5 | 929e65367e6cd2115990456b017c16938e1012b1 | /Import package.py | 0dade4b7680e697e7bafb0ac74f5aeeb6100d2e7 | [] | no_license | abhiwer/Introduction-to-Python | 261437a5ec5929fef92f467d75347d1c1c8aae2d | 9fed8cb117c441659196a901fad9ea93f59deeb5 | refs/heads/master | 2022-11-29T23:56:11.828440 | 2020-08-08T17:32:39 | 2020-08-08T17:32:39 | 286,092,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py |
'''
Import package
As a data scientist, some notions of geometry never hurt. Let's refresh some of the basics.
For a fancy clustering algorithm, you want to find the circumference, C, and area, A, of a circle. When the radius of the circle is r, you can calculate C and A as:
C=2πr
A=πr2
To use the constant pi, you'll need the math package. A variable r is already coded in the script. Fill in the code to calculate C and A and see how the print() functions create some nice printouts.
Instructions
100 XP
Import the math package. Now you can access the constant pi with math.pi.
Calculate the circumference of the circle and store it in C.
Calculate the area of the circle and store it in A.
Take Hint (-30 XP)
'''
# Definition of radius
r = 0.43
# Import the math package
import math
# Calculate C
C = 2*math.pi*r
# Calculate A
A = math.pi*r*r
# Build printout
print("Circumference: " + str(C))
print("Area: " + str(A)) | [
"rajanpan97@gmail.com"
] | rajanpan97@gmail.com |
eaf64dbae77589cd3f1c16332b15cd409f93a1db | 8b53a8b9803d92003f3a3a9e1b08def7642ba35d | /TALLERES/TAL7_FUNCIONES_20210316_cur/ej23_funcionesreturn_listapositnegat.py | cc1e48bcad49794b734bf0271aba9bb904e9bb83 | [] | no_license | smarulan613/fundamentos_prog_20211_sebasmc | 637cdf9e1f61de0f876fe74530df4e6a5b40d6a6 | 0a87d81dae2bd5656a3e6a521585da661efe6cf6 | refs/heads/main | 2023-05-02T04:26:47.035698 | 2021-05-27T03:37:05 | 2021-05-27T03:37:05 | 356,059,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 15:14:20 2021
@author: Sebastian Marulanda Correa
Ejercicio 23 curso. funciones Python
Confeccionar un programa que permita:
1) Cargar una lista de 10 elementos enteros.
2) Generar dos listas a partir de la primera. En una guardar los valores
positivos y en otra los negativos.
3) Imprimir las dos listas generadas.
"""
def cargar():
    """Prompt for 10 integers on the keyboard and return them as a list."""
    return [int(input("Ingrese valor:")) for _ in range(10)]
def generar_listas(lista):
    """Split *lista* into negatives and positives (zeros are discarded).

    Returns [negatives, positives], each preserving the original order.
    (Rewritten from an index-based loop with nested ifs into two list
    comprehensions; behavior is unchanged.)
    """
    listanega = [valor for valor in lista if valor < 0]
    listaposi = [valor for valor in lista if valor > 0]
    return [listanega, listaposi]
def imprimir(lista):
    """Print each element of *lista* on its own line."""
    for elemento in lista:
        print(elemento)
# Main program: read 10 integers, split them into negatives/positives
# and print each resulting list.
lista=cargar()
listanega,listaposi=generar_listas(lista)
print("Lista con los valores negativos")
imprimir(listanega)
print("Lista con los valores positivos")
imprimir(listaposi) | [
"noreply@github.com"
] | smarulan613.noreply@github.com |
0181873a9410e165b4d662fca16012fd3bc2c05e | 9a9d6052f8cf91dd57be9a9b6564290b0fac9e52 | /Algorithm/JUNGOL/1. Language_Coder/배열1/557_배열1_자가진단3.py | d2eb1da75defeca5b3a9fc5fcff83988dda3f204 | [] | no_license | Gyeong-Yeon/TIL | 596ec6a093eec34a17dad68bcd91fa9dd08690e8 | eb1f43ee0525da93233b70716cd35caab8d82bda | refs/heads/master | 2023-03-31T19:56:30.979062 | 2021-03-28T13:09:27 | 2021-03-28T13:09:27 | 280,307,737 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | arr = [' '] + list(map(str,input().split()))
for i in range(1,10):
if i % 3 == 1:
print(arr[i], end=" ") | [
"lky4156@naver.com"
] | lky4156@naver.com |
86b7939905609e12215640e9f40f41369636f45e | 8b86009da06c2ac987e2a02dcbce15ebeb0e559c | /hyperanalysis/decomposition/base.py | 0c14e91c87b74085fb26add1c471d1db2404d2f4 | [
"MIT"
] | permissive | jiangqn/hyperanalysis-archive | df1a7373f066f8a572242dcd5ff39f1bbdd53f0a | 1e68d4a8674a2fbe7ae7566d1abd3881167039e5 | refs/heads/main | 2023-03-05T11:19:59.907392 | 2021-02-23T05:33:50 | 2021-02-23T05:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py | import torch
from typing import Tuple
class Decomposition(object):
    """Common base for all dimension-reduction models.

    Tracks the target dimensionality and whether the model has been
    fitted; concrete fitting/transforming APIs live in the subclasses.
    """
    def __init__(self, n_components: int) -> None:
        super().__init__()
        self._n_components = n_components
        self._is_trained = False

    @property
    def n_components(self) -> int:
        """Target number of output dimensions."""
        return self._n_components

    @property
    def is_trained(self) -> bool:
        """Whether ``fit`` has completed successfully."""
        return self._is_trained
class UnsupervisedDecomposition(Decomposition):
    """
    The base class of unsupervised dimension reduction models.

    Subclasses implement ``_fit`` and ``_transform``; this class supplies
    the public fit/transform/fit_transform workflow and input validation.
    """
    def __init__(self, n_components: int) -> None:
        super(UnsupervisedDecomposition, self).__init__(n_components)
    def fit(self, X: torch.Tensor) -> None:
        # validate, delegate to the subclass hook, then mark as trained
        self._validate_inputs(X)
        self._fit(X)
        self._is_trained = True
    def transform(self, X: torch.Tensor) -> torch.Tensor:
        # transform requires a previous successful fit()
        self._validate_inputs(X)
        assert self.is_trained
        return self._transform(X)
    def fit_transform(self, X: torch.Tensor) -> torch.Tensor:
        """
        Fit on X and immediately return the transformed X.

        :param X: FloatTensor (num, dim)
        """
        self._validate_inputs(X)
        self._fit(X)
        self._is_trained = True
        return self._transform(X)
    def _fit(self, X: torch.Tensor) -> None:
        """
        Subclass hook: learn model parameters from X.

        :param X: FloatTensor (num, dim)
        """
        raise NotImplementedError("The _fit method is not implemented in the UnsupervisedDecomposition class.")
    def _transform(self, X: torch.Tensor) -> torch.Tensor:
        """
        Subclass hook: map X to the reduced space.

        :param X: FloatTensor (num, dim)
        """
        raise NotImplementedError("The _transform method is not implemented in the UnsupervisedDecomposition class.")
    def _validate_inputs(self, X: torch.Tensor) -> None:
        """
        Check that X is a 2-D torch.Tensor.

        :param X: FloatTensor (num, dim)
        """
        assert isinstance(X, torch.Tensor), "The type of input X is wrong."
        assert len(X.size()) == 2, "This size of input X is wrong."
class SupervisedDecomposition(Decomposition):
    """
    The base class of supervised dimension reduction models.

    Fitting consumes features X and targets y; transforming only needs X.
    Subclasses implement ``_fit`` and ``_transform``.
    """
    def __init__(self, n_components: int) -> None:
        super(SupervisedDecomposition, self).__init__(n_components)
    def fit(self, X: torch.Tensor, y: torch.Tensor) -> None:
        # validate, delegate to the subclass hook, then mark as trained
        self._validate_inputs(X, y)
        self._fit(X, y)
        self._is_trained = True
    def transform(self, X: torch.Tensor) -> torch.Tensor:
        # labels are only used during fitting, so transform takes X alone
        self._validate_inputs(X)
        assert self.is_trained
        return self._transform(X)
    def fit_transform(self, X: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # fit on (X, y), then return the transformed X
        self._validate_inputs(X, y)
        self._fit(X, y)
        self._is_trained = True
        return self._transform(X)
    def _fit(self, X: torch.Tensor, y: torch.Tensor) -> None:
        """
        Subclass hook: learn model parameters from (X, y).

        :param X: FloatTensor (num, dim)
        :param y: FloatTensor (num,) or LongTensor (num,)
        """
        raise NotImplementedError("The _fit method is not implemented in the SupervisedDecomposition class.")
    def _transform(self, X: torch.Tensor) -> torch.Tensor:
        """
        Subclass hook: map X to the reduced space.

        :param X: FloatTensor (num, dim)
        """
        raise NotImplementedError("The _transform method is not implemented in the SupervisedDecomposition class.")
    def _validate_inputs(self, X: torch.Tensor, y: torch.Tensor = None) -> None:
        """
        Check X is a 2-D tensor and, when given, y is 1-D with matching rows.

        :param X: FloatTensor (num, dim)
        :param y: FloatTensor (num,) or LongTensor (num,)
        """
        assert isinstance(X, torch.Tensor), "The type of input X is wrong."
        assert len(X.size()) == 2, "This size of input X is wrong."
        if y is not None:
            assert isinstance(y, torch.Tensor), "The type of input y is wrong."
            assert len(y.size()) == 1, "This size of input y is wrong."
            assert X.size(0) == y.size(0), "The num of X is not equal to y."
class CrossDecomposition(Decomposition):
    """
    The base class of cross dimension reduction models.

    Operates on two paired datasets X and Y (same number of rows) and
    produces a reduced representation of each.  Subclasses implement
    ``_fit`` and ``_transform``.
    """
    def __init__(self, n_components: int) -> None:
        super(CrossDecomposition, self).__init__(n_components)
    def fit(self, X: torch.Tensor, Y: torch.Tensor) -> None:
        # validate, delegate to the subclass hook, then mark as trained
        self._validate_inputs(X, Y)
        self._fit(X, Y)
        self._is_trained = True
    def transform(self, X: torch.Tensor, Y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # transform requires a previous successful fit()
        self._validate_inputs(X, Y)
        assert self.is_trained
        return self._transform(X, Y)
    def fit_transform(self, X: torch.Tensor, Y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # fit on (X, Y), then return both transformed views
        self._validate_inputs(X, Y)
        self._fit(X, Y)
        self._is_trained = True
        return self._transform(X, Y)
    def _fit(self, X: torch.Tensor, Y: torch.Tensor) -> None:
        """
        Subclass hook: learn model parameters from (X, Y).

        :param X: FloatTensor (num, dim1)
        :param Y: FloatTensor (num, dim2)
        """
        raise NotImplementedError("The _fit method is not implemented in the CrossDecomposition class.")
    def _transform(self, X: torch.Tensor, Y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Subclass hook: map both datasets to the shared reduced space.

        :param X: FloatTensor (num, dim1)
        :param Y: FloatTensor (num, dim2)
        :return X': FloatTensor (num, n_components)
        :return Y': FloatTensor (num, n_components)
        """
        raise NotImplementedError("The _transform method is not implemented in the CrossDecomposition class.")
    def _validate_inputs(self, X: torch.Tensor, Y: torch.Tensor) -> None:
        """
        Check X and Y are 2-D tensors with the same number of rows.

        :param X: FloatTensor (num, dim1)
        :param Y: FloatTensor (num, dim2)
        """
        assert isinstance(X, torch.Tensor), "The type of input X is wrong."
        assert len(X.size()) == 2, "This size of input X is wrong."
        assert isinstance(Y, torch.Tensor), "The type of input Y is wrong."
        assert len(Y.size()) == 2, "This size of input Y is wrong."
assert X.size(0) == Y.size(0), "The num of X is not equal to Y." | [
"1935448858@qq.com"
] | 1935448858@qq.com |
f5e5f08367caa58f682b03c02fa22c8c90e2a22a | 6929a33a7259dad9b45192ca088a492085ed2953 | /solutions/0283-move-zeroes/move-zeroes.py | af0c715153e20b1875b77015c630bf46ec33870e | [] | no_license | moqi112358/leetcode | 70366d29c474d19c43180fd4c282cc02c890af03 | fab9433ff7f66d00023e3af271cf309b2d481722 | refs/heads/master | 2022-12-10T01:46:14.799231 | 2021-01-14T05:00:09 | 2021-01-14T05:00:09 | 218,163,960 | 3 | 0 | null | 2022-07-06T20:26:38 | 2019-10-28T23:26:47 | Python | UTF-8 | Python | false | false | 778 | py | # Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
#
# Example:
#
#
# Input: [0,1,0,3,12]
# Output: [1,3,12,0,0]
#
# Note:
#
#
# You must do this in-place without making a copy of the array.
# Minimize the total number of operations.
#
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
i, j = 0, 0
while j < len(nums):
if nums[i] != 0:
i += 1
j += 1
elif nums[i] == 0:
if nums[j] != 0:
nums[i], nums[j] = nums[j], nums[i]
else:
j += 1
| [
"983028670@qq.com"
] | 983028670@qq.com |
030f2a1c40bbb4791c369f6c2ad71a3cc9152459 | a2d9dd4b373f4dad4e966f59436fad7ce2ef3459 | /workspace/src/Kalibr/aslam_offline_calibration/kalibr/python/kalibr_rs_camera_calibration/RsCalibrator.py | 1e8ececa39af441eca4fc33fcae6cd304d05d22a | [
"BSD-3-Clause"
] | permissive | luoxz-ai/Drone-trajectory-tracking-and-replication | 7978c3a2bd57d18a9b59c1687898ce85bf108740 | f1dae18d43a36affb71c736abd87c6b267ea592e | refs/heads/master | 2023-07-08T04:00:21.673758 | 2019-10-06T20:33:32 | 2019-10-06T20:33:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,550 | py | # encoding:UTF-8
import sm
import aslam_backend as aopt
import aslam_cv_backend as acvb
import aslam_cv as acv
import aslam_splines as asp
import incremental_calibration as inc
import bsplines
import numpy as np
import multiprocessing
import sys
import gc
import math
from ReprojectionErrorKnotSequenceUpdateStrategy import *
from RsPlot import plotSpline
from RsPlot import plotSplineValues
import pylab as pl
import pdb
# make numpy print prettier
np.set_printoptions(suppress=True)
CALIBRATION_GROUP_ID = 0
class RsCalibratorConfiguration(object):
    """Tunable settings for :class:`RsCalibrator`.

    All values are class-level defaults; override them on an instance before
    passing it to ``RsCalibrator.calibrate``.

    NOTE(review): ``estimateParameters`` is a class-level mutable dict, so
    ``validate`` mutating ``self.estimateParameters['shutter']`` alters the
    dict shared by every instance — confirm this is intended before reuse.
    """
    deltaX = 1e-8
    """Stopping criterion for the optimizer on x"""
    deltaJ = 1e-4
    """Stopping criterion for the optimizer on J"""
    maxNumberOfIterations = 20
    """Maximum number of iterations of the batch optimizer"""
    maxKnotPlacementIterations = 10
    """Maximum number of iterations to take in the adaptive knot-placement step"""
    adaptiveKnotPlacement = True
    """Whether to enable adaptive knot placement"""
    knotUpdateStrategy = ReprojectionErrorKnotSequenceUpdateStrategy
    """The adaptive knot placement strategy to use"""
    timeOffsetConstantSparsityPattern = 0.08
    """A time offset to pad the blocks generated in the hessian/jacobian to ensure a constant symbolic representation
    of the batch estimation problem, even when a change in the shutter timing shifts the capture time to another
    spline segment.
    """
    inverseFeatureCovariance = 1 / 0.26
    """The inverse covariance of the feature detector. Used to standardize the error terms."""
    estimateParameters = {'shutter': True, 'intrinsics': True, 'distortion': True, 'pose': True, 'landmarks': False}
    """Which parameters to estimate. Dictionary with shutter, intrinsics, distortion, pose, landmarks as bool"""
    splineOrder = 4
    """Order of the spline to use for ct-parametrization"""
    timeOffsetPadding = 0.05
    """Time offset to add to the beginning and end of the spline to ensure we remain
    in-bounds while estimating time-depending parameters that shift the spline.
    """
    numberOfKnots = None
    """Set to an integer to start with a fixed number of uniformly distributed knots on the spline."""
    W = None
    """6x6 diagonal matrix with a weak motion prior"""
    framerate = 30
    """The approximate framerate of the camera. Required as approximate threshold in adaptive
    knot placement and for initializing a knot sequence if no number of knots is given.
    """

    def validate(self, isRollingShutter):
        """Validate the configuration."""
        # only rolling shutters can be estimated
        # (global-shutter cameras get shutter estimation and adaptive knot
        # placement forcibly disabled here)
        if (not isRollingShutter):
            self.estimateParameters['shutter'] = False
            self.adaptiveKnotPlacement = False
class RsCalibrator(object):
    """Continuous-time (B-spline) camera calibrator for rolling/global shutter cameras.

    Workflow (see :meth:`calibrate`): initial intrinsic/extrinsic guesses, a
    pose B-spline fitted to the PnP poses, then batch optimization of camera
    and pose design variables, optionally iterating with adaptive knot
    placement.
    """
    __observations = None
    """Store the list of observations"""
    __cameraGeometry = None
    """The geometry container of which the calibration is performed."""
    __camera = None
    """The camera geometry itself."""
    __camera_dv = None
    """The camera design variable"""
    __cameraModelFactory = None
    """Factory object that can create typed objects for a camera (error terms, frames, design variables etc)"""
    __poseSpline = None
    """The spline describing the pose of the camera"""
    __poseSpline_dv = None
    """The design variable representation of the pose spline of the camera"""
    __config = None
    """Configuration container \see RsCalibratorConfiguration"""
    __frames = []
    """All frames observed"""
    __reprojection_errors = []
    """Reprojection errors of the latest optimizer iteration"""

    def calibrate(self,
                  cameraGeometry,
                  observations,
                  config
                  ):
        """
        Run the full calibration pipeline.

        A Motion regularization term is added with low a priori knowledge to avoid
        diverging parts in the spline if too many knots are selected/provided or if
        no image information is available for long sequences and to regularize the
        last few frames (which typically contain no image information but need to have
        knots to /close/ the spline).
        Kwargs:
            cameraGeometry (kcc.CameraGeometry): a camera geometry object with an initialized target
            observations ([]: The list of observation \see extractCornersFromDataset
            config (RsCalibratorConfiguration): calibration configuration
        """
        ## set internal objects
        self.__observations = observations
        self.__cameraGeometry = cameraGeometry
        self.__cameraModelFactory = cameraGeometry.model
        self.__camera_dv = cameraGeometry.dv
        self.__camera = cameraGeometry.geometry
        self.__config = config
        # downgrades the config (shutter estimation / knot placement off) for global shutters
        self.__config.validate(self.__isRollingShutter())
        # obtain initial guesses for extrinsics and intrinsics
        if (not self.__generateIntrinsicsInitialGuess()):
            sm.logError("Could not generate initial guess.")
        # obtain the extrinsic initial guess for every observation
        self.__generateExtrinsicsInitialGuess()
        # set the value for the motion prior term or uses the defaults
        W = self.__getMotionModelPriorOrDefault()
        self.__poseSpline = self.__generateInitialSpline(
            self.__config.splineOrder,
            self.__config.timeOffsetPadding,
            self.__config.numberOfKnots,
            self.__config.framerate
        )
        # build estimator problem
        optimisation_problem = self.__buildOptimizationProblem(W)
        self.__runOptimization(
            optimisation_problem,
            self.__config.deltaJ,
            self.__config.deltaX,
            self.__config.maxNumberOfIterations
        )
        # continue with knot replacement
        if self.__config.adaptiveKnotPlacement:
            knotUpdateStrategy = self.__config.knotUpdateStrategy(self.__config.framerate)
            for iteration in range(self.__config.maxKnotPlacementIterations):
                # generate the new knots list
                [knots, requiresUpdate] = knotUpdateStrategy.generateKnotList(
                    self.__reprojection_errors,
                    self.__poseSpline_dv.spline()
                )
                # if no new knotlist was generated, we are done.
                if (not requiresUpdate):
                    break;
                # otherwise update the spline dv and rebuild the problem
                self.__poseSpline = knotUpdateStrategy.getUpdatedSpline(self.__poseSpline_dv.spline(), knots,
                                                                        self.__config.splineOrder)
                optimisation_problem = self.__buildOptimizationProblem(W)
                self.__runOptimization(
                    optimisation_problem,
                    self.__config.deltaJ,
                    self.__config.deltaX,
                    self.__config.maxNumberOfIterations
                )
        self.__printResults()

    def __generateExtrinsicsInitialGuess(self):
        """Estimate the pose of the camera with a PnP solver. Call after initializing the intrinsics"""
        # estimate and set T_c in the observations
        for idx, observation in enumerate(self.__observations):
            (success, T_t_c) = self.__camera.estimateTransformation(observation)
            if (success):
                observation.set_T_t_c(T_t_c)
            else:
                # failed observations keep whatever T_t_c they already had
                sm.logWarn("Could not estimate T_t_c for observation at index {0}".format(idx))
        return

    def __generateIntrinsicsInitialGuess(self):
        """
        Get an initial guess for the camera geometry (intrinsics, distortion). Distortion is typically left as 0,0,0,0.
        The parameters of the geometryModel are updated in place.

        :return: whatever initializeIntrinsics reports (truthy on success)
        """
        if (self.__isRollingShutter()):
            # seed the line delay as (frame period) / (number of sensor rows)
            sensorRows = self.__observations[0].imRows()
            self.__camera.shutter().setParameters(np.array([1.0 / self.__config.framerate / float(sensorRows)]))
        return self.__camera.initializeIntrinsics(self.__observations)

    def __getMotionModelPriorOrDefault(self):
        """Get the motion model prior or the default value"""
        W = self.__config.W
        if W is None:
            # weak default: translation terms weighted 1e-3, rotation 1, all scaled by 1e-2
            W = np.eye(6)
            W[:3, :3] *= 1e-3
            W[3:, 3:] *= 1
            W *= 1e-2
        return W

    def __generateInitialSpline(self, splineOrder, timeOffsetPadding, numberOfKnots=None, framerate=None):
        """Fit an initial pose B-spline through the per-observation PnP poses.

        The time range is padded on both ends so time-shifting parameters stay
        in-bounds during optimization.
        """
        poseSpline = bsplines.BSplinePose(splineOrder, sm.RotationVector())
        # Get the observation times.
        times = np.array([observation.time().toSec() for observation in self.__observations])
        # get the pose values of the initial transformations at observation time
        curve = np.matrix(
            [poseSpline.transformationToCurveValue(observation.T_t_c().T()) for observation in self.__observations]).T
        # make sure all values are well defined
        if np.isnan(curve).any():
            raise RuntimeError("Nans in curve values")
            # NOTE(review): unreachable — the raise above always fires first
            sys.exit(0)
        # Add 2 seconds on either end to allow the spline to slide during optimization
        times = np.hstack((times[0] - (timeOffsetPadding * 2.0), times, times[-1] + (timeOffsetPadding * 2.0)))
        curve = np.hstack((curve[:, 0], curve, curve[:, -1]))
        self.__ensureContinuousRotationVectors(curve)
        seconds = times[-1] - times[0]
        # fixed number of knots
        if (numberOfKnots is not None):
            knots = numberOfKnots
        # otherwise with framerate estimate
        else:
            knots = int(round(seconds * framerate / 3))
        print
        # NOTE(review): the knots-per-second figure printed here is the literal
        # 100, not the computed rate (knots / seconds) — confirm before trusting
        # this log line.
        print "Initializing a pose spline with %d knots (%f knots per second over %f seconds)" % (knots, 100, seconds)
        poseSpline.initPoseSplineSparse(times, curve, knots, 1e-4)
        return poseSpline

    def __buildOptimizationProblem(self, W):
        """Build the optimisation problem: landmarks, camera and pose design
        variables, a motion prior, and one reprojection error per observed corner."""
        problem = inc.CalibrationOptimizationProblem()
        # Initialize all design variables.
        self.__initPoseDesignVariables(problem)
        #####
        ## build error terms and add to problem
        # store all frames
        self.__frames = []
        self.__reprojection_errors = []
        # This code assumes that the order of the landmarks in the observations
        # is invariant across all observations. At least for the chessboards it is true.
        #####
        # add all the landmarks once
        landmarks = []
        landmarks_expr = {}
        keypoint_ids0 = self.__observations[0].getCornersIdx()
        for idx, landmark in enumerate(self.__observations[0].getCornersTargetFrame()):
            # design variable for landmark
            landmark_w_dv = aopt.HomogeneousPointDv(sm.toHomogeneous(landmark))
            landmark_w_dv.setActive(self.__config.estimateParameters['landmarks']);
            landmarks.append(landmark_w_dv)
            landmarks_expr[keypoint_ids0[idx]] = landmark_w_dv.toExpression()
            problem.addDesignVariable(landmark_w_dv, CALIBRATION_GROUP_ID)
        #####
        # activate design variables
        self.__camera_dv.setActive(
            self.__config.estimateParameters['intrinsics'],
            self.__config.estimateParameters['distortion'],
            self.__config.estimateParameters['shutter']
        )
        #####
        # Add design variables
        # add the camera design variables last for optimal sparsity patterns
        problem.addDesignVariable(self.__camera_dv.shutterDesignVariable(), CALIBRATION_GROUP_ID)
        problem.addDesignVariable(self.__camera_dv.projectionDesignVariable(), CALIBRATION_GROUP_ID)
        problem.addDesignVariable(self.__camera_dv.distortionDesignVariable(), CALIBRATION_GROUP_ID)
        #####
        # Regularization term / motion prior
        motionError = asp.BSplineMotionError(self.__poseSpline_dv, W)
        problem.addErrorTerm(motionError)
        #####
        # add a reprojection error for every corner of each observation
        for frameid, observation in enumerate(self.__observations):
            # only process successful observations of a pattern
            if (observation.hasSuccessfulObservation()):
                # add a frame
                frame = self.__cameraModelFactory.frameType()
                frame.setGeometry(self.__camera)
                frame.setTime(observation.time())
                self.__frames.append(frame)
                #####
                # add an error term for every observed corner
                for index, point in enumerate(observation.getCornersImageFrame()):
                    # keypoint time offset by line delay as expression type
                    keypoint_time = self.__camera_dv.keypointTime(frame.time(), point)
                    # from target to world transformation.
                    T_w_t = self.__poseSpline_dv.transformationAtTime(
                        keypoint_time,
                        self.__config.timeOffsetConstantSparsityPattern,
                        self.__config.timeOffsetConstantSparsityPattern
                    )
                    T_t_w = T_w_t.inverse()
                    # we only have the the first image's design variables
                    # so any landmark that is not in that frame won't be in the problem
                    # thus we must skip those measurements that are of a keypoint that isn't visible
                    keypoint_ids = observation.getCornersIdx()
                    if not np.any(keypoint_ids[index] == keypoint_ids0):
                        sm.logWarn("landmark {0} in frame {1} not in first frame".format(keypoint_ids[index], frameid))
                        continue
                    # transform target point to camera frame
                    p_t = T_t_w * landmarks_expr[keypoint_ids[index]]
                    # create the keypoint
                    keypoint_index = frame.numKeypoints()
                    keypoint = acv.Keypoint2()
                    keypoint.setMeasurement(point)
                    inverseFeatureCovariance = self.__config.inverseFeatureCovariance;
                    keypoint.setInverseMeasurementCovariance(np.eye(len(point)) * inverseFeatureCovariance)
                    keypoint.setLandmarkId(keypoint_index)
                    frame.addKeypoint(keypoint)
                    # create reprojection error
                    reprojection_error = self.__buildErrorTerm(
                        frame,
                        keypoint_index,
                        p_t,
                        self.__camera_dv,
                        self.__poseSpline_dv
                    )
                    self.__reprojection_errors.append(reprojection_error)
                    problem.addErrorTerm(reprojection_error)
        return problem

    def __buildErrorTerm(self, frame, keypoint_index, p_t, camera_dv, poseSpline_dv):
        """
        Build an error term that considers the shutter type. A Global Shutter camera gets the standard reprojection error
        a Rolling Shutter gets the adaptive covariance error term that considers the camera motion.
        """
        # it is a global shutter camera -> no covariance error
        # (i.e. the rolling-shutter branch below adds the adaptive covariance)
        if (self.__isRollingShutter()):
            return self.__cameraModelFactory.reprojectionErrorAdaptiveCovariance(
                frame,
                keypoint_index,
                p_t,
                camera_dv,
                poseSpline_dv
            )
        else:
            return self.__cameraModelFactory.reprojectionError(
                frame,
                keypoint_index,
                p_t,
                camera_dv
            )

    def __ensureContinuousRotationVectors(self, curve):
        """
        Ensures that the rotation vector does not flip and enables a continuous trajectory modeling.
        Updates curves in place.
        """
        for i in range(1, curve.shape[1]):
            previousRotationVector = curve[3:6, i - 1]
            r = curve[3:6, i]
            angle = np.linalg.norm(r)
            axis = r / angle
            best_r = r
            best_dist = np.linalg.norm(best_r - previousRotationVector)
            # try the 2*pi*s-shifted representations of the same rotation and
            # keep the one closest to the previous sample
            for s in range(-3, 4):
                aa = axis * (angle + math.pi * 2.0 * s)
                dist = np.linalg.norm(aa - previousRotationVector)
                if dist < best_dist:
                    best_r = aa
                    best_dist = dist
            curve[3:6, i] = best_r;

    def __initPoseDesignVariables(self, problem):
        """Get the design variable representation of the pose spline and add them to the problem"""
        # get the design variable
        self.__poseSpline_dv = asp.BSplinePoseDesignVariable(self.__poseSpline)
        # activate all contained dv and add to problem
        for i in range(0, self.__poseSpline_dv.numDesignVariables()):
            dv = self.__poseSpline_dv.designVariable(i)
            dv.setActive(self.__config.estimateParameters['pose'])
            problem.addDesignVariable(dv, CALIBRATION_GROUP_ID)

    def __runOptimization(self, problem, deltaJ, deltaX, maxIt):
        """Run the given optimization problem"""
        print "run new optimisation with initial values:"
        self.__printResults()
        # verbose and choldmod solving with schur complement trick
        options = aopt.Optimizer2Options()
        options.verbose = True
        options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
        options.doSchurComplement = True
        # stopping criteria
        options.maxIterations = maxIt
        options.convergenceDeltaJ = deltaJ
        options.convergenceDeltaX = deltaX
        # use the dogleg trustregion policy
        options.trustRegionPolicy = aopt.DogLegTrustRegionPolicy()
        # create the optimizer
        optimizer = aopt.Optimizer2(options)
        optimizer.setProblem(problem)
        # go for it:
        return optimizer.optimize()

    def __isRollingShutter(self):
        # True when the camera model factory declares a rolling shutter type
        return self.__cameraModelFactory.shutterType == acv.RollingShutter

    # NOTE(review): imported but unused in this class — confirm before removing
    from pprint import pprint

    def __printResults(self):
        """Print the current shutter/intrinsic/distortion estimates and the
        last reprojection error to stdout (Python 2 print statements)."""
        shutter = self.__camera_dv.shutterDesignVariable().value()
        proj = self.__camera_dv.projectionDesignVariable().value()
        dist = self.__camera_dv.distortionDesignVariable().value()
        proj_errors = self.__reprojection_errors
        print
        if (self.__isRollingShutter()):
            print "LineDelay:"
            print shutter.lineDelay()
        print "Intrinsics:"
        print proj.getParameters().flatten()
        print "Distortion:"
        print dist.getParameters().flatten()
        print "projection Error:"
        # plotSpline(self.__poseSpline)
        # plotSpline(self.__poseSpline_dv.spline)
        # plotSplineValues(self.__poseSpline)
        # plotSplineValues(self.__poseSpline_dv.spline)
        # self.__dump(proj_errors[-1], 0)
        item = proj_errors[-1]
        print ("name:", item.getMEstimatorName())
        print ("RawSquaredError:", item.getRawSquaredError())
        print ("WeightedSquaredError:", item.getWeightedSquaredError())

    def __dump(self, obj, PREFIX):
        """Debug helper: recursively print an object's attributes (PREFIX is
        the recursion depth, printed before each line)."""
        for attr in dir(obj):
            if hasattr(obj, attr):
                print(PREFIX,"obj.%s = %s" % (attr, getattr(obj, attr)))
                if attr == "error" or attr == "getRawSquaredError" or attr == "getWeightedSquaredError":
                    self.__dump(getattr(obj, attr), PREFIX+1)
| [
"jonas.le.fevre@gmail.com"
] | jonas.le.fevre@gmail.com |
830d9c3bc9d6971c814cf9ad5c519397f7b4e369 | baff68c47362e9911b415e2d68b470a33da968ae | /src/add_metadata.py | 154521b3f22a398fb84578e0be287c216430a487 | [] | no_license | Assimila/ClimateRiskDisclosure | 6eacb8f91780d3b7e43c1b81b402fe632d112053 | 0a0bc23e8a117bbf23f05277043ea9ea70406265 | refs/heads/main | 2022-11-24T08:31:06.562223 | 2022-11-02T17:37:59 | 2022-11-02T17:37:59 | 243,382,947 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,901 | py |
import os
import sys
import gdal
from osgeo import gdal_array
import numpy as np
import datetime
# Attach per-band acquisition-date metadata (RANGEBEGINNINGDATE) to a monthly
# ERA5 GeoTIFF stack, writing a compressed/tiled copy named "<variable>.tif"
# next to the input file.
#
# Usage: python add_metadata.py <variable>
variable = sys.argv[1]

#fname = f'../ERA5/Europe/{variable}/Europe_monthly_mean_{variable}_1979_2019.tif'
fname = f'../ERA5/Europe/{variable}/Europe_monthly_mean_{variable}_2002_2019.tif'

d = gdal.Open(fname)
data = d.ReadAsArray()

# Output goes into the same directory as the input
datadir = os.path.dirname(fname)

# One band per month
bands, rows, cols = data.shape

driver = gdal.GetDriverByName('GTiff')
driver_options = ["COMPRESS=DEFLATE",
                  "BIGTIFF=YES",
                  "PREDICTOR=1",
                  "TILED=YES",
                  "BLOCKXSIZE=256",
                  "BLOCKYSIZE=256",
                  "INTERLEAVE=BAND"]

# Copy projection and geotransform from the source dataset
proj = d.GetProjection()
gt = d.GetGeoTransform()

# Get GDAL datatype from NumPy datatype
dtype = gdal_array.NumericTypeCodeToGDALTypeCode(data.dtype)

# Create the output dataset
fname = os.path.join(datadir, f'{variable}.tif')
dst_ds = driver.Create(fname, cols, rows, bands, dtype, driver_options)

# Set cartographic projection
dst_ds.SetProjection(proj)
dst_ds.SetGeoTransform(gt)

# Time range covered by the stack (inclusive on both ends)
#startyear = 1979
#startmonth = 1
startyear = 2002
startmonth = 7
endyear = 2019
endmonth = 12

# First day of every month from (startyear, startmonth) to (endyear, endmonth)
dates = [datetime.date(m // 12, m % 12 + 1, 1)
         for m in range(startyear * 12 + startmonth - 1, endyear * 12 + endmonth)]

# BUG FIX: the original `raise "..."` raises a plain string, which is a
# TypeError in Python 3 — raise a real exception instead.
if len(dates) != bands:
    raise RuntimeError("Inconsistent number of bands for date range")

for i in range(bands):
    # Fetch each band once and reuse it for both pixel data and metadata
    band = dst_ds.GetRasterBand(i + 1)
    band.WriteArray(data[i])
    band.SetMetadataItem('RANGEBEGINNINGDATE', dates[i].strftime("%Y-%m-%d"))

# De-referencing the dataset flushes and closes the output file
dst_ds = None
| [
"gerardo.lopezsaldana@assimila.eu"
] | gerardo.lopezsaldana@assimila.eu |
432a7901e1e5361f33fedadd22fc71f1071a8b47 | 0db19410e9751790af8ce4a0a9332293e379c02f | /mmpose/models/data_preprocessors/__init__.py | 7c9bd22e2b20be84a17d05ab3058efd8d934f261 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 136 | py | # Copyright (c) OpenMMLab. All rights reserved.
# Re-export the package's data pre-processor class.
from .data_preprocessor import PoseDataPreprocessor

# Names exported by ``from ... import *``.
__all__ = ['PoseDataPreprocessor']
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
6f3433cb3e7e5eb3c3337244453de9eaff43fad6 | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Autumn18/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8_cff.py | 128c93aac2b48480c4d289b30e955524f2e7f9c3 | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 5,569 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment (presumably auto-generated from a dataset query
# — confirm before hand-editing): defines the PoolSource input for the
# StealthSHH 2t4b (mStop=550, mSo=100) RunIIAutumn18 MiniAOD sample.
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# Logical file names, resolved at runtime by the CMS file catalog.
readFiles.extend( [
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/1A72164E-7E35-BE48-93F4-51B6A56AB387.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/23B7582C-CFFE-D047-9D90-B1AB20E8460E.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/242F17FC-CCE3-5247-B6E3-F9C60EE0BB0E.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/4EDFE926-072E-4040-AD2A-72590327AE44.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/4F1426AD-286C-264A-8329-ED7EA04FB16A.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/50A822A2-174A-AF4E-A9E7-41D0481B49F4.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/5732D25E-2B0A-1B40-AC97-666DA053D583.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/690AEB5F-B848-1742-8BBC-E00DFCFEB3F4.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/75B39567-D8EB-8248-9A2C-12C82E92D5C5.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/8662C5ED-186B-6040-8927-EEB9B38DD1E8.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/9C4895D2-E2A6-684D-8A47-1DE03CC307A3.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/9F03752E-18A7-7A43-BF64-F4F322FEA017.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/9FDE3C91-1555-AD48-98E5-ED50217D992A.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/A389E8D9-B7FC-BF46-AC66-8E6BAA7EE586.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/A7966514-AA81-364A-A6BE-1BC66A1110C2.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/B79AC8B5-B6DB-EF4E-8FCE-59702DDEE86E.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/B86B69C9-E654-FA48-8552-EF1E2B6CA7B2.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/B99ED273-F20A-114D-8BD1-CAD86C32B048.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/BF155687-9CB8-9B40-BB4E-D957D42026B4.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/C40A5067-2A58-0847-84BF-F54A8B722CE2.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/CA0F1C9A-BE60-0541-9329-15AADA5E05C9.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/CBB08FA3-1757-B64C-8503-CB42D0102C0E.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/D2BB86E4-82D6-7645-8F66-3FD9DEA64463.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/DB32F19A-7DCA-CB42-A4DF-89C93A910167.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/E8BDCF1D-0976-2C43-8857-186C589D584B.root',
    '/store/mc/RunIIAutumn18MiniAOD/StealthSHH_2t4b_mStop-550_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/230000/F2BF0D96-1981-F942-A9F2-48AFD121D453.root',
] )
| [
"Chris_Madrid@Baylor.edu"
] | Chris_Madrid@Baylor.edu |
8f9924d0cca9605376bdd8571df236762324976f | 4b896d1e582f2b18753d63221e458fb977bcab3a | /go/debug/objsize.py | c7d50bec22f3ac3f0c02190fe3bafc02f9907f5c | [] | no_license | klangner/deep-learning-go | 1589b86bccdfa229b0e947af2145dc3a48b9e939 | b8fc8062eac2d608161d8b52dae7d0aeaa0dbf0b | refs/heads/master | 2020-04-19T07:10:27.525346 | 2019-11-06T21:40:09 | 2019-11-06T21:40:09 | 168,039,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | from __future__ import print_function
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
def total_size(o, handlers={}, verbose=False):
    """ Returns the approximate memory footprint an object and all of its contents.

    Automatically finds the contents of the following builtin containers and
    their subclasses:  tuple, list, deque, dict, set and frozenset.
    To search other containers, add handlers to iterate over their contents:

        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}

    Shared objects are counted only once (tracked by id), so aliased
    sub-structures do not inflate the total.
    """
    container_handlers = {
        tuple: iter,
        list: iter,
        deque: iter,
        dict: lambda d: chain.from_iterable(d.items()),
        set: iter,
        frozenset: iter,
    }
    container_handlers.update(handlers)    # user handlers take precedence

    visited = set()                        # ids already counted
    default_size = getsizeof(0)            # fallback when __sizeof__ is missing

    def _sizeof(obj):
        obj_id = id(obj)
        if obj_id in visited:              # count each distinct object once
            return 0
        visited.add(obj_id)
        size = getsizeof(obj, default_size)

        if verbose:
            print(size, type(obj), repr(obj), file=stderr)

        # First matching container type wins; recurse into its elements.
        for container_type, iterate in container_handlers.items():
            if isinstance(obj, container_type):
                size += sum(map(_sizeof, iterate(obj)))
                break
        return size

    return _sizeof(o)
##### Example call #####
if __name__ == '__main__':
d = dict(a=1, b=2, c=3, d=[4,5,6,7], e='a string of chars')
print('Total size: {} KB'.format(total_size(d, verbose=True) // 1024)) | [
"klangner@gmail.com"
] | klangner@gmail.com |
88540f3f59978d61c2a4d2f65b37a0348c555d12 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/252/sol.py | 9f045bf520410fe87ee096f43d4c9c6bcba54931 | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 401 | py |
# My Python Solution
# https://leetcode.com/problems/meeting-rooms/discuss/67812
# * Lang:    python3
# * Author:  yinfeng.zhang.9
# * Votes:   21
def canAttendMeetings(self, intervals):
    """Return True when no two meetings overlap.

    Sorts ``intervals`` in place by start time, then verifies every meeting
    starts no earlier than the previous one ends.
    """
    intervals.sort(key=lambda meeting: meeting.start)
    return all(
        current.start >= previous.end
        for previous, current in zip(intervals, intervals[1:])
    )
| [
"frankie.y.liu@gmail.com"
] | frankie.y.liu@gmail.com |
bc8d42394d2159030516acac6df507518ddb8972 | 7e41d70ee3bf07dc3043afef020cde173d5fb0bc | /airflow_client/client/model/task_instance_collection.py | 50cd340cc81054d04aecf2aa847f8c96265afd68 | [
"Apache-2.0"
] | permissive | apache/airflow-client-python | fb11789076bfed191d730c459c84273781d50246 | 38d55888f7533253857baa878322007f4581fc21 | refs/heads/main | 2023-09-05T18:23:37.049610 | 2023-08-23T13:10:27 | 2023-08-23T13:10:27 | 275,569,232 | 251 | 44 | Apache-2.0 | 2023-08-23T07:49:13 | 2020-06-28T11:20:41 | Python | UTF-8 | Python | false | false | 22,832 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. 
e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executed via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { \"description\": \"string\", \"name\": \"string\", \"occupied_slots\": 0, \"open_slots\": 0 \"queued_slots\": 0, \"running_slots\": 0, \"scheduled_slots\": 0, \"slots\": 0, } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. 
# Trying the API You can use a third party client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X PATCH 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\ -H 'Content-Type: application/json' \\ --user \"username:password\" \\ -d '{ \"is_paused\": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at the top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. ## Enabling CORS [Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a browser security feature that restricts HTTP requests that are initiated from scripts running in the browser. For details on enabling/configuring CORS, see [Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backends` command as in the example below. ```bash $ airflow config get-value api auth_backends airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. 
For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. ## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This client error response indicates that the server cannot find the requested resource. ## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. ## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, e.g. the resource it tries to create already exists. 
## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 2.7.0
Contact: dev@airflow.apache.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from airflow_client.client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from airflow_client.client.exceptions import ApiAttributeError
def lazy_import():
from airflow_client.client.model.collection_info import CollectionInfo
from airflow_client.client.model.task_instance import TaskInstance
from airflow_client.client.model.task_instance_collection_all_of import TaskInstanceCollectionAllOf
globals()['CollectionInfo'] = CollectionInfo
globals()['TaskInstance'] = TaskInstance
globals()['TaskInstanceCollectionAllOf'] = TaskInstanceCollectionAllOf
class TaskInstanceCollection(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'task_instances': ([TaskInstance],), # noqa: E501
'total_entries': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'task_instances': 'task_instances', # noqa: E501
'total_entries': 'total_entries', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""TaskInstanceCollection - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
task_instances ([TaskInstance]): [optional] # noqa: E501
total_entries (int): Count of total objects in the current result set before pagination parameters (limit, offset) are applied. . [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""TaskInstanceCollection - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
task_instances ([TaskInstance]): [optional] # noqa: E501
total_entries (int): Count of total objects in the current result set before pagination parameters (limit, offset) are applied. . [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
CollectionInfo,
TaskInstanceCollectionAllOf,
],
'oneOf': [
],
}
| [
"noreply@github.com"
] | apache.noreply@github.com |
cac3db1f41734eb2a40f81a1e4082961e055b269 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/d_n.py | 2d5072d41deade0c2c2f76ec2ebb45a5eda4977c | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'd_N':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
7eb01a752b3791f19bcdfa84bf02cf02c25aedef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03416/s958545838.py | b462b8076901011891d089895dfad5d7ccd0140c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | A, B = map(int, input().split())
count = 0
for i in range(A, B+1):
num = str(i)
if num[0] == num[4]:
if num[1] == num[3]:
count += 1
print(count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fda7809b36d2aad19a3c58c0e669438ed43d6868 | 07c61596c1fba2e2a7034fe5af9707794ea2e2c1 | /Hackerrank/ProjectEuler/99.py3 | 2de3e1198f2cc5dd0ec84038640c552bb9ea446d | [] | no_license | H-Shen/Collection_of_my_coding_practice | 2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4 | 6415552d38a756c9c89de0c774799654c73073a6 | refs/heads/master | 2023-08-24T21:19:08.886667 | 2023-08-22T03:47:39 | 2023-08-22T03:47:39 | 180,731,825 | 8 | 1 | null | 2021-08-13T18:25:25 | 2019-04-11T06:48:09 | C++ | UTF-8 | Python | false | false | 258 | py3 | from math import log
import heapq
A = []
n = int(input())
for i in range(n):
a, b = list(map(int, input().split()))
A.append([b * log(a), a, b])
k = int(input())
result = heapq.nsmallest(k, A, key = lambda x : x[0])[-1]
print(result[1], result[2])
| [
"haohu.shen@ucalgary.ca"
] | haohu.shen@ucalgary.ca |
f09b6648d2399c48d65cc7224e21b179ef864204 | 69a7257eae464b4598183eca15b324307fedadca | /parse_all.py | 6124fe630ae60d48fdeea63eca5b0a4f922da0df | [] | no_license | chenc10/SparkTrace | b1de2848f8eec44a9a0296c53ef97726d7a26d68 | 1e77dd334e8c52d26ab4991de222e1b2d573719b | refs/heads/master | 2016-09-14T11:43:31.928980 | 2016-05-16T01:06:03 | 2016-05-16T01:06:03 | 58,176,685 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import os
import sys
import parse_logs
def main(argv):
dirname = argv[0]
print "Parsing files in ", dirname
agg_results_filename = os.path.join(dirname, "agg_results")
for filename in os.listdir(argv[0]):
full_name = os.path.join(dirname, filename)
if os.path.isfile(full_name) and filename.endswith("job_log"):
print "Parsing file ", full_name
parse_logs.parse(full_name, agg_results_filename)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"chenc10@126.com"
] | chenc10@126.com |
8a0590a6380487f11389d2d2e063ca5783a0bc38 | dc0b6b680fd1fc0ab86ed7a3460137cde3a8612d | /Meus códigos/Python/Economia/Antigos/importa_csv_tse_1b.py | 1be2bda35b65a06a2e53b139401c7268b730c330 | [] | no_license | pedromfnakashima/codigos_versionados | 6c8c692bc08a0dda39a82bf91c5245f28d9be330 | c40c94d69f1ee3dd4317786f1c25bcc1bbcc2bb9 | refs/heads/main | 2023-03-21T20:32:53.677701 | 2021-03-20T00:03:10 | 2021-03-20T00:03:10 | 305,754,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,533 | py | import pandas as pd
class importa():
pasta = r'C:\Users\pedro\bd\TEMÁTICO\TSE\consulta_cand_2018'
arquivo = r'\consulta_cand_2018_MS.csv'
caminho_completo = (pasta + arquivo).replace("\\", "/")
def importa_csv(self, lista):
df = pd.read_csv(self.caminho_completo,
usecols = lista,
encoding = 'latin',
delimiter = ';')
return df
class tse_colunas():
lista1 = [
'ANO_ELEICAO',
'NR_TURNO',
'DT_ELEICAO',
'DS_CARGO',
'NM_CANDIDATO',
'NM_URNA_CANDIDATO',
'NR_CPF_CANDIDATO',
'NM_EMAIL',
'TP_AGREMIACAO',
'NR_PARTIDO',
'SG_PARTIDO',
'NM_PARTIDO',
'NM_COLIGACAO',
'DS_COMPOSICAO_COLIGACAO',
'SG_UF_NASCIMENTO',
'NM_MUNICIPIO_NASCIMENTO',
'DT_NASCIMENTO',
'DS_GENERO',
'DS_GRAU_INSTRUCAO',
'DS_ESTADO_CIVIL',
'DS_COR_RACA',
'DS_OCUPACAO',
'DS_SIT_TOT_TURNO',
'ST_REELEICAO'
]
lista2 = [
'ANO_ELEICAO',
'NR_TURNO',
'DT_ELEICAO'
]
del lista1, lista2
df_tse_1 = pd.read_csv(caminho_completo,
usecols = lista1,
encoding = 'latin',
delimiter = ';')
df_tse_2 = pd.read_csv(caminho_completo,
usecols = lista2,
encoding = 'latin',
delimiter = ';')
df_tse_3 = pd.read_csv(caminho_completo,
usecols = lambda x: x not in lista1,
encoding = 'latin',
delimiter = ';')
del df_tse_1
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
import pandas as pd
from pathlib import Path
caminho = Path(r'C:\Users\pedro\bd\TEMÁTICO\TSE\consulta_cand_2018\consulta_cand_2018_MS.csv')
caminho
#f1 = lambda x: pd.to_numeric(x.replace("R$ ","").replace(",","."), errors="coerce")
converte_data = lambda x: pd.to_datetime(x, format='%d/%m/%Y', errors='coerce')
df_tse_1 = pd.read_csv(caminho,
usecols = tse_colunas.lista1,
encoding = 'latin',
dtype = {'DS_CARGO': 'category',
'TP_AGREMIACAO': 'category',
'NM_PARTIDO': 'category',
'NM_COLIGACAO': 'category',
'DS_COMPOSICAO_COLIGACAO': 'category',
'SG_UF_NASCIMENTO': 'category',
'NM_MUNICIPIO_NASCIMENTO': 'category',
'DS_GENERO': 'category',
'DS_GRAU_INSTRUCAO': 'category',
'DS_ESTADO_CIVIL': 'category',
'DS_COR_RACA': 'category',
'DS_OCUPACAO': 'category',
'DS_SIT_TOT_TURNO': 'category'},
delimiter = ';',
converters={"DT_NASCIMENTO": converte_data},
true_values=["S"],
false_values=["N"],
engine = "python")
# Funcionou com o Path!
df_tse_1.dtypes
print(df_tse_1.head())
########################################################
df_tse_1 = pd.read_csv(caminho,
usecols = tse_colunas.lista1,
encoding = 'latin',
delimiter = ';',
converters={"DT_NASCIMENTO": converte_data},
true_values=["S"],
false_values=["N"],
engine = "python")
# Funcionou com o Path!
df_tse_1.dtypes
print(df_tse_1.head())
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
## Filtros
df_tse_1.loc[df_tse_1['NM_CANDIDATO'].str.contains("PICARELLI")]
resultado1 = df_tse_1.loc[df_tse_1['NM_URNA_CANDIDATO']
.str.contains("BARBOSINHA"),
['NM_CANDIDATO', 'NM_URNA_CANDIDATO']]
resultado1 = df_tse_1.loc[df_tse_1['DS_CARGO'].str.contains("ESTADUAL"),
['DS_CARGO', 'NM_URNA_CANDIDATO', 'SG_PARTIDO', 'DT_NASCIMENTO', 'DS_GENERO', 'DS_GRAU_INSTRUCAO', 'DS_ESTADO_CIVIL', 'DS_SIT_TOT_TURNO']]
resultado1 = df_tse_1.loc[(df_tse_1['DS_CARGO'].str.contains("ESTADUAL"),
df_tse_1['DS_SIT_TOT_TURNO'] == 'ELEITO'),
['DS_CARGO', 'NM_URNA_CANDIDATO', 'SG_PARTIDO', 'DT_NASCIMENTO', 'DS_GENERO', 'DS_GRAU_INSTRUCAO', 'DS_ESTADO_CIVIL', 'DS_SIT_TOT_TURNO']]
resultado1 = df_tse_1.loc[:,['DS_CARGO', 'NM_URNA_CANDIDATO']]
resultado2 = df_tse_1.loc[:,['DS_CARGO', 'NM_URNA_CANDIDATO', 'SG_PARTIDO', 'DT_NASCIMENTO', 'DS_GENERO', 'DS_GRAU_INSTRUCAO', 'DS_ESTADO_CIVIL', 'DS_SIT_TOT_TURNO']]\
.loc[df_tse_1['DS_CARGO'].str.contains("ESTADUAL")]\
.loc[df_tse_1['DS_SIT_TOT_TURNO'] == 'ELEITO']
##########################################
resultado1 = df_tse_1.loc[:, ['DS_CARGO',
'NM_URNA_CANDIDATO',
'SG_PARTIDO',
'DT_NASCIMENTO',
'DS_GENERO',
'DS_GRAU_INSTRUCAO',
'DS_ESTADO_CIVIL',
'DS_SIT_TOT_TURNO']]\
.loc[df_tse_1['DS_CARGO'].str.contains("ESTADUAL")]\
.loc[df_tse_1['DS_SIT_TOT_TURNO'] == 'SUPLENTE']
##########################################
resultado1 = df_tse_1.loc[:, ['NM_URNA_CANDIDATO',
'SG_PARTIDO',
'DT_NASCIMENTO',
'DS_GENERO',
'DS_GRAU_INSTRUCAO',
'DS_ESTADO_CIVIL',
'DS_SIT_TOT_TURNO']]\
.loc[df_tse_1['DS_CARGO'].str.contains('ESTADUAL')]\
.loc[df_tse_1['DS_SIT_TOT_TURNO'].str.contains('^ELEITO', regex=True)]
len(resultado1)
##########################################
| [
"pedromfnakashima@gmail.com"
] | pedromfnakashima@gmail.com |
1a2cee0728d54f0070f9d82395335d23057df172 | ddba80990fcf1e1147a12e0d024dc4ce0c1239e1 | /MUNDO 2/Exercícios MUNDO 2/ex047 (Contagem de pares).py | bd5a1acc4590ae970044406d272ff4ae73b0797c | [
"MIT"
] | permissive | LeonMarqs/Curso-Em-Video-Python3 | f8f70b91248bf5feb7f971ed2599a19031f58a86 | fd774af030ac3b385c03213441d7ac7fad46efca | refs/heads/master | 2020-07-31T17:17:55.519292 | 2020-02-02T23:39:51 | 2020-02-02T23:39:51 | 210,689,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | print('-=-'* 10)
print('Números pares entre 1 e 50')
print('-=-'* 10)
for c in range(2, 51, 2):
print(c, end=' ')
| [
"noreply@github.com"
] | LeonMarqs.noreply@github.com |
2bc38fed6e6b19e8b25708fd31d90eb07578d5f7 | 02467e9975b50c14b4dc8cdc6dc03748f9aa8245 | /openshift/test/test_v1_secret_key_selector.py | a1b2b55845f182d6d49bff42036d5628cbf82401 | [
"Apache-2.0"
] | permissive | ashcrow/python-openshift | 3995e3c4b72bf52a62bc6b07dabf3d0f709444ae | 74c9ade612def941938016385842631342e926de | refs/heads/master | 2021-01-11T19:29:04.419005 | 2017-01-18T19:31:58 | 2017-01-18T19:31:58 | 79,377,387 | 0 | 0 | null | 2017-01-18T19:46:04 | 2017-01-18T19:46:04 | null | UTF-8 | Python | false | false | 4,192 | py | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. 
Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v1.5.0-alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from openshift.client.rest import ApiException
from openshift.client.models.v1_secret_key_selector import V1SecretKeySelector
class TestV1SecretKeySelector(unittest.TestCase):
""" V1SecretKeySelector unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SecretKeySelector(self):
"""
Test V1SecretKeySelector
"""
model = openshift.client.models.v1_secret_key_selector.V1SecretKeySelector()
if __name__ == '__main__':
unittest.main()
| [
"jdetiber@redhat.com"
] | jdetiber@redhat.com |
680429a9272c50f295242542fb8d69945d3f2917 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnwicked.py | e148022c53291f23cbb55d62a489d2fe1f44db2f | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,103 | py | ii = [('BentJDO2.py', 2), ('GodwWSL2.py', 3), ('SadlMLP.py', 8), ('FerrSDO3.py', 1), ('WilbRLW4.py', 2), ('MartHSI2.py', 2), ('KembFJ1.py', 2), ('WilbRLW5.py', 2), ('BailJD2.py', 1), ('ChalTPW2.py', 1), ('AdamWEP.py', 1), ('WilbRLW2.py', 9), ('CarlTFR.py', 3), ('GrimSLE.py', 2), ('CookGHP2.py', 3), ('BailJD1.py', 2), ('CoolWHM.py', 1), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('LyelCPG.py', 3), ('DibdTRL2.py', 2), ('AinsWRR.py', 1), ('WadeJEB.py', 1), ('GodwWLN.py', 2), ('CoopJBT.py', 2), ('MedwTAI2.py', 1), ('BachARE.py', 1), ('SoutRD.py', 1), ('HowiWRL2.py', 2), ('BailJD3.py', 4), ('MartHRW.py', 2), ('BentJRP.py', 5), ('EdgeMHT.py', 1), ('BowrJMM.py', 1), ('LyttELD3.py', 1), ('FerrSDO.py', 1), ('ThomGLG.py', 4), ('StorJCC.py', 1), ('KembFJ2.py', 2), ('HaliTBC.py', 1), ('WilbRLW3.py', 6), ('AinsWRR2.py', 1), ('ClarGE3.py', 1), ('RogeSIP.py', 1), ('DibdTRL.py', 1), ('HogaGMM2.py', 1), ('MartHSI.py', 1), ('DwigTHH.py', 1), ('BowrJMM2.py', 1), ('BowrJMM3.py', 1), ('TaylIF.py', 5), ('ChalTPW.py', 6), ('KeigTSS.py', 2), ('KirbWPW.py', 1), ('BentJDO.py', 2), ('ClarGE4.py', 1), ('HowiWRL.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
437895ed7aaef43911f95aafcd5cb599969661f1 | 5173c3e3956387a3f2ae8fcf4aed7c7a600dac78 | /SWEA/SWEA_5185_이진수.py | 3035925afbd3f73521eb396dd724f1fe9f88dd20 | [] | no_license | ma0723/Min_Algorithm | df75f53f6e89b7817d4b52d686effb8236a4ddac | b02d1043008cb32e22daa9d4207b9a45f111d66f | refs/heads/master | 2023-07-25T11:00:15.397093 | 2021-08-30T02:08:05 | 2021-08-30T02:08:05 | 375,613,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | import sys
sys.stdin = open("5185.txt", "r")
hex_lst = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
T = int(input())
for tc in range(1, T+1):
print("#{}".format(tc), end= ' ')
N, hex = input().split()
# N 자리수 16진수
dec = []
# 10진수 변환 (hex_lst의 index)
for i in hex:
for j in range(16):
if hex_lst[j] == i:
dec.append(j)
# print(dec)
# 2진수 변환
for i in dec:
ans = ''
while i > 0:
ans = str(i % 2) + ans
# 나머지 (역순이므로 ans 뒤에 배치)
i //= 2
# 몫
# N자리 16진수가 주어지면 각 자리 수를 4자리 2진수로 표시하는 프로그램
# 2진수의 앞자리 0도 반드시 출력
if len(ans) != 4:
my_ans = '0'*(4-len(ans)) + ans
# 4자리가 모두 채워지지 않은 경우 앞부분 0 채우기
else:
my_ans = ans
# 4자리 모두 채워진 경우
print(my_ans, end='')
# 4자리 2진수 공백없이 나열
print()
# 다음문제 개행
| [
"ma0723@naver.com"
] | ma0723@naver.com |
fe203d83a8cb7de4eaf56810b600faefafa9d551 | 1548ce77537dcd50ab04b0eaee050b5d30553e23 | /autotabular/pipeline/components/regression/mlp.py | a4223afac0672806f0541bd738b70cdcc8163b4b | [
"Apache-2.0"
] | permissive | Shamoo100/AutoTabular | 4a20e349104246bf825ebceae33dca0a79928f2e | 7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2 | refs/heads/main | 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,429 | py | import copy
import numpy as np
from autotabular.pipeline.components.base import AutotabularRegressionAlgorithm, IterativeComponent
from autotabular.pipeline.constants import DENSE, PREDICTIONS, SPARSE, UNSIGNED_DATA
from autotabular.util.common import check_for_bool
from ConfigSpace.conditions import InCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (CategoricalHyperparameter, Constant,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
UnParametrizedHyperparameter)
class MLPRegressor(IterativeComponent, AutotabularRegressionAlgorithm):
def __init__(self,
hidden_layer_depth,
num_nodes_per_layer,
activation,
alpha,
learning_rate_init,
early_stopping,
solver,
batch_size,
n_iter_no_change,
tol,
shuffle,
beta_1,
beta_2,
epsilon,
validation_fraction=None,
random_state=None,
verbose=0):
self.hidden_layer_depth = hidden_layer_depth
self.num_nodes_per_layer = num_nodes_per_layer
self.max_iter = self.get_max_iter()
self.activation = activation
self.alpha = alpha
self.learning_rate_init = learning_rate_init
self.early_stopping = early_stopping
self.n_iter_no_change = n_iter_no_change
self.validation_fraction = validation_fraction
self.tol = tol
self.solver = solver
self.batch_size = batch_size
self.shuffle = shuffle
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.beta_1 = beta_1
self.random_state = random_state
self.verbose = verbose
self.estimator = None
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_iter_
def iterative_fit(self, X, y, n_iter=2, refit=False):
"""Set n_iter=2 for the same reason as for SGD."""
from sklearn.neural_network import MLPRegressor
import sklearn.preprocessing
n_iter = max(n_iter, 2)
if refit:
self.estimator = None
self.scaler = None
if self.estimator is None:
self._fully_fit = False
self.max_iter = int(self.max_iter)
self.hidden_layer_depth = int(self.hidden_layer_depth)
self.num_nodes_per_layer = int(self.num_nodes_per_layer)
self.hidden_layer_sizes = tuple(
self.num_nodes_per_layer
for i in range(self.hidden_layer_depth))
self.activation = str(self.activation)
self.alpha = float(self.alpha)
self.learning_rate_init = float(self.learning_rate_init)
self.early_stopping = str(self.early_stopping)
if self.early_stopping == 'train':
self.validation_fraction = 0.0
self.tol = float(self.tol)
self.n_iter_no_change = int(self.n_iter_no_change)
self.early_stopping_val = False
elif self.early_stopping == 'valid':
self.validation_fraction = float(self.validation_fraction)
self.tol = float(self.tol)
self.n_iter_no_change = int(self.n_iter_no_change)
self.early_stopping_val = True
else:
raise ValueError('Set early stopping to unknown value %s' %
self.early_stopping)
# elif self.early_stopping == "off":
# self.validation_fraction = 0
# self.tol = 10000
# self.n_iter_no_change = self.max_iter
# self.early_stopping_val = False
self.solver = self.solver
try:
self.batch_size = int(self.batch_size)
except ValueError:
self.batch_size = str(self.batch_size)
self.shuffle = check_for_bool(self.shuffle)
self.beta_1 = float(self.beta_1)
self.beta_2 = float(self.beta_2)
self.epsilon = float(self.epsilon)
self.beta_1 = float(self.beta_1)
self.verbose = int(self.verbose)
n_iter = int(np.ceil(n_iter))
# initial fit of only increment trees
self.estimator = MLPRegressor(
hidden_layer_sizes=self.hidden_layer_sizes,
activation=self.activation,
solver=self.solver,
alpha=self.alpha,
batch_size=self.batch_size,
learning_rate_init=self.learning_rate_init,
max_iter=n_iter,
shuffle=self.shuffle,
random_state=copy.copy(self.random_state),
verbose=self.verbose,
warm_start=True,
early_stopping=self.early_stopping_val,
validation_fraction=self.validation_fraction,
n_iter_no_change=self.n_iter_no_change,
tol=self.tol,
beta_1=self.beta_2,
beta_2=self.beta_1,
epsilon=self.epsilon,
# We do not use these, see comments below in search space
# momentum=self.momentum,
# nesterovs_momentum=self.nesterovs_momentum,
# power_t=self.power_t,
# learning_rate=self.learning_rate,
# max_fun=self.max_fun
)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
self.scaler.fit(y.reshape((-1, 1)))
else:
new_max_iter = min(self.max_iter - self.estimator.n_iter_, n_iter)
self.estimator.max_iter = new_max_iter
Y_scaled = self.scaler.transform(y.reshape((-1, 1))).ravel()
self.estimator.fit(X, Y_scaled)
if self.estimator.n_iter_ >= self.max_iter or \
self.estimator._no_improvement_count > self.n_iter_no_change:
self._fully_fit = True
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, '_fully_fit'):
return False
else:
return self._fully_fit
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
Y_pred = self.estimator.predict(X)
return self.scaler.inverse_transform(Y_pred)
@staticmethod
def get_properties(dataset_properties=None):
return {
'shortname': 'MLP',
'name': 'Multilayer Percepton',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'handles_multioutput': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS, )
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
hidden_layer_depth = UniformIntegerHyperparameter(
name='hidden_layer_depth', lower=1, upper=3, default_value=1)
num_nodes_per_layer = UniformIntegerHyperparameter(
name='num_nodes_per_layer',
lower=16,
upper=264,
default_value=32,
log=True)
activation = CategoricalHyperparameter(
name='activation', choices=['tanh', 'relu'], default_value='tanh')
alpha = UniformFloatHyperparameter(
name='alpha', lower=1e-7, upper=1e-1, default_value=1e-4, log=True)
learning_rate_init = UniformFloatHyperparameter(
name='learning_rate_init',
lower=1e-4,
upper=0.5,
default_value=1e-3,
log=True)
# Not allowing to turn off early stopping
early_stopping = CategoricalHyperparameter(
name='early_stopping',
choices=['valid', 'train'], # , "off"],
default_value='valid')
# Constants
n_iter_no_change = Constant(
name='n_iter_no_change', value=32) # default=10 is too low
validation_fraction = Constant(name='validation_fraction', value=0.1)
tol = UnParametrizedHyperparameter(name='tol', value=1e-4)
solver = Constant(name='solver', value='adam')
# Relying on sklearn defaults for now
batch_size = UnParametrizedHyperparameter(
name='batch_size', value='auto')
shuffle = UnParametrizedHyperparameter(name='shuffle', value='True')
beta_1 = UnParametrizedHyperparameter(name='beta_1', value=0.9)
beta_2 = UnParametrizedHyperparameter(name='beta_2', value=0.999)
epsilon = UnParametrizedHyperparameter(name='epsilon', value=1e-8)
# Not used
# solver=["sgd", "lbfgs"] --> not used to keep searchspace simpler
# learning_rate --> only used when using solver=sgd
# power_t --> only used when using solver=sgd & learning_rate=invscaling
# momentum --> only used when solver=sgd
# nesterovs_momentum --> only used when solver=sgd
# max_fun --> only used when solver=lbfgs
# activation=["identity", "logistic"] --> not useful for classification
cs.add_hyperparameters([
hidden_layer_depth, num_nodes_per_layer, activation, alpha,
learning_rate_init, early_stopping, n_iter_no_change,
validation_fraction, tol, solver, batch_size, shuffle, beta_1,
beta_2, epsilon
])
validation_fraction_cond = InCondition(validation_fraction,
early_stopping, ['valid'])
cs.add_conditions([validation_fraction_cond])
# We always use early stopping
# n_iter_no_change_cond = InCondition(n_iter_no_change, early_stopping, ["valid", "train"])
# tol_cond = InCondition(n_iter_no_change, early_stopping, ["valid", "train"])
# cs.add_conditions([n_iter_no_change_cond, tol_cond])
return cs
| [
"jianzhnie@126.com"
] | jianzhnie@126.com |
911f9d2ca22cfaffd45aa32eed0a648b30ff9cec | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/8117955873ba4f469f73ce4e2a833897.py | 930b205143cc064eaf9cd0425dad20fa99fdb7c2 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 136 | py | dict_map = {
'A': 'U',
'G': 'C',
'T': 'A',
'C': 'G'
}
def to_rna(dna):
return ''.join([dict_map[x] for x in dna])
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
f074af7028ccd82b4eb009889b16d1dabc110c69 | 38c10c01007624cd2056884f25e0d6ab85442194 | /remoting/host/DEPS | 3abc8545dbe42bf8933ae6066acb8037b5c824d2 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 350 | include_rules = [
"+ash",
"+cc/output",
"+components/policy/core/common",
"+extensions/browser/api/messaging",
"+jingle/glue",
"+net",
"+remoting/codec",
"+remoting/protocol",
"+remoting/signaling",
"+remoting/tools",
"+third_party/jsoncpp",
"+third_party/modp_b64",
"+third_party/skia",
"+third_party/webrtc",
"+ui",
]
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com | |
480d52454e03c72cf93ca575ed707fe89e0b6db4 | e52b0124ad5f875ea16a10cc8aa5e771f5d7c3ea | /guniflask/security/web_authentication_details.py | 5375b3912e675d9a30289b0ae702bc9a83433405 | [
"MIT"
] | permissive | jadbin/guniflask | 24ec0c755827fe15ebbfeaec3149882ac6bc79b9 | f0f5029d03219b7793482dc3ed09eab508e538d6 | refs/heads/master | 2023-08-18T07:56:36.331037 | 2023-08-09T02:48:23 | 2023-08-09T02:48:23 | 147,511,047 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from flask import request
class WebAuthenticationDetails:
def __init__(self):
self.remote_address = request.remote_addr
| [
"jadbin.com@hotmail.com"
] | jadbin.com@hotmail.com |
f670c6e3e5f42ee3819ef8f36431f55282ea2c60 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/Dongola/common/hr_custom/report/transfer_report_1.py | ff5c7f89843d579d6b10234303ef5e47105ca182 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | import time
import pooler
#import rml_parse
import copy
from report import report_sxw
import pdb
import re
class transfer_report_1(report_sxw.rml_parse):
_name = 'report.transfer.report.1'
def __init__(self, cr, uid, name, context):
super(transfer_report_1, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'_get_emp':self._get_emp,
})
def _get_emp(self,ids):
#print "ids",ids
p = pooler.get_pool(self.cr.dbname).get('hr.process.archive')
#s=p.search(self.cr, self.uid,[('employee_id','=',ids)])
emp_id=p.browse(self.cr, self.uid,[ids])[0]
#print "jjjj",emp_id.employee_id.id
emp=emp_id.employee_id.id
comp=emp_id.company_id.id
#print "emp",emp
self.cr.execute('SELECT c.name as company,d.name as department,ar.transfer_date AS date,r.name AS employee_name FROM hr2_basic_transfer_archive AS ar left join hr_employee AS e on (ar.employee_id=e.id) left join resource_resource AS r on (e.resource_id=r.id) left join hr_department as d on (ar.department_id=d.id) left join res_company as c on (ar.company_id=c.id) where r.id=%s and c.id=%s order by ar.transfer_date'%(emp,comp))
res = self.cr.dictfetchall()
#print "transfer",res
return res
report_sxw.report_sxw('report.transfer.report.1', 'hr.process.archive',
'addons/hr_process/report/transfer_report_1.rml', parser=transfer_report_1, header=True)
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
9ee83697c077bff9c366dc651065957a5dcfd94c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_steering.py | 9d1b8159a6d286fa6567d17d84d7a4e429ff66b2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _STEERING():
def __init__(self,):
self.name = "STEERING"
self.definitions = steer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['steer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
59a007f714b57328d856f3367ba98cfccfb2001b | c26dc7928b1facac2c0912f6532076d35c19e835 | /devel/_setup_util.py | 8976eb5f7d8437a5de065f0475d88be74a03e568 | [] | no_license | mattedminster/inmoov_ros | 33c29a2ea711f61f15ad5e2c53dd9db65ef6437f | e063a90b61418c3612b8df7876a633bc0dc2c428 | refs/heads/master | 2021-01-23T02:39:36.090746 | 2017-08-09T02:56:42 | 2017-08-09T02:56:42 | 85,995,826 | 0 | 0 | null | 2017-03-23T20:45:32 | 2017-03-23T20:45:32 | null | UTF-8 | Python | false | false | 12,467 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/robot/catkin_ws/devel;/home/robot/inmoov_ros/devel;/opt/ros/kinetic'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| [
"mattedminster@gmail.com"
] | mattedminster@gmail.com |
a3548216505b1663d119fa33ba99a302992ca5a7 | 258fffc9b1fda6ed152d3520facdcba54d5460d1 | /manga_py/providers/readmanga_me.py | feada82b8ce302fe0689a757f8e92de2a2386776 | [
"MIT"
] | permissive | theincognito-inc/manga-dl | a29422b8417a398e6b0273ae6f2374f3f4401021 | 899905bafb6c6891815b58cce41eaff32a682570 | refs/heads/stable_1.x | 2021-07-09T10:45:18.197767 | 2020-07-20T11:21:18 | 2020-07-20T11:21:18 | 161,990,334 | 0 | 0 | MIT | 2020-07-20T11:21:20 | 2018-12-16T10:31:37 | Python | UTF-8 | Python | false | false | 1,360 | py | from manga_py.provider import Provider
from .helpers.std import Std
class ReadmangaMe(Provider, Std):
def get_archive_name(self) -> str:
idx = self.get_chapter_index()
vol, ch = idx.split('-')
return self.normal_arc_name({'vol': vol, 'ch': ch})
def get_chapter_index(self):
_re = r'/.+/(?:vol)?([^/]+/[^/]+)(?:/|\?ma?t)?'
name = self.re.search(_re, self.chapter).group(1)
if ~name.find('?'):
name = name[:name.find('?')]
return name.replace('/', '-')
def get_main_content(self):
return self._get_content('{}/{}?mature=1&mtr=1')
def get_manga_name(self):
return self._get_name(r'\.\w{2,7}/([^/]+)')
def get_chapters(self):
return self._elements('div.chapters-link tr > td > a')
def get_files(self):
_uri = self.http().normalize_uri(self.chapter)
content = self.http_get(_uri)
result = self.re.search(r'rm_h\.init.+?(\[\[.+\]\])', content, self.re.M)
if not result:
return []
images = self.json.loads(
result.groups()[0].replace("'", '"')
)
return [i[0] + i[2] for i in images]
def get_cover(self):
return self._cover_from_content('.picture-fotorama > img')
def book_meta(self) -> dict:
# todo meta
pass
main = ReadmangaMe
| [
"sttv-pc@mail.ru"
] | sttv-pc@mail.ru |
d76bf2a679b9464bd4006997fb666f97c9a71c0f | 4c1d9bace72c568b39ffd88b2f3c6bcc58bfe892 | /main/board.py | 23bf487563dab53661c51941f95d421bebc6f853 | [] | no_license | tanghee/PULZIP_Project_Flask_Add | a5cd05190b4cb9f280508e44b797728a6a037efe | e240a510b7ec8cb94fff1a7e3ad47b30fe1b8c2a | refs/heads/master | 2023-02-01T12:45:23.377652 | 2020-12-16T01:50:57 | 2020-12-16T01:50:57 | 321,837,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,668 | py | from main import *
from flask import Blueprint
blueprint = Blueprint("board", __name__, url_prefix="/board")
category = [
{"박물관소개": {
"관장 인사글": "about",
"관람 안내 및 오시는 길": "location",
"관련 기사": "news",
"로고 소개": "logo",
}},
{"풀짚공예 전시실": {
"소장유물 소개": "relic",
"상설 전시": "expatiation_exhibition",
"특별 전시": "special_exhibition",
"체험교육 전시": "experience_exhibition",
}},
{"풀짚공예 교육": {
"풀짚공예란?": "info",
"만들기 동영상": "video",
"체험학습": "field_study",
"일반&전문가 심화과정": "normal_study",
}},
{"풀짚 문화": {
"책 소개": "culture_book",
"바구니여행": "culture_basket",
"풀짚갤러리": "pulzip_gallery",
}},
{"커뮤니티": {
"공지사항": "notice",
"자유게시판": "free",
"포토갤러리": "gallery",
"체험예약": "reservation",
}},
]
@blueprint.route("/list")
def board_list():
# 페이지 값 (값이 없는 경우 기본값은 1), 리미트 값 (몇 개의 게시물을 나오게 할 것인지)
page = request.args.get("page", 1, type=int)
limit = request.args.get("limit", 10, type=int)
board_sort = request.args.get("board_sort", -1, type=int)
board = mongo.db.board
tot_count = board.find({}).count() # 게시물의 총 개수
last_page_num = math.ceil(tot_count / limit) # 마지막 페이지 수 = 전체 게시물 수 / 페이지당 게시물 수
block_size = 5
block_num = int((page - 1) / block_size) # block 현재 위치
block_start = int((block_size * block_num) + 1) # block 시작 위치
block_last = math.ceil(block_start + (block_size - 1)) # block 마지막 위치
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("pubdate", -1)
if board_sort == 0:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("title", 1)
elif board_sort == 1:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("title", -1)
elif board_sort == 2:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("name", 1)
elif board_sort == 3:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("name", -1)
elif board_sort == 4:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("pubdate", -1)
elif board_sort == 5:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("pubdate", 1)
elif board_sort == 6:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("view", -1)
elif board_sort == 7:
datas = board.find({}).skip((page - 1) * limit).limit(limit).sort("view", 1)
return render_template("/board/list.html", page=page, limit=limit, board_sort=board_sort, datas=datas, tot_count=tot_count, block_start=block_start, block_last=block_last, last_page_num=last_page_num, category=category)
@blueprint.route("/view/<idx>")
@login_required
def board_view(idx):
    """Show a single post and atomically bump its view counter."""
    if idx is None:
        return abort(404)
    page = request.args.get("page")
    board_sort = request.args.get("board_sort")
    collection = mongo.db.board
    # $inc + return_document=True gives us the post with the updated count.
    document = collection.find_one_and_update(
        {"_id": ObjectId(idx)}, {"$inc": {"view": 1}}, return_document=True)
    if document is None:
        return abort(404)
    result = {
        "id": document.get("_id"),
        "name": document.get("name"),
        "title": document.get("title"),
        "contents": document.get("contents"),
        "pubdate": document.get("pubdate"),
        "view": document.get("view"),
        "writer_id": document.get("writer_id", "")
    }
    return render_template("/board/view.html", result=result, page=page, board_sort=board_sort, category=category)
@blueprint.route('/write', methods=["GET", "POST"])
@login_required
def board_write():
    """Show the write form (GET) or store a new post (POST)."""
    if request.method != "POST":
        return render_template("/board/write.html", category=category)
    # Publication time stored as UTC milliseconds since the epoch.
    now_ms = round(datetime.utcnow().timestamp() * 1000)
    document = {
        "name": request.form.get("name"),
        "title": request.form.get("title"),
        "contents": request.form.get("contents"),
        "pubdate": now_ms,
        "writer_id": session.get("id"),
        "view": 0,
    }
    inserted = mongo.db.board.insert_one(document)
    return redirect(url_for("board.board_view", idx=inserted.inserted_id))
@blueprint.route("/edit/<idx>", methods=["GET", "POST"])
@login_required  # consistent with write/view: editing requires a signed-in user
def board_edit(idx):
    """Show the edit form (GET) or apply an edit (POST) for post `idx`.

    Only the original author (matched via the session id) may edit; a
    missing post or a foreign author redirects back to the list with a
    flash message.
    """
    board = mongo.db.board
    data = board.find_one({"_id": ObjectId(idx)})
    # Guard against a missing post in BOTH branches: the original only
    # checked on GET, so a POST for a deleted post crashed on None.
    if data is None:
        flash("해당 게시물이 존재하지 않습니다.")
        return redirect(url_for("board.board_list"))
    if session.get("id") != data.get("writer_id"):
        flash("글 수정 권한이 없습니다.")
        return redirect(url_for("board.board_list"))
    if request.method == "GET":
        return render_template("/board/edit.html", data=data, category=category)
    board.update_one({"_id": ObjectId(idx)}, {
        "$set": {
            "title": request.form.get("title"),
            "contents": request.form.get("contents"),
        }
    })
    flash("수정되었습니다.")
    return redirect(url_for("board.board_view", idx=idx))
@blueprint.route("/delete/<idx>")
@login_required  # consistent with write/view: deleting requires a signed-in user
def board_delete(idx):
    """Delete post `idx` if the current session user is its author."""
    board = mongo.db.board
    data = board.find_one({"_id": ObjectId(idx)})
    # The original dereferenced `data` unconditionally and crashed with
    # AttributeError when the post did not exist.
    if data is None:
        flash("해당 게시물이 존재하지 않습니다.")
    elif data.get("writer_id") == session.get("id"):
        board.delete_one({"_id": ObjectId(idx)})
        flash("삭제되었습니다.")
    else:
        flash("삭제 권한이 없습니다.")
    return redirect(url_for("board.board_list"))
| [
"s2018w37@e-mirim.hs.kr"
] | s2018w37@e-mirim.hs.kr |
a2f5d5a0f1175cfa8fd5b843ffa900cbfa6afda5 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn4 - krajevne funkcije/M-17046-2105.py | 762885ace55c8b9721373140258748191ebfffad | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,796 | py | from math import sqrt
def koordinate(ime, kraji):
    """Return the (x, y) pair of the place named `ime`, or None if absent."""
    for kandidat, x, y in kraji:
        if kandidat == ime:
            return (x, y)
    return None
def razdalja_koordinat(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return sqrt(dx ** 2 + dy ** 2)
def razdalja(ime1, ime2, kraji):
    """Distance between the two named places, looked up in `kraji`."""
    (x1, y1), (x2, y2) = koordinate(ime1, kraji), koordinate(ime2, kraji)
    return razdalja_koordinat(x1, y1, x2, y2)
def v_dometu(ime, domet, kraji):
    """Names of all places (other than `ime`) no farther than `domet`."""
    return [drugo for drugo, _x, _y in kraji
            if drugo != ime and razdalja(ime, drugo, kraji) <= domet]
def najbolj_oddaljeni(ime, imena, kraji):
    """Return the name in `imena` that lies farthest from the place `ime`."""
    zmagovalec = imena[0]
    # Keep the first name on ties, exactly like a strict-greater scan.
    for kandidat in imena[1:]:
        if razdalja(kandidat, ime, kraji) > razdalja(zmagovalec, ime, kraji):
            zmagovalec = kandidat
    return zmagovalec
def zalijemo(ime, domet, kraji):
    """Name of the reachable place (within `domet`) farthest from `ime`."""
    return najbolj_oddaljeni(ime, v_dometu(ime, domet, kraji), kraji)
def presek(s1, s2):
    """Elements of `s1` (order and duplicates preserved) also found in `s2`."""
    skupni = []
    for element in s1:
        if element in s2:
            skupni.append(element)
    return skupni
def skupno_zalivanje(ime1, ime2, domet, kraji):
    """Places reachable (within `domet`) from both `ime1` and `ime2`."""
    prvi = v_dometu(ime1, domet, kraji)
    drugi = v_dometu(ime2, domet, kraji)
    return presek(prvi, drugi)
import unittest
class TestKraji(unittest.TestCase):
    """Unit tests for the place/distance helpers above.

    `koordinate` and `razdalja_koordinat` are wrapped in call counters
    (see setUpClass) so test_3 can also verify how often `razdalja`
    delegates to them.
    """

    # Shared fixture: one (name, x, y) record per place.
    vsi_kraji = [
        ('Brežice', 68.66, 7.04),
        ('Lenart', 85.20, 78.75),
        ('Rateče', -65.04, 70.04),
        ('Ljutomer', 111.26, 71.82),
        ('Rogaška Slatina', 71.00, 42.00),
        ('Ribnica', 7.10, -10.50),
        ('Dutovlje', -56.80, -6.93),
        ('Lokve', -57.94, 19.32),
        ('Vinica', 43.81, -38.43),
        ('Brtonigla', -71.00, -47.25),
        ('Kanal', -71.00, 26.25),
        ('Črnomelj', 39.05, -27.93),
        ('Trbovlje', 29.61, 35.07),
        ('Beltinci', 114.81, 80.54),
        ('Domžale', -2.34, 31.50),
        ('Hodoš', 120.70, 105.00),
        ('Škofja Loka', -23.64, 35.07),
        ('Velike Lašče', 0.00, 0.00),
        ('Velenje', 33.16, 54.29),
        ('Šoštanj', 29.61, 57.75),
        ('Laško', 42.60, 33.29),
        ('Postojna', -29.54, -5.25),
        ('Ilirska Bistrica', -27.19, -27.93),
        ('Radenci', 100.61, 84.00),
        ('Črna', 15.41, 66.57),
        ('Radeče', 39.05, 24.57),
        ('Vitanje', 47.36, 57.75),
        ('Bled', -37.84, 56.07),
        ('Tolmin', -63.90, 36.75),
        ('Miren', -72.14, 7.04),
        ('Ptuj', 87.61, 61.32),
        ('Gornja Radgona', 97.06, 89.25),
        ('Plave', -73.34, 21.00),
        ('Novo mesto', 37.91, -3.47),
        ('Bovec', -76.89, 52.50),
        ('Nova Gorica', -69.79, 12.29),
        ('Krško', 60.35, 14.07),
        ('Cerknica', -18.89, -3.47),
        ('Slovenska Bistrica', 66.31, 57.75),
        ('Anhovo', -72.14, 22.78),
        ('Ormož', 107.71, 61.32),
        ('Škofije', -59.14, -27.93),
        ('Čepovan', -60.35, 22.78),
        ('Murska Sobota', 108.91, 87.57),
        ('Ljubljana', -8.24, 22.78),
        ('Idrija', -43.74, 17.54),
        ('Radlje ob Dravi', 41.46, 82.32),
        ('Žalec', 37.91, 43.79),
        ('Mojstrana', -49.70, 64.79),
        ('Log pod Mangartom', -73.34, 59.54),
        ('Podkoren', -62.69, 70.04),
        ('Kočevje', 16.61, -21.00),
        ('Soča', -69.79, 52.50),
        ('Ajdovščina', -53.25, 5.25),
        ('Bohinjska Bistrica', -48.49, 47.25),
        ('Tržič', -22.44, 56.07),
        ('Piran', -75.69, -31.50),
        ('Kranj', -20.09, 43.79),
        ('Kranjska Gora', -60.35, 68.25),
        ('Izola', -68.59, -31.50),
        ('Radovljica', -31.95, 54.29),
        ('Gornji Grad', 13.06, 49.03),
        ('Šentjur', 54.46, 40.32),
        ('Koper', -63.90, -29.72),
        ('Celje', 45.01, 42.00),
        ('Mislinja', 42.60, 66.57),
        ('Metlika', 48.56, -19.21),
        ('Žaga', -81.65, 49.03),
        ('Komen', -63.90, -1.68),
        ('Žužemberk', 21.30, 0.00),
        ('Pesnica', 74.55, 80.54),
        ('Vrhnika', -23.64, 14.07),
        ('Dravograd', 28.40, 78.75),
        ('Kamnik', -1.14, 40.32),
        ('Jesenice', -40.19, 64.79),
        ('Kobarid', -74.55, 43.79),
        ('Portorož', -73.34, -33.18),
        ('Muta', 37.91, 82.32),
        ('Sežana', -54.39, -13.96),
        ('Vipava', -47.29, 1.79),
        ('Maribor', 72.21, 75.28),
        ('Slovenj Gradec', 31.95, 71.82),
        ('Litija', 14.20, 22.78),
        ('Na Logu', -62.69, 57.75),
        ('Stara Fužina', -52.04, 47.25),
        ('Motovun', -56.80, -52.50),
        ('Pragersko', 73.41, 57.75),
        ('Most na Soči', -63.90, 33.29),
        ('Brestanica', 60.35, 15.75),
        ('Savudrija', -80.44, -34.96),
        ('Sodražica', 0.00, -6.93),
    ]

    class CountCalls:
        """Callable wrapper that counts how often the wrapped `f` is invoked."""
        def __init__(self, f):
            self.f = f
            self.call_count = 0

        def __call__(self, *args, **kwargs):
            self.call_count += 1
            return self.f(*args, **kwargs)

    @classmethod
    def setUpClass(cls):
        # Wrap the module-level helpers (if they exist) so call counts
        # can be asserted in test_3.
        global koordinate, razdalja_koordinat
        try:
            koordinate = cls.CountCalls(koordinate)
        except:
            pass
        try:
            razdalja_koordinat = cls.CountCalls(razdalja_koordinat)
        except:
            pass

    def test_1_koordinate(self):
        """koordinate finds a place's pair and returns None when absent."""
        kraji = [
            ('Brežice', 68.66, 7.04),
            ('Lenart', 85.20, 78.75),
            ('Rateče', -65.04, 70.04),
            ('Ljutomer', 111.26, 71.82)
        ]
        self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
        self.assertEqual(koordinate("Lenart", kraji), (85.20, 78.75))
        self.assertEqual(koordinate("Rateče", kraji), (-65.04, 70.04))
        self.assertEqual(koordinate("Ljutomer", kraji), (111.26, 71.82))
        self.assertIsNone(koordinate("Ljubljana", kraji))

        kraji = [('Brežice', 68.66, 7.04)]
        self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
        self.assertIsNone(koordinate("Lenart", kraji))

        kraji = []
        self.assertIsNone(koordinate("Brežice", kraji))

    def test_1_range_len(self):
        """koordinate must iterate, not index -- a list without __getitem__."""
        class NoGetItem(list):
            def __getitem__(*x):
                raise IndexError("Nauči se (pravilno) uporabljati zanko for!")

        kraji = NoGetItem([('Brežice', 68.66, 7.04), ('Lenart', 85.20, 78.75),
                           ('Rateče', -65.04, 70.04)])
        self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
        self.assertEqual(koordinate("Lenart", kraji), (85.20, 78.75))
        self.assertEqual(koordinate("Rateče", kraji), (-65.04, 70.04))
        self.assertIsNone(koordinate("Ljubljana", kraji))

    def test_2_razdalja_koordinat(self):
        """Euclidean distance: axis-aligned unit moves and 3-4-5 triangles."""
        self.assertEqual(razdalja_koordinat(0, 0, 1, 0), 1)
        self.assertEqual(razdalja_koordinat(0, 0, 0, 1), 1)
        self.assertEqual(razdalja_koordinat(0, 0, -1, 0), 1)
        self.assertEqual(razdalja_koordinat(0, 0, 0, -1), 1)

        self.assertEqual(razdalja_koordinat(1, 0, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(0, 1, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(-1, 0, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(0, -1, 0, 0), 1)

        self.assertEqual(razdalja_koordinat(1, 2, 4, 6), 5)
        self.assertEqual(razdalja_koordinat(1, 2, -2, 6), 5)
        self.assertEqual(razdalja_koordinat(1, 2, 4, -2), 5)
        self.assertEqual(razdalja_koordinat(1, 2, -2, -2), 5)

        from math import sqrt
        self.assertAlmostEqual(razdalja_koordinat(1, 2, 0, 1), sqrt(2))

    def test_3_razdalja_krajev(self):
        """razdalja combines koordinate and razdalja_koordinat correctly."""
        kraji = [
            ('Brežice', 10, 20),
            ('Lenart', 13, 24),
            ('Rateče', 17, 20),
            ('Ljutomer', 8, 36)
        ]
        from math import sqrt
        self.assertEqual(razdalja("Brežice", "Lenart", kraji), 5)
        self.assertEqual(razdalja("Lenart", "Brežice", kraji), 5)
        self.assertEqual(razdalja("Brežice", "Rateče", kraji), 7)
        self.assertAlmostEqual(razdalja("Lenart", "Rateče", kraji), sqrt(32))
        self.assertEqual(razdalja("Lenart", "Ljutomer", kraji), 13)

        koordinate.call_count = razdalja_koordinat.call_count = 0
        razdalja("Brežice", "Lenart", kraji)
        self.assertEqual(
            koordinate.call_count, 2,
            "Funkcija `razdalja` mora dvakrat poklicati `koordinate`")
        # Fixed message: the delegate called once is `razdalja_koordinat`,
        # not `razdalja` itself.
        self.assertEqual(
            razdalja_koordinat.call_count, 1,
            "Funkcija `razdalja` mora enkrat poklicati `razdalja_koordinat`")

    def test_4_v_dometu(self):
        """v_dometu excludes the place itself and respects the range."""
        kraji = [
            ('Lenart', 13, 24),
            ('Brežice', 10, 20),  # Lenart <-> Brežice = 5
            ('Rateče', 17, 20),  # Lenart <-> Rateče = 5.66
            ('Ljutomer', 8, 36)  # Lenart <-> Ljutomer = 13
        ]
        self.assertEqual(v_dometu("Lenart", 5, kraji), ["Brežice"])
        self.assertEqual(v_dometu("Lenart", 3, kraji), [])
        self.assertEqual(set(v_dometu("Lenart", 6, kraji)), {"Brežice", "Rateče"})

        kraji = self.vsi_kraji
        self.assertEqual(set(v_dometu("Ljubljana", 20, kraji)), {'Vrhnika', 'Domžale', 'Kamnik', 'Škofja Loka'})

    def test_5_najbolj_oddaljeni(self):
        """najbolj_oddaljeni picks the farthest of the given candidates."""
        kraji = [
            ('Lenart', 13, 24),
            ('Brežice', 10, 20),  # Lenart <-> Brežice = 5
            ('Rateče', 17, 20),  # Lenart <-> Rateče = 5.66
            ('Ljutomer', 8, 36)  # Lenart <-> Ljutomer = 13
        ]
        self.assertEqual(najbolj_oddaljeni("Lenart", ["Brežice", "Rateče"], kraji), "Rateče")
        self.assertEqual(najbolj_oddaljeni("Lenart", ["Brežice"], kraji), "Brežice")

        kraji = self.vsi_kraji
        self.assertEqual(najbolj_oddaljeni("Ljubljana", ["Domžale", "Kranj", "Maribor", "Vrhnika"], kraji), "Maribor")

    def test_6_zalijemo(self):
        """zalijemo = farthest place within range."""
        self.assertEqual(zalijemo("Ljubljana", 30, self.vsi_kraji), "Cerknica")

    def test_7_presek(self):
        """presek keeps common elements, including empty-list edge cases."""
        self.assertEqual(presek([1, 5, 2], [3, 1, 4]), [1])
        self.assertEqual(presek([1, 5, 2], [3, 0, 4]), [])
        self.assertEqual(presek([1, 5, 2], []), [])
        self.assertEqual(presek([], [3, 0, 4]), [])
        self.assertEqual(presek([], []), [])
        self.assertEqual(set(presek([1, 5, 2], [2, 0, 5])), {2, 5})
        self.assertEqual(presek(["Ana", "Berta", "Cilka"], ["Cilka", "Dani", "Ema"]), ["Cilka"])

    def test_8_skupno_zalivanje(self):
        """skupno_zalivanje = intersection of the two in-range sets."""
        self.assertEqual(set(skupno_zalivanje("Bled", "Ljubljana", 30, self.vsi_kraji)),
                         {"Kranj", "Škofja Loka"})
if __name__ == "__main__":
    # Run the TestKraji suite when this file is executed directly.
    unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
40b2cbe54c638b602a87ec294c62d7fd598a0607 | 4b27a7e99c55a343cb845d085dd88aa7e77a8079 | /iter.py | 817bc91b4ad139ed8a8b3c340c75968d37d55a26 | [] | no_license | damodardikonda/Python-Pandas- | a1f0395a9514dbb639116d35ae465b7135d92c2c | de95146cbb01047d87a5bb297d94c21181dbd629 | refs/heads/master | 2022-09-19T01:25:31.985004 | 2020-06-05T07:35:44 | 2020-06-05T07:35:44 | 269,561,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import numpy as np
import pandas as pd
df=pd.DataFrame(np.random.rand(4,3),columns=['c1','c2','c3'])
print(df)
for k,v in df.iteritems():
print(k,v)
print("\n\nthrough row \n\n")
for row,row_index in df.iterrows():
print(row,row_index)
print("\n\n giving tuple asa value. first it print an index\n\n")
for r in df.itertuples():
print(r)
#Note − Do not try to modify any object while iterating. Iterating is meant for reading and the
# iterator returns a copy of the original object (a view), thus the changes will not reflect on the original object.
for i in df.iterrows();
i['a']=30
print(i)#it wont changes
| [
"damodar2dikonda@gmail.com"
] | damodar2dikonda@gmail.com |
d18ab02ef3bfd99d5e082a7ae112e606c37c79e5 | c14b274e98beeea6ad9f49b56dbc658e9083e160 | /Instanssi/ext_programme/migrations/0003_auto__add_field_programmeevent_event_type.py | 933e006bed9e56f87ab96c3d8cf25b9a7338ec97 | [
"MIT"
] | permissive | Yaamboo/Instanssi.org | 3096e59aa3c328dd52e4a5b8c29cdf8e5baddec0 | 17a09c3013ea83f46bd66dd412cfe5bb3a606710 | refs/heads/master | 2021-01-24T21:58:02.707521 | 2014-02-12T22:10:02 | 2014-02-12T22:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: adds the integer column `event_type`
    # (default 0) to ext_programme.ProgrammeEvent.

    def forwards(self, orm):
        # Adding field 'ProgrammeEvent.event_type'
        db.add_column('ext_programme_programmeevent', 'event_type',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'ProgrammeEvent.event_type'
        db.delete_column('ext_programme_programmeevent', 'event_type')

    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        'ext_programme.programmeevent': {
            'Meta': {'object_name': 'ProgrammeEvent'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['kompomaatti.Event']"}),
            'event_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'github_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'home_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'icon_original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'presenters': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'presenters_titles': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'wiki_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'kompomaatti.event': {
            'Meta': {'object_name': 'Event'},
            'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'date': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mainurl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
        }
    }

    complete_apps = ['ext_programme']
"katajakasa@gmail.com"
] | katajakasa@gmail.com |
84a090daec0107588cb4ff337ef930806f29e773 | 5ae15acd125798f3746c092d15dab5a9456d0121 | /backend/home/migrations/0002_load_initial_data.py | b745cb17280fa4e392e7f36d0c5f4cb05d00b5a9 | [] | no_license | crowdbotics-apps/kirpi-19355 | 8c5be04d51dccf08c44101e35561ad7f78207407 | 52aaca086597f9f9a98bc5af889d6f7addf0ac10 | refs/heads/master | 2022-11-25T06:54:58.142128 | 2020-08-04T03:43:38 | 2020-08-04T03:43:38 | 284,869,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the CustomText row with the application title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="kirpi")
def create_homepage(apps, schema_editor):
    """Seed the HomePage row with the default landing-page markup."""
    home_page_model = apps.get_model("home", "HomePage")
    body = """
    <h1 class="display-4 text-center">kirpi</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    home_page_model.objects.create(body=body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record at the app's domain."""
    site_model = apps.get_model("sites", "Site")
    domain = "kirpi-19355.botics.co"
    defaults = {"name": "kirpi"}
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Run after the initial "home" schema migration and the sites
    # framework's unique-domain migration, so the seeded models exist.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # Seed the default CustomText, HomePage and Site rows on migrate.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ceeb42f92f947c9545d365c69ef98e60cff3c358 | 0c507f09b7328e58b8dc2003a30056699c772d6d | /binary_search/search_insert_position.py | de25f23ea581768d74647a7902df6e71d7dc0281 | [] | no_license | chicocheco/leetcode_exercises | 81c531fa418eaa62097ccda07cf1d21a882fb965 | c97b47907ddf6b5b26b448969f515068648ea9d9 | refs/heads/main | 2023-08-30T07:50:05.498088 | 2021-11-16T16:27:44 | 2021-11-16T16:27:44 | 402,787,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | """
https://leetcode.com/problems/search-insert-position
Given a sorted array of distinct integers and a target value, return the index if the target is found. If not,
return the index where it would be if it were inserted in order.
You must write an algorithm with O(log n) runtime complexity.
Input: nums = [1,3,5,6], target = 5
Output: 2
"""
def search_insert(nums, target):
    """Binary search in sorted `nums`: index of `target`, or the index
    where it would be inserted to keep the list sorted.  O(log n)."""
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = nums[mid]
        if value == target:
            return mid
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    # Loop exhausted without a hit: `lo` is the insertion point.
    return lo
# Smoke tests: exact hit, append past the end, and two mid-list insertions.
assert search_insert([7, 9, 11, 14], 14) == 3
assert search_insert([7, 9, 11, 14], 15) == 4
assert search_insert([7, 9, 11, 14], 8) == 1
assert search_insert([7, 9, 11, 14], 12) == 3
| [
"stanislav.matas@gmail.com"
] | stanislav.matas@gmail.com |
72db980ed59813e041dd86e6d8a4e7bbe29346aa | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Python_Scripting_for_Computational_Science_Third_Edition/app/wavesim2D/F77/Verify/test1.py | 102913c481edabc9df7232e41e9ae80f82a89026 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 786 | py | #!/usr/bin/env python
import os, shutil, glob
# Work from the parent directory, where the Fortran sources live.
os.chdir(os.pardir)
# Restore pristine copies of the source and preprocessor files.
shutil.copy('main.f.orig', 'main.f')
shutil.copy('F77WAVE.fcp.orig', 'F77WAVE.fcp')
# edit main.f such that solutions are dumped,
# also use a small grid
# (first perl: uncomment the `call dump` line; second perl: shrink the
# grid PARAMETER to n=31)
os.system("perl -pi.old~ -e 's#^C(\s+)call dump# $1call dump#' main.f")
os.system("perl -pi.old~ -e 's#^[^C]\s+PARAMETER \(n=(\d+)\)# PARAMETER (n=31)#' main.f")
# Rebuild the Fortran application.
os.system("./make.sh")
os.chdir("Verify")
# Remove stale plot files from previous runs.
tmpfiles = glob.glob("tmp_*.mtv")
for file in tmpfiles: os.remove(file)
# Feed the number of time steps to the solver via a temp input file.
f = open('tmp.input', 'w')
f.write('20\n') # no of time steps
f.close()
os.system("../app < tmp.input")
# show on the screen:
from scitools.misc import findprograms
if findprograms(['plotmtv'], write_message=1):
    os.system("plotmtv -geometry 600x700 -nodate -3d tmp_*.mtv")
| [
"bb@b.om"
] | bb@b.om |
51a5ce2d7d807281164d71093567d658e7265769 | 243ce25168eea65144713a1100ca997a2d29f280 | /p7.py | 5941a9ed24c7c2caed8a8b366ceac70c9efd6811 | [] | no_license | acadien/projecteuler | 6aa1efbb1141ecf36d6b23bb6b058070e5e881e0 | 2efb0b5577cee7f046ed4f67d0f01f438cbf3770 | refs/heads/master | 2020-04-28T21:33:49.631044 | 2013-12-06T19:25:20 | 2013-12-06T19:25:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | #!/usr/bin/python
def prod(digits):
    """Return the product of the items of `digits`, each coerced to int."""
    total = 1
    for digit in digits:
        total *= int(digit)
    return total


# 1000-digit number from Project Euler problem 8.
num = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"

# Slide a 5-digit window over every position.  The original loop tested
# each window *before* advancing it and stopped after appending digit 999,
# so the final window (digits 995..999) was never checked -- an off-by-one.
large = 0
for start in range(len(num) - 4):
    current = prod(num[start:start + 5])
    if current > large:
        large = current

# print(x) with a single argument works in both Python 2 and Python 3,
# unlike the original `print large` statement.
print(large)
| [
"adamcadien@gmail.com"
] | adamcadien@gmail.com |
ea125b5976ac3edee7174c1cbb098beeb5a9b5e9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_212/ch117_2020_03_30_13_52_24_265011.py | dab219b83b876930425f22a7e679427c1de4cd78 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from mat import sen
def snell_descartes (n1, n2, anguloI) :
ânguloR = ( n1/n2 ) * sen(anguloI)
return ânguloR
n1 = input ("qual o meio que o raio provem?")
n2 = input (" qual meio para o qual o raio passsa?")
anguloI = input ("qual o angulo de incidência?")
print (snell_descartes) | [
"you@example.com"
] | you@example.com |
a9c34a38bcbaac8f21ed25259bc049d08cead3f7 | ae83914f309ee203c9743a1c2273539862126e92 | /src/modules/bilstm_crf.py | 82e924e47768fb453d725c3dba95f9d9b1f84159 | [] | no_license | kajyuuen/pytorch-ner | e416af264dd24470b0d7c3c10346a96e241810da | 5b6084b122aa7bfe2f18fe63411535f180e24f8d | refs/heads/master | 2020-08-10T16:52:24.310618 | 2019-10-18T06:57:16 | 2019-10-18T06:57:16 | 214,380,670 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_partial_crf import CRF
from pytorch_partial_crf import PartialCRF
from typing import Tuple
from src.common.config import PAD_TAG
from src.common.utils import create_possible_tag_masks
from src.modules.bilstm import BiLSTM
class BiLSTM_CRF(nn.Module):
    """BiLSTM encoder followed by a CRF (or partial CRF) inference layer."""

    def __init__(self,
                 num_tags,
                 label_vocab,
                 char_vocab,
                 word_vocab,
                 emb_dict,
                 dropout_rate = 0,
                 batch_first = True,
                 inference_type = "CRF"):
        super().__init__()
        self.encoder = BiLSTM(num_tags,
                              label_vocab,
                              char_vocab,
                              word_vocab,
                              emb_dict,
                              batch_first = batch_first)
        # "Simple" and "Hard" share the standard CRF; "PartialCRF" supports
        # partially annotated training sequences.
        if inference_type in ("CRF", "Simple", "Hard"):
            self.inferencer = CRF(num_tags)
        elif inference_type == "PartialCRF":
            self.inferencer = PartialCRF(num_tags)
        else:
            raise ModuleNotFoundError
        self.num_tags = num_tags

    def forward(self, batch) -> torch.Tensor:
        """Return the CRF loss for a batch of labeled sentences."""
        emissions, tags, mask = self._get_variable_for_decode(batch)
        return self.inferencer(emissions, tags, mask)

    def decode(self, batch) -> Tuple[torch.Tensor, torch.Tensor]:
        """Viterbi-decode the best tag sequence for each sentence."""
        emissions, _, mask = self._get_variable_for_decode(batch)
        return self.inferencer.viterbi_decode(emissions, mask)

    def restricted_decode(self, base_batch, batch) -> Tuple[torch.Tensor, torch.Tensor]:
        """Viterbi decoding constrained to the tags allowed by `base_batch`'s labels."""
        allowed_tags = create_possible_tag_masks(self.num_tags, base_batch.label)
        emissions, _, mask = self._get_variable_for_decode(batch)
        return self.inferencer.restricted_viterbi_decode(emissions, allowed_tags, mask)

    def _get_variable_for_decode(self, batch) -> torch.Tensor:
        """Encode the batch; return (emissions, gold tags, padding mask)."""
        emissions = self.encoder(batch)
        tags = batch.label
        # Non-zero label positions are real tokens; zeros are padding.
        mask = tags.clone().byte()
        mask[mask != 0] = 1
        return emissions, tags, mask
| [
"kajyuuen@gmail.com"
] | kajyuuen@gmail.com |
c54a497e07b5067727c7be96464ec42e722a69f7 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0928+031/sdB_pg_0928+031_coadd.py | be060adebe66994d01bc1fb878a3d294c48c85fb | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from gPhoton.gMap import gMap
def main():
    # Build an NUV light-curve movie (30 s steps) and a coadded count image
    # for sdB PG 0928+031 over a 2'x2' box, overwriting any existing output.
    gMap(band="NUV", skypos=[142.748667,2.842431], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_0928+031/sdB_pg_0928+031_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_0928+031/sdB_pg_0928+031_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
5601667b2989a7eb779fe5ff718bd7670f4c92cb | 52e8841ac9603e994fc487ecb52f232e55a50e07 | /Bio/NeuralNetwork/Training.py | af8c2f0be8fc4b890d4c2958db495acfaf54b2ad | [] | no_license | rored/RozszerzenieBio.PDB | aff434fddfe57199a7465f79126eba62b1c789ae | 7c9d696faacabff912b1263fe19291d6a198c3c2 | refs/heads/master | 2021-01-21T04:50:37.903227 | 2016-06-23T19:15:42 | 2016-06-23T19:15:42 | 55,064,794 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Provide classes for dealing with Training Neural Networks."""
# standard modules
import random
__docformat__ = "restructuredtext en"
class TrainingExample(object):
    """Container pairing one set of network inputs with its expected outputs."""
    def __init__(self, inputs, outputs, name=""):
        # Optional human-readable label for the example.
        self.inputs = inputs
        self.outputs = outputs
        self.name = name
class ExampleManager(object):
    """Split a pool of TrainingExamples into three independent groups.

    o Training Data -- used for the actual training of the network.
    o Validation Data -- used to evaluate the network during training,
      independently of noise in the training set.
    o Testing Data -- held out from training entirely, so it gives a
      completely independent measure of how well the network performs.
    """
    def __init__(self, training_percent=.4, validation_percent=.4):
        """Remember the split fractions and start with empty groups.

        Arguments:
        o training_percent - fraction of examples to use for training.
        o validation_percent - fraction to use for validation during
        training.

        Attributes:
        o train_examples - randomly chosen examples for training.
        o validation_examples - randomly chosen examples for validation.
        o test_examples - the remaining examples, for final testing.
        """
        assert training_percent + validation_percent <= 1.0, \
               "Training and validation percentages more than 100 percent"

        self.train_examples = []
        self.validation_examples = []
        self.test_examples = []

        self.training_percent = training_percent
        self.validation_percent = validation_percent

    def add_examples(self, training_examples):
        """Randomly distribute `training_examples` over the three groups.

        Arguments:
        o training_examples - a list of TrainingExamples to manage.
        """
        chooser = random.Random()
        validation_cutoff = self.training_percent + self.validation_percent
        # Each example is assigned by a single uniform draw against the
        # configured percentage thresholds.
        for example in training_examples:
            draw = chooser.random()
            if draw <= self.training_percent:
                self.train_examples.append(example)
            elif draw <= validation_cutoff:
                self.validation_examples.append(example)
            else:
                self.test_examples.append(example)
"Viktoria@MacBook-Pro-Viktoria.local"
] | Viktoria@MacBook-Pro-Viktoria.local |
4734c0100d27206fceada3dd91a5fcf113ff12e2 | 82f2bc45d08d06395009f24e9a0eb8de10c9aec9 | /src/canvas_utils/query_sorter.py | 6877bd6e68b6848492c223398af00fdf77f33c98 | [] | no_license | paepcke/canvas_utils | 3d4502634d639a6f3ea600e9b66c6ca8411eb44d | 6b7d9d4ccf93d034c88ed058ed06ddf02f124785 | refs/heads/master | 2020-04-09T04:23:49.766448 | 2019-12-19T22:00:34 | 2019-12-19T22:00:34 | 160,021,203 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,736 | py | #!/usr/bin/env python
'''
Created on Aug 14, 2019
@author: paepcke
'''
from _io import StringIO
import copy
import os
import re
import sys
# NOTE: don't import utilities module here.
# would lead to circular import.
class QuerySorter(object):
'''
Runs through all queries in the Queries subdirectory,
and looks for query text that refers to other tables.
We assume that the files in Queries all have the names
of tables, with extension '.sql' appended.
Will order the list of tables such that all dependencies
of MySQL running the table X sql on other tables having
been created first are satisfied.
Raises ValueError if circular dependency is detected.
'''
#-------------------------
# Constructor
#--------------
def __init__(self, unittests=False):
'''
Constructor
'''
self.curr_dir = os.path.dirname(__file__)
self.query_dir = os.path.join(self.curr_dir, 'Queries')
# Get basenames of query files, e.g. Terms.sql:
self.query_file_names = os.listdir(self.query_dir)
self.table_names = [file_name.split('.')[0]
for file_name in self.query_file_names
if file_name.split('.')[1] == 'sql']
if unittests:
# Allow unittests to call the various methods
# in isolation:
return
self.query_texts = self.get_query_texts(self.query_file_names)
self.precedence_dict = self.build_precedence_dict(self.query_texts)
self._sorted_table_names = self.sort(self.precedence_dict)
#-------------------------
# property sorted_table_names
#--------------
    @property
    def sorted_table_names(self):
        '''
        Read-only view of the table names, ordered so that every table
        appears after all tables it depends on (computed in __init__).

        @return: dependency-sorted list of table names
        @rtype: [str]
        '''
        return self._sorted_table_names
#-------------------------
# get_query_texts
#--------------
def get_query_texts(self, file_basenames):
'''
Read all queries in files within Query.
Return a dict {table_name : "the query text"}
Leave out lines with sharp char (comment) at
the start
@param file_basenames: names of query file names in Queries
(not full paths)
@type file_basenames: [str]
@return: dictionary mapping table names to the SQL text
that creates them
@rtype: {str : str}
'''
full_query_paths = [os.path.join(self.query_dir, file_name)
for file_name in file_basenames
if file_name.endswith('.sql')]
text_dict = {}
for query_path in full_query_paths:
# Table name is name of file without extension:
table_name = os.path.splitext(os.path.basename(query_path))[0]
with open(query_path, 'r') as fd:
in_buf = StringIO(fd.read())
# Discard comments with hash char at start of line:
out_buf = StringIO()
for line in in_buf:
if line[0] == '#':
continue
out_buf.write(line)
# Store the entire query file content
# in the value of the table dict:
text_dict[table_name] = out_buf.getvalue()
return text_dict
#-------------------------
# build_precedence_dict
#--------------
def build_precedence_dict(self, text_dict):
'''
Given a dict: {<table_name> : <query_text_StringIO_buf},
construct a dict:
{<table_name> : [table_name, table_name, ...]}
where the array contains names of tables that must
be processed before the table_name in the key.
Strategy:
1. Build a regular expression that will find every
table name in a string, and makes a regegx group
out of it. Ex: r"(tbl1_name)|(tbl2_name)|(tbl3_name)"
2. For each query string that creates a table, apply
the search pattern to get tuples of groups. For
string "I am tbl1_name\nAnd you are tbl3_name"
We get:
[('tbl2_name','',''),('','','tbl3_name')]
3. Pick the non-empty table names out from each tuple
to get a list of mentioned table names.
@param text_dict: dict table_name to query text StringIO buffer
@type text_dict: {str : str}
@return: dict mapping a table name to an array of
table names that need to be processed earlier.
@rtype: {str : [str]}
'''
precedence_dict = {}
# Build a regular expression that makes a group out
# of every occurrence of a table name in a multi-line
# string: r"(<tablename1>)|(<tablename2>)|..."
table_names = text_dict.keys()
# Convert ['table1', 'table2', ...] to ['(table1)', '(table2)', ...]:
table_name_grps = [f"({table_name})" for table_name in table_names]
# Put the regex OR operator between each group:
search_pattern = re.compile('|'.join(table_name_grps))
for (table_name, query_str) in text_dict.items():
precedence_set = set()
# Get a list of groups of table names:
# [('','Table3', ''), ('','', 'Table10'),
# where each table name is one found in the
# query string:
group_tuples = search_pattern.findall(query_str)
# Go through each tuple and find the group that is
# not empty, and is not the name of the table whose
# dependencies we are trying to find:
for group_tuple in group_tuples:
found_tbl_names = [found_table_name for found_table_name in group_tuple
if len(found_table_name) > 0 and found_table_name != table_name]
precedence_set = precedence_set.union(set(found_tbl_names))
precedence_dict[table_name] = list(precedence_set)
return precedence_dict
#-------------------------
# sort
#--------------
def sort(self, precedence_dict):
'''
Given a dict:
{table_name3 : [table_name1, table_name2],
table_name1 : [],
table_name2 : [table_name1]
}
returns an ordered list of table names such that
there will not be a "unknown table" error when
loading the corresponding files in order.
@param precedence_dict: map from table names to lists of
tables that must be loaded ahead of time.
@type precedence_dict: { str : [str]}
@return: ordered list of table names
@rtyp: [str]
@raise TableError: if there two tables have a mutual dependency,
or if any table in the queries has not corresponding
.sql file in Queries.
'''
# Check for cycles: tables that mutually require the
# other one to be loaded first: Throws a value
# errors with informative msg if dicovers a conflict.
# Else returns True:
ordered_tables = self.detect_mutual_table_dependencies(precedence_dict)
return ordered_tables
#-------------------------
# detect_mutual_table_dependencies
#--------------
def detect_mutual_table_dependencies(self, precedence_dict, table_list_todo=None, tables_done=[], tables_being_considered=[]):
'''
Given a precedence dict: {table : [table1, table2, ...]} of
tables and their dependencies, return True if there are
no mutual dependencies. Else raise ValueError with informative
message.
A mutual dependency occurs when:
{table1 : [table2],
table2 : [table1]
}
or with more complexity:
{table1 : [table2],
table2 : [table3],
table3 : [table1]
}
The method is recursive, diving depth-first into the
dependencies.
From the top level, only the precedence_dict is typically
provided. The remaining args are for recursive calls.
@param precedence_dict: dict of table interdependencies
@type precedence_dict: {str : [str]}
@param table_list_todo: list of table names that are to
be examined for conflicts.
@type table_list_todo: [str]
@param tables_done: list of tables that are already processed
@type tables_done: [str]
@param tables_being_considered: list of tables that are being
processed in the current layers of recursion
@type tables_being_considered: [str]
@return: list of tables in the order in which they can be loaded.
@rtype: [str]
@raise: TableError if mutual dependency is found, or a table name
appears in the queries that does not have a corresponding .sql
file in Queries.
'''
if table_list_todo is None:
# First time in (i.e top level)
top_level = True
# Copy the passed-in precedence_dict, b/c belo we pop
# values off some of its entries.
precedence_dict = copy.deepcopy(precedence_dict)
# Total number of tables to examine:
table_list_todo = list(precedence_dict.keys())
# Right off the bat: declare all tables without
# dependencies winners: Transfer them to tables_done:
no_precedence_table_it = filter(lambda tname: len(precedence_dict[tname]) == 0, table_list_todo)
tables_done = [tname for tname in no_precedence_table_it]
# Remove the done ones from the todos:
table_list_todo = [tbl_name for tbl_name in table_list_todo if tbl_name not in tables_done]
# Sort tables todo by decreasing number of
# dependencies that each table needs. The
# array pop() in the while loop will therefore
# do as many low dependency tables as possible:
table_list_todo = sorted(table_list_todo,
key=lambda tbl_name: len(precedence_dict[tbl_name]),
reverse=True)
# No dependency chain yet:
tables_being_considered = []
# No error encountered yet:
original_error = None
else:
# This is a recursive call:
top_level = False
while True:
try:
curr_table = table_list_todo.pop()
except IndexError:
# All done:
return tables_done
if curr_table in tables_done:
# Satisfied this table's dependencies
# earlier, or has none:
continue
if curr_table in tables_being_considered:
raise InternalTableError(curr_table, "Mutual load order dependency")
try:
curr_dependencies = precedence_dict[curr_table]
except KeyError as e:
raise InternalTableError(curr_table, f"Missing table file {curr_table}.sql in Queries directory")
satisfied = [dep for dep in curr_dependencies if dep in tables_done]
curr_dependencies = [unfilled for unfilled in curr_dependencies if unfilled not in satisfied]
if len(curr_dependencies) > 0:
try:
tables_being_considered.append(curr_table)
tables_done = self.detect_mutual_table_dependencies(precedence_dict,
table_list_todo=curr_dependencies,
tables_done=tables_done,
tables_being_considered=tables_being_considered
)
tables_being_considered.pop()
except InternalTableError as e:
original_error = e
# Unwind the recursion:
if not top_level:
raise e
# Recursion unwound, check for error:
if original_error:
# Add the current table to the end of
# the dependency chain. That will be
# the same as the list head. Ex:
# [CourseEnrollment, Terms, Student, CourseEnrollment]
tables_being_considered.append(curr_table)
# Build nice error message
raise TableError(tuple(tables_being_considered), original_error.message)
tables_done.append(curr_table)
# --------------------- Exception Classes --------------------
#-------------------------
# ConfigurationError
#--------------
class ConfigurationError(Exception):
    '''
    Raised when the setup.cfg file cannot be read, or when it
    holds faulty content.

    The explanatory text is exposed through the read-only
    ``message`` property.
    '''

    def __init__(self, message):
        self._msg_text = message
        super().__init__(self.message)

    @property
    def message(self):
        # Coerce to str so non-string payloads render consistently.
        return str(self._msg_text)
#-------------------------
# TableError
#--------------
class TableError(Exception):
    '''
    Raised for tables missing in the Queries subdir, or for
    mutually required (cyclically dependent) tables.

    Attributes:
        table_tuple -- tuple of the table names involved in the error
        message     -- explanation of the error, prefixed by the tuple
    '''

    def __init__(self, table_tuple, message):
        self.table_tuple = table_tuple
        self._msg_text = message
        super().__init__(self.message)

    @property
    def message(self):
        return "{}: {}".format(self.table_tuple, self._msg_text)
#-------------------------
# DatabaseError
#--------------
class DatabaseError(Exception):
    '''
    Error during interaction with the database.

    Exposes a single read-only property: ``message``.
    '''

    def __init__(self, message):
        self._msg_text = message
        super().__init__(self.message)

    @property
    def message(self):
        return str(self._msg_text)
#-------------------------
# InternalTableError
#--------------
class InternalTableError(Exception):
    '''
    Internal signal used while resolving table load order; wrapped
    into a TableError before reaching callers of sort().

    Attributes:
        table_name        -- the table at which the problem was detected
        message           -- explanation of the error
        implicated_tables -- other tables involved (defaults to empty)
    '''

    def __init__(self, table_name, message, implicated_tables=None):
        self.table_name = table_name
        self._message_fragment = message
        # Fix: avoid the shared mutable-default pitfall — a fresh list is
        # created per instance instead of one [] shared by all instances.
        self.implicated_tables = [] if implicated_tables is None else implicated_tables
        super().__init__(self.message)

    @property
    def message(self):
        return self._message_fragment
# -------------------- Main --------------------
if __name__ == '__main__':
    # CLI entry point: no command line options are accepted; any extra
    # argument prints usage and exits.
    if len(sys.argv) > 1:
        print("Usage: no command line options. When run, prints list of aux tables\n" +
              "whose .sql in subdirectory Queries must be run in order to avoid\n" +
              "MySQL table-not-found errors. Used internally."
              )
        sys.exit()
    # Compute the dependency-safe load order for Queries/*.sql and print it.
    sorter = QuerySorter()
    print(f"Table order:\n{sorter.sorted_table_names}")
"paepcke@cs.stanford.edu"
] | paepcke@cs.stanford.edu |
fe12c229a6c55bc5aa28a38dcbb95155d30832de | 0ca2d3fcd53fb9795c2d8741affe87cccede300c | /scipy_doc/routines.sort.html/numpy.argmax.py | 93c8150171a861d2001b87f58f31e8fd46dc7b9c | [] | no_license | yzozulya/numpy_test_examples | 9bafc5d0711149a3366a0644309be6ff6d480c7a | 4b1e65b160728a4c483d883bd00b72b2f61377b8 | refs/heads/master | 2021-01-01T19:01:50.560912 | 2015-04-08T12:18:46 | 2015-04-08T12:18:46 | 33,604,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import numpy as np
# Demonstration of numpy.argmax: index of the maximum value, overall or per axis.
a = np.arange(6).reshape(2, 3)
a
np.argmax(a)           # index into the flattened array (here 5, value 5)
np.argmax(a, axis=0)   # argmax down each column
np.argmax(a, axis=1)   # argmax along each row
b = np.arange(6)
b[1] = 5               # b now contains the max value 5 twice (indices 1 and 5)
b
np.argmax(b)  # Only the first occurrence is returned.
| [
"yulia.zozulya@jetbrains.com"
] | yulia.zozulya@jetbrains.com |
3033ca338ddf9127998248fb43affafd10c89356 | 55afd3bbe5187dba96be169a7c068c7cf7543447 | /article17/speciessummary/attrs_conclusion/td_range_conclusion.py | 5198c6beeb71524b0e782223750eee3efb0afa07 | [] | no_license | eaudeweb/art17-2006 | 6d9413439e10f4db0b72fc49c80b7c50ee1ef59e | 4bc61cd2972f94769dae97b95ccb55f2a0952cf1 | refs/heads/master | 2016-09-05T13:33:19.280952 | 2014-01-30T09:54:27 | 2014-01-30T09:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # Script (Python)
# /article17/speciessummary/attrs_conclusion/td_range_conclusion
# params: 'assesment_speciesname, region, record, conclusions'
## Script (Python) "td_range_conclusion"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=assesment_speciesname, region, record, conclusions
##title=
##
# Zope Script (Python): builds the <td> cell data (colour, tooltip title,
# content) for a species' range conclusion in the Article 17 summary table.
# Returns the dict produced by background_colour(), augmented with the
# assessment method as 'content' and a possibly value-suffixed 'title'.
output = context.background_colour(record['conclusion_range'], 'center', conclusions)
title = output.get('title', '')
# method_range may be None/empty; normalise to '' for both the SQL lookup
# and for display.
method = record['method_range'] or ''
# Look up the percentage value backing this conclusion, if recorded.
cursor = context.sql_methods.get_range_conclusion_value(assesment_speciesname=assesment_speciesname, region=region, assessment_method=method)
if len(cursor):
    concl_value = cursor[0]['percentage_range_surface_area']
    if concl_value:
        # Append the percentage to the tooltip title.
        title = "%s: %s" % (title, concl_value)
output.update({
    'content': method,
    'title': title,
})
return output
| [
"alex@grep.ro"
] | alex@grep.ro |
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
    # Apply the 'hardware' sub-dict of the Ansible module params (CPU count,
    # cores per socket, memory size, hot-add/hot-remove flags, memory
    # reservation and boot firmware) onto self.configspec, setting
    # self.change_detected whenever the requested spec differs from the
    # live VM, or unconditionally when the VM does not exist yet.
    #
    # vm_obj: existing VM object or None when the VM is being created
    #         (presumably a pyVmomi vim.VirtualMachine -- confirm).
    # vm_creation: True when called during creation of a brand-new VM, in
    #         which case num_cpus/memory_mb are mandatory unless cloning
    #         from a template.
    if ('hardware' in self.params):
        if ('num_cpus' in self.params['hardware']):
            try:
                num_cpus = int(self.params['hardware']['num_cpus'])
            except ValueError as e:
                self.module.fail_json(msg='hardware.num_cpus attribute should be an integer value.')
            if ('num_cpu_cores_per_socket' in self.params['hardware']):
                try:
                    num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
                except ValueError as e:
                    self.module.fail_json(msg='hardware.num_cpu_cores_per_socket attribute should be an integer value.')
                # The total vCPU count must divide evenly into sockets.
                if ((num_cpus % num_cpu_cores_per_socket) != 0):
                    self.module.fail_json(msg='hardware.num_cpus attribute should be a multiple of hardware.num_cpu_cores_per_socket')
                self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
                if ((vm_obj is None) or (self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket)):
                    self.change_detected = True
            self.configspec.numCPUs = num_cpus
            if ((vm_obj is None) or (self.configspec.numCPUs != vm_obj.config.hardware.numCPU)):
                self.change_detected = True
        # num_cpus is required for creation unless cloning from a template.
        elif (vm_creation and (not self.params['template'])):
            self.module.fail_json(msg='hardware.num_cpus attribute is mandatory for VM creation')
        if ('memory_mb' in self.params['hardware']):
            try:
                self.configspec.memoryMB = int(self.params['hardware']['memory_mb'])
            except ValueError:
                self.module.fail_json(msg='Failed to parse hardware.memory_mb value. Please refer the documentation and provide correct value.')
            if ((vm_obj is None) or (self.configspec.memoryMB != vm_obj.config.hardware.memoryMB)):
                self.change_detected = True
        elif (vm_creation and (not self.params['template'])):
            self.module.fail_json(msg='hardware.memory_mb attribute is mandatory for VM creation')
        if ('hotadd_memory' in self.params['hardware']):
            # Allow adding memory while the VM is powered on.
            self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
            if ((vm_obj is None) or (self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled)):
                self.change_detected = True
        if ('hotadd_cpu' in self.params['hardware']):
            # Allow adding vCPUs while the VM is powered on.
            self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
            if ((vm_obj is None) or (self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled)):
                self.change_detected = True
        if ('hotremove_cpu' in self.params['hardware']):
            # Allow removing vCPUs while the VM is powered on.
            self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
            if ((vm_obj is None) or (self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled)):
                self.change_detected = True
        if ('memory_reservation' in self.params['hardware']):
            memory_reservation_mb = 0
            try:
                memory_reservation_mb = int(self.params['hardware']['memory_reservation'])
            except ValueError as e:
                self.module.fail_json(msg=('Failed to set memory_reservation value.Valid value for memory_reservation value in MB (integer): %s' % e))
            # Wrap the reservation in a ResourceAllocationInfo as required
            # by the vSphere API.
            mem_alloc = vim.ResourceAllocationInfo()
            mem_alloc.reservation = memory_reservation_mb
            self.configspec.memoryAllocation = mem_alloc
            if ((vm_obj is None) or (self.configspec.memoryAllocation.reservation != vm_obj.config.memoryAllocation.reservation)):
                self.change_detected = True
        if ('memory_reservation_lock' in self.params['hardware']):
            # Lock the reservation to the configured maximum memory size.
            self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
            if ((vm_obj is None) or (self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax)):
                self.change_detected = True
        if ('boot_firmware' in self.params['hardware']):
            # Only the two firmware types vSphere supports are accepted.
            boot_firmware = self.params['hardware']['boot_firmware'].lower()
            if (boot_firmware not in ('bios', 'efi')):
                self.module.fail_json(msg=("hardware.boot_firmware value is invalid [%s]. Need one of ['bios', 'efi']." % boot_firmware))
            self.configspec.firmware = boot_firmware
            if ((vm_obj is None) or (self.configspec.firmware != vm_obj.config.firmware)):
                self.change_detected = True
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
e906b7e87133a2381c44cadedd1eb9b84a7f7ba0 | 34851e4fedf2fea3aa5b87a923951ee8887344e9 | /public_markets/campbxusd.py | 5325df38835caeb491fda2567a16a2633a0ae639 | [] | no_license | kafitz/btc-arbitrage | cfb8a32bdea0312bb3a5a9bd70a36ebdf7f6d011 | 52d66bd47c2bccb9a75c06dda0ee4db9a7436ebb | refs/heads/master | 2021-01-23T18:11:11.211742 | 2013-03-25T18:16:13 | 2013-03-25T18:16:13 | 8,922,078 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | import urllib2
import json
import logging
from market import Market
class CampBXUSD(Market):
    '''Fetches the CampBX USD order book and keeps self.depth up to date.'''

    def __init__(self):
        super(CampBXUSD, self).__init__("USD")
        # Seconds between successive depth refreshes.
        self.update_rate = 25
        # Placeholder book so consumers always find well-formed
        # 'asks'/'bids' entries even before the first successful fetch.
        self.depth = {'asks': [{'price': 0, 'amount': 0}], 'bids': [{'price': 0, 'amount': 0}]}
        # {withdraw: amount bitcoins charged as network fee, exchange_rate: % for currency exchange}
        self.fees = {'withdraw': 0, 'exchange_rate': 0.0055}

    def update_depth(self):
        '''Fetch the live order book; on any failure keep the previous depth.'''
        try:
            res = urllib2.urlopen('http://campbx.com/api/xdepth.php')
            try:
                jsonstr = res.read()
            finally:
                # Fix: always release the HTTP connection (previously leaked).
                res.close()
            data = json.loads(jsonstr)
            self.depth = self.format_depth(data)
        except Exception:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit propagate; network/JSON errors are logged only and
            # the previous depth snapshot is retained.
            logging.error("%s - depth data fetch error." % (self.name,))

    def sort_and_format(self, l, reverse=False):
        '''Sort [price, amount] pairs by price and convert each to a dict.'''
        # Sort the list of price/amount pairs by numeric price.
        l.sort(key=lambda x: float(x[0]), reverse=reverse)
        # Create a dict from each [price, amount] pair.
        ret = []
        for i in l:
            ret.append({'price': float(i[0]), 'amount': float(i[1])})
        return ret

    def format_depth(self, data):
        '''Normalise the raw API payload: bids descending, asks ascending.'''
        bids = self.sort_and_format(data["Bids"], True)
        asks = self.sort_and_format(data["Asks"], False)
        return {'asks': asks, 'bids': bids}
if __name__ == "__main__":
    # Manual smoke test (Python 2 syntax): build the market and dump its book.
    market = CampBXUSD()
    # get_depth() is presumably inherited from the Market base class -- confirm.
    print market.get_depth()
| [
"kafitz22@gmail.com"
] | kafitz22@gmail.com |
9e553106f69822bb6c99bf3d3f16c2118c568b90 | 9cc51b53bc3cac814843758fb98d212cd9656a0b | /model/product_base_info.py | 40efd57256ca03c7d4d1aabcf8292fbcf495cfef | [
"Apache-2.0"
] | permissive | juxiangwu/PSS | ead5317e54fcc3ad12e6df10b9956e276cfa8a6d | 9108ca6b669f0bec9647d015d14b421dacc02645 | refs/heads/master | 2021-05-10T15:53:01.649567 | 2018-02-17T14:35:37 | 2018-02-17T14:35:37 | 118,563,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | # -*- coding:utf-8 -*-
# Product basic-information table (商品基本信息表)
from config.appconfig import db
import datetime
class ProductBaseInfo(db.Model):
    """Flask-SQLAlchemy model for basic product information.

    Maps to table ``t_product_base_info``: one row per product of a shop,
    with pricing (purchase/retail/wholesale), category, unit and supplier
    denormalised by both id and name.
    """
    __tablename__ = "t_product_base_info"

    id = db.Column('id', db.Integer, primary_key=True)
    # Owning shop (denormalised id; no FK constraint declared).
    shopId = db.Column('shop_id', db.Integer)
    name = db.Column('name', db.String(128))
    code = db.Column('code', db.String(128))
    barcode = db.Column('barcode', db.String(128))
    # Pinyin abbreviation of the name, for quick keyboard search.
    pinyinCode = db.Column('pinyin_code', db.String(128))
    categoryName = db.Column('category_name', db.String(128))
    categoryId = db.Column('category_id', db.Integer)
    unitName = db.Column('unit_name', db.String(128))
    # NOTE(review): 'puchase' typo exists in the DB column name too;
    # renaming would require a schema migration.
    puchasePrice = db.Column('puchase_price', db.Float)
    # NOTE(review): attribute says retail but the column is 'sell_price'.
    retailPrice = db.Column('sell_price', db.Float)
    wholesalePrice = db.Column('wholesale_price', db.Float)
    supplierName = db.Column('supplier_name', db.String(128))
    supplierId = db.Column('supplier_id', db.Integer)
    isEnabled = db.Column('is_enabled', db.Boolean)
    createDateTime = db.Column('create_datetime', db.DateTime)
    modifyDateTime = db.Column('modify_datetime', db.DateTime)

    def __init__(self, shopId, name, code, barcode, pinyinCode, categoryId,
                 categoryName, unitName, puchasePrice, retailPrice,
                 wholesalePrice, supplierName, supplierId, createDateTime,
                 modifyDateTime, isEnabled=True):
        # Plain field-by-field initialiser; new products default to enabled.
        self.shopId = shopId
        self.name = name
        self.code = code
        self.barcode = barcode
        self.pinyinCode = pinyinCode
        self.categoryId = categoryId
        self.categoryName = categoryName
        self.unitName = unitName
        self.puchasePrice = puchasePrice
        self.wholesalePrice = wholesalePrice
        self.retailPrice = retailPrice
        self.supplierId = supplierId
        self.supplierName = supplierName
        self.createDateTime = createDateTime
        self.modifyDateTime = modifyDateTime
        self.isEnabled = isEnabled

    def to_json(self):
        # Serialise for API responses; datetimes are formatted as
        # 'YYYY-MM-DD HH:MM:SS' strings.
        return {
            "id": self.id,
            "shopId": self.shopId,
            "name": self.name,
            "code": self.code,
            "barcode": self.barcode,
            "pinyinCode": self.pinyinCode,
            "categoryName": self.categoryName,
            "categoryId": self.categoryId,
            "unitName": self.unitName,
            "purchasePrice": self.puchasePrice,
            "retailPrice": self.retailPrice,
            "wholesalePrice": self.wholesalePrice,
            "supplierName": self.supplierName,
            "supplierId": self.supplierId,
            # NOTE(review): key is 'isEnable' (not 'isEnabled'); existing API
            # consumers may depend on this spelling, so it is kept as-is.
            "isEnable": self.isEnabled,
            "createDateTime": self.createDateTime.strftime("%Y-%m-%d %H:%M:%S"),
            "modifyDateTime": self.modifyDateTime.strftime("%Y-%m-%d %H:%M:%S")
        }

    def __repr__(self):
        # Include the id only once the row has been persisted.
        if self.id:
            return '<ProductBaseInfo@id=%d,name=%s,shopId=%d,categoryId=%d>' % (self.id, self.name, self.shopId, self.categoryId)
        else:
            return '<ProductBaseInfo@name=%s,shopId=%d,categoryId=%d>' % (self.name, self.shopId, self.categoryId)
"kkoolerter@gmail.com"
] | kkoolerter@gmail.com |
d4450b0c6d0c8606e68e61901bc5d21cb770cc72 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /090_logging/examples/Python Logging – Simplest Guide with Full Code and Examples/008_9. How to include traceback information in logged messages.py | 50bc97aa806ada705b4a3271a710053b45bd153f | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 974 | py | # Besides debug, info, warning, error, and critical messages, you can log exceptions that will include any
# associated traceback information.
# With logger.exception, you can log traceback information should the code encounter any error. logger.exception will log
# the message provided in its arguments as well as the error message traceback info.
#
# Below is a nice example.
import logging
# Create or get the logger: getLogger(__name__) returns a per-module logger;
# repeated calls with the same name yield the same object.
logger = logging.getLogger(__name__)

# set log level: INFO and above are emitted; DEBUG is suppressed.
logger.setLevel(logging.INFO)
def divide(x, y):
    """Return x / y; on division by zero, log the traceback and return None."""
    try:
        result = x / y
    except ZeroDivisionError:
        # logger.exception logs at ERROR level and appends the traceback.
        logger.exception("Division by zero problem")
        return None
    return result
# Logs
# Note: the argument expression runs divide(10, 0) first (arguments are
# evaluated before the call), so the exception traceback is logged from
# inside divide() *before* this ERROR line; divide() returns None after
# logging, hence c=None in the formatted message.
logger.error("Divide {x} / {y} = {c}".format(x=10, y=0, c=divide(10,0)))

#> ERROR:__main__:Division by zero problem
#> Traceback (most recent call last):
#>   File "<ipython-input-16-a010a44fdc0a>", line 12, in divide
#>     out = x / y
#> ZeroDivisionError: division by zero
#> ERROR:__main__:Divide 10 / 0 = None
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
7d2228d08984e1abc9de2e0912b38ac6830e3e21 | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4M/4M-3J_MD_NVT_rerun/set.py | 15a567d11dc43cc9d57465a49bebaf753252a7d4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
# Stage and submit one Amber TI run per lambda window: each window gets its
# own directory containing input files rendered from the shared templates.
base_dir = '/mnt/scratch/songlin3/run/bace/L4M/MD/ti_one-step/4M_3J/'
files_dir = base_dir + 'files/'
temp_equi_in = files_dir + 'temp_equi.in'
temp_prod_in = files_dir + 'temp_prod.in'
temp_pbs = files_dir + 'temp.pbs'

# Lambda windows for the thermodynamic-integration run.
lambda_windows = [0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738,
                  0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]


def _render_template(src, dst, lam):
    """Copy template *src* to *dst*, substituting the XXX placeholder with lambda."""
    os.system("cp %s %s" % (src, dst))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (lam, dst))


for lam in lambda_windows:
    tag = "%6.5f" % lam
    # Recreate a clean per-lambda working directory and enter it.
    os.system("rm -r %s" % tag)
    os.system("mkdir %s" % tag)
    os.chdir(tag)
    os.system("rm *")
    work_dir = base_dir + tag + '/'
    # Equilibration input.
    _render_template(temp_equi_in, work_dir + "%s_equi.in" % tag, lam)
    # Production input.
    _render_template(temp_prod_in, work_dir + "%s_prod.in" % tag, lam)
    # PBS job script.
    pbs_path = work_dir + "%s.pbs" % tag
    _render_template(temp_pbs, pbs_path, lam)
    # Topology and restart coordinates shared by all windows.
    os.system("cp ../4M-3J_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    # Submit the job, then return to the parent dir for the next window.
    os.system("qsub %s" % pbs_path)
    os.chdir(base_dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
07e54c5c203580c9ff29ebbc1c41db6d46819a28 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/datastore/export.py | 4794c8107aa6b3cc5df32a1d806019761b0348b6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 3,599 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud datastore export command."""
from googlecloudsdk.api_lib.datastore import admin_api
from googlecloudsdk.api_lib.datastore import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.datastore import flags
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Export(base.Command):
  """Export Cloud Datastore entities to Google Cloud Storage.

  Export a copy of all or a subset of entities from Google Cloud Datastore
  to another storage system, such as Google Cloud Storage. Recent
  updates to entities may not be reflected in the export. The export occurs in
  the background and its progress can be monitored and managed via the operation
  commands. The output of an export may only be used once the operation has
  completed. If an export operation is cancelled before completion then it may
  leave partial data behind in Google Cloud Storage.
  """

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    # Shared flags: --kinds/--namespaces entity filters and operation labels.
    flags.AddEntityFilterFlags(parser)
    flags.AddLabelsFlag(parser)
    parser.add_argument(
        'output_url_prefix',
        help="""
        Location for the export metadata and data files. Must be a valid
        Google Cloud Storage bucket with an optional path prefix. For example:

          $ {command} gs://mybucket/my/path

        Will place the export in the `mybucket` bucket in objects prefixed with
        `my/path`.
        """)
    base.ASYNC_FLAG.AddToParser(parser)

  def Run(self, args):
    """Start the export; wait for completion unless --async was given."""
    project = properties.VALUES.core.project.Get(required=True)
    destination = self._ParseGCSObjectPrefix(args.output_url_prefix)
    response = admin_api.Export(
        project,
        # use join and filter to avoid trailing '/' when there is no object
        # prefix (destination is a (bucket, object-or-None) tuple).
        'gs://{}'
        .format('/'.join([part for part in destination if part is not None])),
        kinds=args.kinds,
        namespaces=args.namespaces,
        labels=args.operation_labels)

    # NOTE(review): 'async' became a reserved keyword in Python 3.7; this
    # code predates that and targets Python 2.
    if not args.async:
      # Block until the long-running export operation finishes.
      operations.WaitForOperation(response)

    return response

  def _ParseGCSObjectPrefix(self, resource):
    """Parses a GCS bucket with an optional object prefix.

    Args:
      resource: the user input resource string.
    Returns:
      a tuple of strings containing the GCS bucket and GCS object. The GCS
      object may be None.
    """
    try:
      # Try as bucket first so that a single id is interpretted as a bucket
      # instead of an object with a missing bucket.
      bucket_ref = resources.REGISTRY.Parse(
          resource, collection='storage.buckets')
      # Call Parse rather than Create to set validate to False, allowing the
      # empty object.
      return (bucket_ref.bucket, None)
    except resources.UserError:
      # Ignored, we'll try parsing again as an object.
      pass
    # Fall back: parse as bucket/object; raises if still malformed.
    object_ref = resources.REGISTRY.Parse(
        resource, collection='storage.objects')
    return (object_ref.bucket, object_ref.object)
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
a0aed7ae9aa7d5db56497466a5298e8c84067b43 | 9da09ad3aba9501d856f343bbc6d55bdcff1a346 | /apiv1/views.py | 68b9a851a6bf654e2fdb7270c5f321e4be0f2171 | [] | no_license | akiyoko/drf-vue-sample.vue.config.js | 696220a3bf7b590090d8ebe3658bf2f8c88c94b5 | c988326e43a89247e7620b115d37bb81060c5532 | refs/heads/master | 2020-05-01T19:41:24.296791 | 2019-03-25T19:54:53 | 2019-03-25T19:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from shop.models import Book
from .serializers import BookSerializer
class BookViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint for Book records (list/retrieve/create/update/delete)."""
    # Expose all Book rows; DRF applies pagination/filtering on top.
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    # Every action requires an authenticated user (otherwise 401/403).
    permission_classes = (IsAuthenticated,)
| [
"akiyoko@users.noreply.github.com"
] | akiyoko@users.noreply.github.com |
71a60be3123022f45cd35066b7233371ce0fd3d4 | b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6 | /verp/support/doctype/issue/issue.py | be0ef67aab804d959242aecc2b10a7fc4dbe6dc4 | [] | no_license | vsadminpk18/verpfinalversion | 7148a64fe6134e2a6371470aceb1b57cc4b5a559 | 93d164b370ad9ca0dd5cda0053082dc3abbd20da | refs/heads/master | 2023-07-13T04:11:59.211046 | 2021-08-27T06:26:48 | 2021-08-27T06:26:48 | 400,410,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,033 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.utils import now_datetime, time_diff_in_seconds, get_datetime, date_diff
from frappe.core.utils import get_parent_doc
from datetime import timedelta
from frappe.model.mapper import get_mapped_doc
from frappe.utils.user import is_website_user
from frappe.email.inbox import link_communication_to_document
class Issue(Document):
	"""Support-ticket DocType controller (Frappe/ERPNext).

	Handles timeline Communications for portal-created issues, derives
	Lead/Contact/Customer/Company links from the raising email address,
	and supports splitting an issue (plus its later communications) into
	a new Issue record.
	"""

	def get_feed(self):
		# One-line summary for the activity feed, e.g. "Open: Printer broken".
		return "{0}: {1}".format(_(self.status), self.subject)

	def validate(self):
		# For brand-new portal issues, schedule creation of a timeline
		# Communication (performed later in on_update).
		if self.is_new() and self.via_customer_portal:
			self.flags.create_communication = True

		# Default the reporter to the logged-in user.
		if not self.raised_by:
			self.raised_by = frappe.session.user

		self.set_lead_contact(self.raised_by)

	def on_update(self):
		# Add a communication in the issue timeline
		if self.flags.create_communication and self.via_customer_portal:
			self.create_communication()
			self.flags.communication_created = None

	def set_lead_contact(self, email_id):
		# Link Lead / Contact / Customer / Company from the raising email.
		import email.utils

		# parseaddr extracts the bare address from e.g. "Name <a@b.com>".
		email_id = email.utils.parseaddr(email_id)[1]
		if email_id:
			if not self.lead:
				self.lead = frappe.db.get_value("Lead", {"email_id": email_id})
			if not self.contact and not self.customer:
				self.contact = frappe.db.get_value("Contact", {"email_id": email_id})
				# NOTE(review): nesting reconstructed from upstream layout --
				# the Customer is derived from the freshly found Contact.
				if self.contact:
					contact = frappe.get_doc("Contact", self.contact)
					self.customer = contact.get_link_for("Customer")

			if not self.company:
				# Prefer the lead's company, else the site-wide default.
				self.company = frappe.db.get_value("Lead", self.lead, "company") or \
					frappe.db.get_default("Company")

	def create_communication(self):
		# Mirror the portal-submitted description as a received email
		# Communication so it shows up in the issue timeline.
		communication = frappe.new_doc("Communication")
		communication.update({
			"communication_type": "Communication",
			"communication_medium": "Email",
			"sent_or_received": "Received",
			"email_status": "Open",
			"subject": self.subject,
			"sender": self.raised_by,
			"content": self.description,
			"status": "Linked",
			"reference_doctype": "Issue",
			"reference_name": self.name
		})
		# Bypass permission and mandatory-field checks: this runs
		# server-side on behalf of the portal user.
		communication.ignore_permissions = True
		communication.ignore_mandatory = True
		communication.save()

	@frappe.whitelist()
	def split_issue(self, subject, communication_id):
		# Bug: Pressing enter doesn't send subject
		# Clone this issue into a new one: the copy gets a fresh SLA and
		# response metrics, and all communications from communication_id
		# onwards are re-linked to the new issue.
		from copy import deepcopy
		replicated_issue = deepcopy(self)
		replicated_issue.subject = subject
		replicated_issue.issue_split_from = self.name
		replicated_issue.first_response_time = 0
		replicated_issue.first_responded_on = None
		replicated_issue.creation = now_datetime()

		# Reset SLA
		if replicated_issue.service_level_agreement:
			replicated_issue.service_level_agreement_creation = now_datetime()
			replicated_issue.service_level_agreement = None
			replicated_issue.agreement_status = "Ongoing"
			replicated_issue.response_by = None
			replicated_issue.response_by_variance = None
			replicated_issue.resolution_by = None
			replicated_issue.resolution_by_variance = None

		# reset_issue_metrics is defined elsewhere on this DocType.
		replicated_issue.reset_issue_metrics()

		frappe.get_doc(replicated_issue).insert()

		# Replicate linked Communications
		# TODO: get all communications in timeline before this, and modify them to append them to new doc
		comm_to_split_from = frappe.get_doc("Communication", communication_id)
		# All communications of this issue from the split point onwards:
		communications = frappe.get_all("Communication",
			filters={"reference_doctype": "Issue",
				"reference_name": comm_to_split_from.reference_name,
				"creation": (">=", comm_to_split_from.creation)})

		for communication in communications:
			doc = frappe.get_doc("Communication", communication.name)
			doc.reference_name = replicated_issue.name
			doc.save(ignore_permissions=True)

		# Leave an audit comment on the new issue pointing back at the source.
		frappe.get_doc({
			"doctype": "Comment",
			"comment_type": "Info",
			"reference_doctype": "Issue",
			"reference_name": replicated_issue.name,
			"content": " - Split the Issue from <a href='/app/Form/Issue/{0}'>{1}</a>".format(self.name, frappe.bold(self.name)),
		}).insert(ignore_permissions=True)

		return replicated_issue.name
def get_list_context(context=None):
    """Return the website (portal) list-view configuration for Issues."""
    list_context = {
        "title": _("Issues"),
        "get_list": get_issue_list,
        "row_template": "templates/includes/issue_row.html",
    }
    # Portal list pages show the sidebar and search box, without breadcrumbs.
    list_context.update(show_sidebar=True, show_search=True, no_breadcrumbs=True)
    return list_context
def get_issue_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by=None):
    """Return Issues for the portal list view, scoped to the visitor.

    Website users only see their own issues (by linked Customer, else by
    raised_by email); desk users go through normal permission checks.
    NOTE(review): `order_by` is accepted but never forwarded to get_list —
    confirm whether sorting was intended here.
    """
    from frappe.www.list import get_list
    user = frappe.session.user
    contact = frappe.db.get_value("Contact", {"user": user}, "name")
    customer = None

    if contact:
        contact_doc = frappe.get_doc("Contact", contact)
        customer = contact_doc.get_link_for("Customer")

    ignore_permissions = False
    if is_website_user():
        if not filters: filters = {}

        # Prefer filtering by the linked Customer; fall back to the email.
        if customer:
            filters["customer"] = customer
        else:
            filters["raised_by"] = user

        # Website users have no Issue read permission; bypass it since the
        # filters above already restrict the result set.
        ignore_permissions = True

    return get_list(doctype, txt, filters, limit_start, limit_page_length, ignore_permissions=ignore_permissions)
@frappe.whitelist()
def set_multiple_status(names, status):
    """Set `status` on every Issue named in `names` (a JSON-encoded list)."""
    issue_names = json.loads(names)
    for issue_name in issue_names:
        frappe.db.set_value("Issue", issue_name, "status", status)
@frappe.whitelist()
def set_status(name, status):
    """Set the status of a single Issue identified by `name`."""
    frappe.db.set_value(
        "Issue",
        name,
        "status",
        status,
    )
def auto_close_tickets():
    """Auto-close replied support tickets after 7 days"""
    # Threshold is configurable in Support Settings; defaults to 7 days.
    auto_close_after_days = frappe.db.get_value("Support Settings", "Support Settings", "close_issue_after_days") or 7

    # Parameterized query (%s) — the interval is substituted safely.
    issues = frappe.db.sql(""" select name from tabIssue where status='Replied' and
        modified<DATE_SUB(CURDATE(), INTERVAL %s DAY) """, (auto_close_after_days), as_dict=True)

    for issue in issues:
        doc = frappe.get_doc("Issue", issue.get("name"))
        doc.status = "Closed"
        # Runs from the scheduler with no user context, so skip permission
        # and mandatory-field checks.
        doc.flags.ignore_permissions = True
        doc.flags.ignore_mandatory = True
        doc.save()
def has_website_permission(doc, ptype, user, verbose=False):
    """Portal permission hook: allow access by linked Customer or by reporter."""
    # Local import: the helper deliberately shares this function's name.
    from verp.controllers.website_list_for_contact import has_website_permission
    permission_based_on_customer = has_website_permission(doc, ptype, user, verbose)

    # The user who raised the issue may always see it on the portal.
    return permission_based_on_customer or doc.raised_by==user
def update_issue(contact, method):
    """Called when Contact is deleted"""
    # Clear dangling Contact references on Issues (parameterized query).
    frappe.db.sql("""UPDATE `tabIssue` set contact='' where contact=%s""", contact.name)
@frappe.whitelist()
def make_task(source_name, target_doc=None):
    """Map an Issue into a new (unsaved) Task document.

    source_name: name of the source Issue.
    target_doc: optional existing target to merge into (get_mapped_doc API).
    """
    return get_mapped_doc("Issue", source_name, {
        "Issue": {
            "doctype": "Task"
        }
    }, target_doc)
@frappe.whitelist()
def make_issue_from_communication(communication, ignore_communication_links=False):
    """ raise a issue from email """
    # Build the Issue from the incoming Communication's metadata.
    doc = frappe.get_doc("Communication", communication)
    issue = frappe.get_doc({
        "doctype": "Issue",
        "subject": doc.subject,
        "communication_medium": doc.communication_medium,
        "raised_by": doc.sender or "",
        "raised_by_phone": doc.phone_no or ""
    }).insert(ignore_permissions=True)

    # Re-link the source communication to the freshly created Issue.
    link_communication_to_document(doc, "Issue", issue.name, ignore_communication_links)

    return issue.name
def get_time_in_timedelta(time):
    """Convert a datetime.time into the timedelta elapsed since midnight.

    Microseconds are deliberately discarded, e.g.
    datetime.time(10, 36, 55, 961454) -> datetime.timedelta(seconds=38215).
    """
    hours, minutes, seconds = time.hour, time.minute, time.second
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
def set_first_response_time(communication, method):
    """Record the first-response metric on an Issue.

    Presumably wired as a Communication doc-event hook (doc, method
    signature) — confirm in hooks.py.
    """
    if communication.get('reference_doctype') == "Issue":
        issue = get_parent_doc(communication)
        # Only the very first outgoing reply sets the metric.
        if is_first_response(issue):
            first_response_time = calculate_first_response_time(issue, get_datetime(issue.first_responded_on))
            issue.db_set("first_response_time", first_response_time)
def is_first_response(issue):
    """Return True if the issue has exactly one sent communication,
    i.e. the reply just recorded is the first response.

    Idiom fix: return the boolean expression directly instead of the
    if/return True/return False ladder (behavior unchanged).
    """
    responses = frappe.get_all('Communication', filters = {'reference_name': issue.name, 'sent_or_received': 'Sent'})
    return len(responses) == 1
def calculate_first_response_time(issue, first_responded_on):
    """Return the first-response time counting only SLA support hours.

    The result is in seconds (via time_diff_in_seconds in get_elapsed_time).
    Never returns 0: out-of-hours responses yield 1.0, because a zero value
    gets reset when the next response is sent (see inline comment).
    NOTE(review): indentation reconstructed from statement order — confirm
    against upstream.
    """
    issue_creation_date = issue.creation
    issue_creation_time = get_time_in_seconds(issue_creation_date)
    first_responded_on_in_seconds = get_time_in_seconds(first_responded_on)
    support_hours = frappe.get_cached_doc("Service Level Agreement", issue.service_level_agreement).support_and_resolution

    if issue_creation_date.day == first_responded_on.day:
        if is_work_day(issue_creation_date, support_hours):
            start_time, end_time = get_working_hours(issue_creation_date, support_hours)

            # issue creation and response on the same day during working hours
            if is_during_working_hours(issue_creation_date, support_hours) and is_during_working_hours(first_responded_on, support_hours):
                return get_elapsed_time(issue_creation_date, first_responded_on)

            # issue creation is during working hours, but first response was after working hours
            elif is_during_working_hours(issue_creation_date, support_hours):
                return get_elapsed_time(issue_creation_time, end_time)

            # issue creation was before working hours but first response is during working hours
            elif is_during_working_hours(first_responded_on, support_hours):
                return get_elapsed_time(start_time, first_responded_on_in_seconds)

            # both issue creation and first response were after working hours
            else:
                return 1.0  # this should ideally be zero, but it gets reset when the next response is sent if the value is zero
        else:
            return 1.0
    else:
        # response on the next day
        if date_diff(first_responded_on, issue_creation_date) == 1:
            first_response_time = 0
        else:
            # Full working days strictly between creation and response.
            first_response_time = calculate_initial_frt(issue_creation_date, date_diff(first_responded_on, issue_creation_date)- 1, support_hours)

        # time taken on day of issue creation
        if is_work_day(issue_creation_date, support_hours):
            start_time, end_time = get_working_hours(issue_creation_date, support_hours)
            if is_during_working_hours(issue_creation_date, support_hours):
                first_response_time += get_elapsed_time(issue_creation_time, end_time)
            elif is_before_working_hours(issue_creation_date, support_hours):
                first_response_time += get_elapsed_time(start_time, end_time)

        # time taken on day of first response
        if is_work_day(first_responded_on, support_hours):
            start_time, end_time = get_working_hours(first_responded_on, support_hours)
            if is_during_working_hours(first_responded_on, support_hours):
                first_response_time += get_elapsed_time(start_time, first_responded_on_in_seconds)
            elif not is_before_working_hours(first_responded_on, support_hours):
                first_response_time += get_elapsed_time(start_time, end_time)

        if first_response_time:
            return first_response_time
        else:
            return 1.0
def get_time_in_seconds(date):
    """Return the time-of-day part of `date` as a timedelta since midnight.

    Despite the name, the return value is a datetime.timedelta, not an int.
    """
    seconds_since_midnight = date.hour * 3600 + date.minute * 60 + date.second
    return timedelta(seconds=seconds_since_midnight)
def get_working_hours(date, support_hours):
    """Return (start_time, end_time) for `date`'s weekday from the SLA rows.

    Implicitly returns None when `date` is not a work day — callers must
    check is_work_day() first or handle the None.
    """
    if is_work_day(date, support_hours):
        weekday = frappe.utils.get_weekday(date)
        for day in support_hours:
            if day.workday == weekday:
                return day.start_time, day.end_time
def is_work_day(date, support_hours):
    """Return True if `date` falls on a weekday covered by the SLA support hours."""
    weekday = frappe.utils.get_weekday(date)
    return any(day.workday == weekday for day in support_hours)
def is_during_working_hours(date, support_hours):
    """Return True if the time-of-day of `date` lies within that day's
    working hours (both bounds inclusive).

    Assumes `date` is a work day; otherwise get_working_hours() returns None
    and the tuple unpacking raises TypeError (same as the original).
    Idiom fix: chained comparison returned directly instead of the
    if/return True/return False ladder.
    """
    start_time, end_time = get_working_hours(date, support_hours)
    time = get_time_in_seconds(date)
    return start_time <= time <= end_time
def get_elapsed_time(start_time, end_time):
    # Seconds between the two values, rounded to 2 decimal places.
    # Note the argument order: time_diff_in_seconds takes end first.
    return round(time_diff_in_seconds(end_time, start_time), 2)
def calculate_initial_frt(issue_creation_date, days_in_between, support_hours):
    """Sum the full working-day durations for the `days_in_between` days
    immediately after `issue_creation_date` (non-work days contribute 0)."""
    initial_frt = 0
    for i in range(days_in_between):
        date = issue_creation_date + timedelta(days = (i+1))
        if is_work_day(date, support_hours):
            start_time, end_time = get_working_hours(date, support_hours)
            initial_frt += get_elapsed_time(start_time, end_time)

    return initial_frt
def is_before_working_hours(date, support_hours):
    """Return True if the time-of-day of `date` is earlier than that day's
    working-hours start.

    Assumes `date` is a work day; otherwise get_working_hours() returns None
    and the tuple unpacking raises TypeError (same as the original).
    Idiom fix: comparison returned directly instead of the
    if/return True/return False ladder.
    """
    start_time, end_time = get_working_hours(date, support_hours)
    time = get_time_in_seconds(date)
    return time < start_time
def get_holidays(holiday_list_name):
    """Return the holiday dates from the named Holiday List document."""
    holiday_list = frappe.get_cached_doc("Holiday List", holiday_list_name)
    return [entry.holiday_date for entry in holiday_list.holidays]
| [
"admin@vespersolutions.tech"
] | admin@vespersolutions.tech |
2dd2beb5a598c05ee0915af77e5ed8c43a7e983b | 677ee80f61be1faa4397c747e5c8c21e1e8fab17 | /test_case/case49.py | 7a03366997b09cb24d31db7aec56a25c97cbf11f | [] | no_license | YGragon/PythonSelfStudy | 9f08d1d295f075e996dd493c68c99be94176f3d5 | ffbf0a7a1f9bfb053eb878fac5467563d8e3fb92 | refs/heads/master | 2021-09-10T14:08:02.511883 | 2018-03-27T14:31:10 | 2018-03-27T14:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # 题目:使用lambda来创建匿名函数
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Exercise: create anonymous functions with lambda.
# Bug fix: the original arithmetic trick `(x > y) * x + (x < y) * y`
# returns 0 whenever x == y, because both comparisons evaluate to False.
# Conditional expressions handle the tie correctly.
MAXIMUM = lambda x,y : x if x >= y else y
MINIMUM = lambda x,y : y if x >= y else x
a = 10
b = 20
print('The largar one is %d' % MAXIMUM(a,b))
print('The lower one is %d' % MINIMUM(a,b))
"1105894953@qq.com"
] | 1105894953@qq.com |
7b14a4dce496da2bceb177b18310310a5ac69c53 | f7c07caa1210d2a08e8433cdd854b1232efa88e3 | /Directory-And-File-Modules/Linecache-Module/Read-A-Specific-Line-In-A-File.py | 5eb74f24c0ccceaab9a46b4594528dd0fdab16a6 | [] | no_license | rchicoli/ispycode-python | c2fbecc28bf32933150986d24f77b7297f50b78e | fa27f2377943ac2e4d983065406578151091e3f5 | refs/heads/master | 2020-03-20T11:34:59.698618 | 2018-06-14T21:14:02 | 2018-06-14T21:14:02 | 137,407,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py |
import linecache

# Read a single line (1-indexed; here line 2) from a file without loading
# the whole file; linecache caches the contents for repeated lookups.
line = linecache.getline('file.txt',2)
print line  # Python 2 print statement
| [
"rafaelchicoli@hotmail.com"
] | rafaelchicoli@hotmail.com |
ef6cf78a22ffb85b42ae2798abe283fe30f6ff82 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc078/A/3912467.py | 658f7cff8c6dea4de0cd4537b922000c3104b973 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | n = int(input())
cards = list(map(int, input().split()))
a = 0
b = sum(cards)
min_diff = float('inf')
for i in range(n - 1):
a += cards[i]
b -= cards[i]
min_diff = min(min_diff, abs(a - b))
print(min_diff) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
3a183e26c8edd2de4a97a8f172af772f6a7dd122 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/tests/infrastructure/test_dot_net_errors.py | ff918717ca0724de044b8a05f9c1bfaf423b4898 | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | """
test_dot_net_errors.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.plugins.tests.helper import PluginTest, PluginConfig
class TestDotNetErrors(PluginTest):
    """Integration test: running the dot_net_errors infrastructure plugin
    (with forward-only web_spider crawling) against the moth test site should
    yield exactly one '.NET errors' information-disclosure finding."""

    moth_url = 'http://moth/w3af/infrastructure/dot_net_errors/'

    _run_configs = {
        'cfg': {
            'target': moth_url,
            'plugins': {'infrastructure': (PluginConfig('dot_net_errors'),),
                        'crawl': (PluginConfig('web_spider',
                                               ('only_forward', True, PluginConfig.BOOL), ),)}
        }
    }

    # Marked as failing in CI — presumably requires the moth environment.
    @attr('ci_fails')
    def test_dot_net_errors(self):
        cfg = self._run_configs['cfg']
        self._scan(cfg['target'], cfg['plugins'])

        infos = self.kb.get('dot_net_errors', 'dot_net_errors')

        self.assertEqual(len(infos), 1, infos)

        info = infos[0]
        self.assertEqual(
            info.get_name(), 'Information disclosure via .NET errors')
| [
"everping@outlook.com"
] | everping@outlook.com |
601167fca8573e9ec1732967599f8addb1c342fc | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /Circle and Lattice Points.py | 3591c2952ec5ff8683a94b818e74834b0ad5c205 | [] | no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | for _ in range(int(input())) :
n=int(input())
count=0
for i in range(-n,n+1):
for j in range(-n,n+1):
if i**2+j**2==n**2:
count+=1
print(count) | [
"ankitagrawal11b@gmail.com"
] | ankitagrawal11b@gmail.com |
8a44e891c7a3a9bce2c86d9cd8efebbbcfe03c93 | 42de984305948658f7487a19f0019034d53781e3 | /Config/AutoStkConfig.py | a9d46ed52c7976044f6e601356f39da0cca81080 | [] | no_license | lzwhw2000/MoDeng | a5037d3298f0285d9aca6af831084dbc60738bba | a521f23214a30ff0497e0ad5797e2190be057848 | refs/heads/master | 2020-08-10T17:16:33.771057 | 2019-10-10T10:01:39 | 2019-10-10T10:01:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | # encoding=utf-8
"""
这个脚本是用来存储 stk自动检测 程序的配置信息
"""
import os
cubic_test_last_step = 7 # 在曲线拟合时,取最后的几个值进行二次拟合
# 图片存在的路径,如果没有自动创建
if os.path.exists('C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/'):
pic_save_dir_root = 'C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/'
elif os.path.exists('F:/软件代码/MoDeng/TempPicStore/'):
pic_save_dir_root = 'F:/软件代码/MoDeng/TempPicStore/'
else:
os.makedirs('C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/')
pic_save_dir_root = 'C:/Users/paul/Desktop/软件代码/MoDeng/TempPicStore/'
plot_current_days_amount = 40 # 画出近期的stk走势情况,该参数指示最近取的天数
tailLengthForMACD = 150 # 在计算MACD时,因为之用最近的几个数,所以不需要往前延伸太多,以节省计算量
# 关心的stk
stk_list = [
'cyb',
'sh',
'sz',
'300508',
'000625',
'000725',
'000001',
'000333',
'300508',
'002456',
'603421',
'300059',
'600487',
'600036'
]
step_corner_detect = 6 # 在判断拐点的时候,取最近的数据的个数
corner_Pot_Retrospective_Half = 6 # 进行后验检测拐点时,时间窗的一半
curPath = os.path.abspath(os.path.dirname(__file__))
# rootPath = curPath[:curPath.find("MoDeng\\")+len("MoDeng\\")] # 获取myProject,也就是项目的根路径
rootPath = curPath[:curPath.find("MoDeng\\")+len("MoDeng\\")] # 获取myProject,也就是项目的根路径
MDataPWD = os.path.abspath(rootPath + '/RelativeRank/')
SeaSelectDataPWD = os.path.abspath(rootPath+'/AutoDailyOpt/SeaSelect/')
LastScale = os.path.abspath(rootPath+'/AutoDailyOpt/') + '/LastScale/'
| [
"1210055099@qq.com"
] | 1210055099@qq.com |
508e5a4048912eff6337b872b13c26966803af58 | 372edad1cd6399cadba82818e9fb9682c3bac1b4 | /packages/python/plotly/plotly/validators/layout/grid/_xaxes.py | 0d74e6e6750d28b16d373255bc04c0c95bdc1dd2 | [
"MIT"
] | permissive | OGVGdev/plotly.py | 78bfa9e25e92c367f0da30af7885cdd163ba612b | 96a9101c79aa588023f56153bf274d0d570ffcf6 | refs/heads/master | 2022-11-10T16:44:06.732450 | 2020-06-26T13:07:06 | 2020-06-26T13:07:06 | 275,173,321 | 1 | 0 | MIT | 2020-06-26T14:19:41 | 2020-06-26T14:19:40 | null | UTF-8 | Python | false | false | 756 | py | import _plotly_utils.basevalidators
class XaxesValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``layout.grid.xaxes`` property (auto-generated code).

    Each entry must match an x-axis id (``x``, ``x2``, ... per the regex
    below) or be ``""`` to leave a grid column without an axis.
    """

    def __init__(self, plotly_name="xaxes", parent_name="layout.grid", **kwargs):
        super(XaxesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            free_length=kwargs.pop("free_length", True),
            items=kwargs.pop(
                "items",
                {
                    "valType": "enumerated",
                    "values": ["/^x([2-9]|[1-9][0-9]+)?$/", ""],
                    "editType": "plot",
                },
            ),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
| [
"noreply@github.com"
] | OGVGdev.noreply@github.com |
"""
WSGI config for mule project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Default settings module; an externally-set DJANGO_SETTINGS_MODULE wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mule.settings")

application = get_wsgi_application()
| [
"chris@komposta.net"
] | chris@komposta.net |
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2018-2022 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
import glob
import os
import sys
import subprocess as sp

# Prefer pox's notion of the python executable (with version) when available.
python = sys.executable
try:
    import pox
    python = pox.which_python(version=True) or python
except ImportError:
    pass
# shell=True on Windows — presumably needed to resolve the interpreter there.
shell = sys.platform[:3] == 'win'

# Collect every test_*.py that sits next to this file.
suite = os.path.dirname(__file__) or os.path.curdir
tests = glob.glob(suite + os.path.sep + 'test_*.py')


if __name__ == '__main__':

    # Run each test module in its own interpreter: print '.' per pass,
    # 'F' per failure, and exit nonzero if anything failed.
    failed = 0
    for test in tests:
        p = sp.Popen([python, test], shell=shell).wait()
        if p:
            print('F', end='', flush=True)
            failed = 1
        else:
            print('.', end='', flush=True)
    print('')
    exit(failed)
| [
"venkataramireddy534@gmail.com"
] | venkataramireddy534@gmail.com |
f3b2560ba77a4984ceabfcf27511aa0776e95190 | d735b8354e06eb26aa5ed0ac25ebf96bdd8d67b6 | /python16/day1-21/day003 字符串/07 字符串常用操作2.py | 352270ab025dd6ec5f426bbad105b2152683d6ba | [] | no_license | cn5036518/xq_py | e004766e6b2582ba37d7335320ed6b42f563c46c | ac932dc7fcb89a7a7faf8bda80791743755fd557 | refs/heads/master | 2021-07-15T18:44:19.244025 | 2020-09-12T09:38:25 | 2020-09-12T09:38:25 | 208,355,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#五 条件判断
# #1 判断是否由字母和数字组成
# s = "abc123"
# s2 =s.isalnum()
# print(s2) #True
#
# s = "abc123一二壹"
# s2 =s.isalnum() #注意:一二 壹 isalnum()也认识
# print(s2) #True
#
# #2 判断是否是字母组成
# s ="abc"
# s2 =s.isalpha()
# print(s2) #True
#
# s ="abc "
# s2 =s.isalpha() #多了一个空格
# print(s2) #False
#3判断是否是数字组成(整数\十进制整数\中文数字-一二壹等)
# s = "123"
# s2 =s.isdigit() #判断整数 数字 #重点 判断是否由数字组成
# print(s2) #True
#
# s = "123.2"
# s2 =s.isdigit() #判断整数 数字
# print(s2) #False
# s = "1.5"
# s2 =s.isdecimal() #判断十进制数字,而不是判断小数
# print(s2) #False
#
# s = "145"
# s2 =s.isdecimal() #判断十进制数字,而不是判断小数
# print(s2) #True
# s ="一二壹123"
# s2 =s.isnumeric() #认识中文 一二壹
# print(s2) #True
# s ="一二壹123两"
# s2 =s.isnumeric() #认识中文 一二壹 注意:两 isnumeric认为不是数字
# print(s2) #False
##判断数字的应用场景:购物车,用户在输入菜单的时候,必须保证用户输入的是数字,即可以用上isdigit()
#课上练习:用算法判断某一个字符串是否是小数
s = "-123.12"
"""
思路:
1、先去掉-,用替换-replace
2、先判断是否是整数 -isdigit()
3、如果不是整数的话
1、计算.出现的次数-count
2、判断 如果.出现的次数是1且它不是出现在最前面--startswith(),也不是出现在最后面--endswith()
就是小数
3、否则,就不是小数
注意点:
1、判断是否是整数时候,是无法判断负号-的,所以要用replace先去掉负号
"""
def isfloat(s):
s1 = s.replace("-","") #注意:判断整数的时候,是不能判断负号的-,所有要先去掉负号-
# print(s1) #123.12 字符串类型
if s1.isdigit():
print("%s是整数"%s)
else:
count_point = s.count(".") #计算点 出现的次数是1
# print(count_point) #1 #
if count_point == 1 and not s.startswith(".") and not s.endswith("."):
print("%s是小数"% s)
else:
print("%s不是小数"% s)
s="-123.99" #注意:这里的数必须是字符串类型,才能判断,最后可以用int float转换成数字
isfloat(s) #-123.99是小数
#六 计算字符串的长度
s = "dfhdhafk"
print(len(s)) #8 内置函数 和print()的写法类型 不是s.函数名()的写法
#七 可迭代
"""
可迭代的概念:可以一个一个往外取值的对象
1、字符串就是可迭代的(列表、字典都是可迭代的) 可以通过索引号的递增来取值
2、数字就不是可迭代的
"""
s = "朱元璋朱棣"
#1while取出字符串的每个字符--nok
count = 0
while count<len(s):
print(s[count]) #这里的count是索引号 可迭代对象(字符串、列表等):可以通过索引号的递增来取值
count+=1
print("----------1")
# #2for取出字符串的每个字符 (可迭代对象-字符串、列表等可以直接用for循环进行遍历,取出其中的元素)
for i in s: #把可迭代对象的每一个元素,每循环一次,都分别赋值给前面的变量i(方便可迭代对象的遍历)
# for 变量 in 可迭代对象
pass
print(i)
"""
in的两种用法
1、不在for中,是判断xxx是非在出现在str中(判断子字符串)--例子:判断敏感词-广告法
2、在for中,是把可迭代对象(字符串、列表等)的每一个元素取出来,赋值给前面的变量i
4. for循环
for 变量 in 可迭代对象:
循环体(break, continue)
else:
当循环正常结束的时候执行else(如果break,就不会执行else)
"""
#计算在字符串串"I am sylar, I'm 14 years old, I have 2 dogs!" 数字的个数
s1 = "I am sylar, I'm 14 years old, I have 2 dogs!"
count=0
for i in s1:
if i.isdigit():
# print(i)
count+=1
print(count) #统计字符串中有多少个数字
# for i in 10: #TypeError: 'int' object is not iterable
# #因为整数10不是可迭代的类型
# print(i)
for i in "10": #这里的“10”是字符串,可迭代类型
print(i)
| [
"wangtongpei@meicai.cn"
] | wangtongpei@meicai.cn |
b557056f5ed14dcda81056ce8e5cc36a59a3db25 | 35117d0c9b33e3591115b921de3bf781d6dd0dca | /chat/consumers.py | 8e97a9252c5687b11467a50d63459d6041a6e65d | [] | no_license | cnbillow/webim | f65197343aa41eebc9eaf8d4abcd7e349fc0433f | 6320580ca742754430162a4ce8be61d065b45d70 | refs/heads/master | 2020-04-07T15:38:26.708529 | 2018-12-03T06:46:56 | 2018-12-03T06:46:56 | 158,494,142 | 0 | 0 | null | 2018-11-21T05:12:59 | 2018-11-21T05:12:58 | null | UTF-8 | Python | false | false | 2,668 | py | # chat/consumers.py
from channels.generic.websocket import AsyncWebsocketConsumer
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from .models import IMGroup
import json
class ChatConsumer(AsyncWebsocketConsumer):
    """Per-connection websocket chat consumer (Django Channels).

    Comments translated from the original Chinese.
    """

    # Triggered when the websocket connects.
    # self.scope is roughly analogous to self.request in plain Django.
    # room_name comes from the URL; the variable name is set in the routing config.
    async def connect(self):
        # print(self.scope['url_route'])
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = self.room_name
        # Join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )

        # Notify the user's friends that they are online (currently disabled):
        # look up the user's friend groups and publish to every member.
        # groups = Group.objects.filter(owner=self.room_group_name)
        # for item in groups:
        #     for friend in item.group_members.all():
        #         channel_publish(friend.id, {'channel_type': 'friend_on', 'user_id': self.room_group_name})

        await self.accept()

    # Triggered when the connection closes.
    async def disconnect(self, close_code):
        # Leave room group
        # (friend-offline notification, currently disabled)
        # groups = Group.objects.filter(owner=self.room_group_name)
        # for item in groups:
        #     for friend in item.group_members.all():
        #         channel_publish(friend.id, {'channel_type': 'friend_off', 'user_id': self.room_group_name})
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    # Receive message from WebSocket
    # Messages sent from the front end arrive through this handler.
    async def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']

        # Send message to room group.
        # The 'type' must correspond to a handler method on this class;
        # Channels converts '.' to '_', so 'chat.message' dispatches to
        # chat_message() below.
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat.message',
                'message': message
            }
        )

    # Receive message from room group
    async def chat_message(self, event):
        message = event['message']

        # Send message to WebSocket
        await self.send(text_data=json.dumps({
            'message': message
        }))
# Publish a notification to connected pages via Channels (from outside a consumer).
channel_layer = get_channel_layer()


def channel_publish(topic, content):
    """Send `content` to the channel group `topic` as a 'chat.message' event.

    The Chinese triple-quoted string below is a runtime no-op literal kept
    byte-identical; it says: 'type' must match the consumer's handler, and
    `topic` plays the role of the consumer's room_group_name.
    """
    # print(topic)
    # print(content)
    try:
        '''
        type需要与consumer中的receive中一致
        group_name是consumer中的room_group_name
        '''
        async_to_sync(channel_layer.group_send)(
            topic,
            {
                'type': 'chat.message',
                'message': content,
            }
        )
    except Exception as e:
        # NOTE(review): `raise e` rewrites the traceback origin; a bare
        # `raise` would preserve it. Behavior otherwise unchanged.
        raise e
"js_huang@foxmail.com"
] | js_huang@foxmail.com |
384c86b5b2f05720e11321d45a390059c0bb5b0c | f2d297f8c2590ca9a172d223285d462236826c23 | /backend/chat/migrations/0001_initial.py | 0e414882ec563661b7ceda17f318a2554c0e76d1 | [] | no_license | crowdbotics-apps/lifit-24470 | 139aa9a2d582362c2bf9489068080f6ee903bcfc | 9003aab61e747de542c7c73b402248e8008cc6c6 | refs/heads/master | 2023-03-07T02:39:01.321520 | 2021-02-12T15:34:44 | 2021-02-12T15:34:44 | 338,357,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | # Generated by Django 2.2.18 on 2021-02-12 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the chat app (Django makemigrations).

    Creates Message, Thread, ThreadMember, ThreadAction, MessageAction and
    ForwardedMessage, with FKs into chat_user_profile.Profile. Avoid
    hand-editing field definitions; generate follow-up migrations instead.
    """

    initial = True

    dependencies = [
        ('chat_user_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField()),
                ('attachment', models.URLField()),
                ('is_draft', models.BooleanField()),
                ('is_delivered', models.BooleanField()),
                ('is_read', models.BooleanField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('timestamp_delivered', models.DateTimeField()),
                ('timestamp_read', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Thread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('thread_photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='ThreadMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_admin', models.BooleanField()),
                ('timestamp_joined', models.DateTimeField(auto_now_add=True)),
                ('timestamp_left', models.DateTimeField()),
                ('last_rejoined', models.DateTimeField()),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_profile', to='chat_user_profile.Profile')),
                ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_thread', to='chat.Thread')),
            ],
        ),
        migrations.CreateModel(
            name='ThreadAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=7)),
                ('timestamp_action', models.DateTimeField(auto_now_add=True)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_profile', to='chat_user_profile.Profile')),
                ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_thread', to='chat.Thread')),
            ],
        ),
        migrations.CreateModel(
            name='MessageAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=7)),
                ('timestamp_action', models.DateTimeField(auto_now_add=True)),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_message', to='chat.Message')),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_profile', to='chat_user_profile.Profile')),
            ],
        ),
        migrations.AddField(
            model_name='message',
            name='sent_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sent_by', to='chat.ThreadMember'),
        ),
        migrations.AddField(
            model_name='message',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_thread', to='chat.Thread'),
        ),
        migrations.CreateModel(
            name='ForwardedMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp_forwarded', models.DateTimeField(auto_now_add=True)),
                ('forwarded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_by', to='chat_user_profile.Profile')),
                ('forwarded_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_to', to='chat.Thread')),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_message', to='chat.Message')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
253a0d5a8d540228170ac6262fe99d0a839bb0b5 | 489da428bc0e1ab8f5117c0f8ba5ddb7aff05360 | /scripts/motors.py | a3e07bacd50b1dce8373510020926d7be4a24904 | [
"BSD-3-Clause"
] | permissive | norihisayamada/pimouse_ros | 4f77e769b7ac9cbfc4af6e703764af1d2df56b30 | 3b07880a6ceb584d92cf640c1a38864130d44189 | refs/heads/master | 2020-04-17T03:03:49.424738 | 2019-02-23T11:05:38 | 2019-02-23T11:05:38 | 166,164,916 | 1 | 0 | BSD-3-Clause | 2019-01-17T05:18:41 | 2019-01-17T05:18:41 | null | UTF-8 | Python | false | false | 3,430 | py | #!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
from pimouse_ros.srv import TimedMotion #追加
class Motor():
    """ROS node wrapper around the Raspberry Pi Mouse motor device files.

    Subscribes to raw wheel frequencies and cmd_vel, and exposes on/off and
    timed-motion services. Comments translated from the original Japanese;
    indentation reconstructed — confirm against upstream pimouse_ros.
    """

    def __init__(self):
        # Start with the motors powered off (upstream note: changed True to False).
        if not self.set_power(False): sys.exit(1)

        rospy.on_shutdown(self.set_power)
        self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
        self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
        self.srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
        self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
        self.srv_tm = rospy.Service('timed_motion', TimedMotion, self.callback_tm)  # added
        self.last_time = rospy.Time.now()
        self.using_cmd_vel = False

    def set_power(self,onoff=False):
        """Write the motor-enable device file; return True on success."""
        en = "/dev/rtmotoren0"
        try:
            with open(en,'w') as f:
                f.write("1\n" if onoff else "0\n")
            self.is_on = onoff
            return True
        except:
            rospy.logerr("cannot write to " + en)
            return False

    def set_raw_freq(self,left_hz,right_hz):
        """Write per-wheel step frequencies (rounded to int) to the raw device files."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return

        try:
            with open("/dev/rtmotor_raw_l0",'w') as lf,\
                 open("/dev/rtmotor_raw_r0",'w') as rf:
                lf.write(str(int(round(left_hz))) + "\n")
                rf.write(str(int(round(right_hz))) + "\n")
        except:
            rospy.logerr("cannot write to rtmotor_raw_*")

    def callback_raw_freq(self,message):
        self.set_raw_freq(message.left_hz,message.right_hz)

    def callback_cmd_vel(self,message):
        # Convert linear/angular velocity into wheel step frequencies.
        # The constants presumably encode wheel geometry — TODO confirm
        # against the robot's hardware spec.
        forward_hz = 80000.0*message.linear.x/(9*math.pi)
        rot_hz = 400.0*message.angular.z/math.pi
        self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
        self.using_cmd_vel = True
        self.last_time = rospy.Time.now()

    # The following three methods implement the motor_on/motor_off services. (added)
    def onoff_response(self,onoff):
        d = TriggerResponse()
        d.success = self.set_power(onoff)
        d.message = "ON" if self.is_on else "OFF"
        return d

    def callback_on(self,message): return self.onoff_response(True)
    def callback_off(self,message): return self.onoff_response(False)

    def callback_tm(self,message):
        """Timed-motion service: run both motors at the given Hz for duration_ms."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return False

        dev = "/dev/rtmotor0"
        try:
            with open(dev,'w') as f:
                f.write("%d %d %d\n" %
                    (message.left_hz,message.right_hz,message.duration_ms))
        except:
            rospy.logerr("cannot write to " + dev)
            return False

        return True
if __name__ == '__main__':
    rospy.init_node('motors')
    m = Motor()
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        # Safety stop: halt the wheels if no cmd_vel arrived within 1 second.
        if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
            m.set_raw_freq(0,0)
            m.using_cmd_vel = False
        rate.sleep()
# Copyright 2016 Ryuichi Ueda
# Released under the BSD License.
# To make line numbers be identical with the book, this statement is written here. Don't move it to the header.
| [
"ryuichiueda@gmail.com"
] | ryuichiueda@gmail.com |
e96ed567a6e93e60ecbb430c97f1b152028d82cc | 4502834012bd50135569a820a502c2d965b0a1df | /wdom/themes/vital.py | 05f72b22f634c0e9e334e8dcfe9d7b66955ab40a | [
"MIT"
] | permissive | miyakogi/wdom | 9db7fccf7402fa2e2fc97f53c2a42e4aa2b8633a | c7cd8b3428ca154af6fb1ecb6c7d2f0e17551802 | refs/heads/master | 2020-04-04T22:07:12.970584 | 2018-03-05T15:32:55 | 2018-03-05T15:32:55 | 49,849,994 | 72 | 14 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
from wdom.tag import NewTagClass as NewTag
from wdom.themes import *
# Metadata describing the Vital CSS framework theme.
name = 'Vital'
project_url = 'https://vitalcss.com/'
project_repository = 'https://github.com/doximity/vital'
license = 'Apache 2.0'
license_url = 'https://github.com/doximity/vital/blob/master/LICENSE.md'
css_files = [
    '//cdn.rawgit.com/doximity/vital/v2.2.1/dist/css/vital.min.css',
]
# Button variants mapped onto Vital's btn/color utility classes.
Button = NewTag('Button', bases=Button, class_='btn')
DefaultButton = NewTag('DefaultButton', 'button', Button, class_='solid', is_='default-button')
PrimaryButton = NewTag('PrimaryButton', 'button', DefaultButton, class_='blue', is_='primary-button')
SecondaryButton = NewTag('SecondaryButton', 'button', Button, class_='blue', is_='secondary-button')
SuccessButton = NewTag('SuccessButton', 'button', DefaultButton, class_='green', is_='success-button')
InfoButton = NewTag('InfoButton', 'button', Button, class_='blue', is_='info-button')
WarningButton = NewTag('WarningButton', 'button', DefaultButton, class_='orange', is_='warning-button')
DangerButton = NewTag('DangerButton', 'button', DefaultButton, class_='red', is_='danger-button')
ErrorButton = NewTag('ErrorButton', 'button', DefaultButton, class_='red', is_='error-button')
LinkButton = NewTag('LinkButton', 'button', Button, class_='no-outline blue', is_='link-button')
Ol = NewTag('Ol', 'ol', class_='list')
# NOTE(review): tag name 'Ul' below is uppercase, unlike every other tag
# here ('ol', 'button', 'div') -- looks like a typo for 'ul'; confirm.
Ul = NewTag('Ul', 'Ul', class_='list')
# Grid columns: commented-out fractions have no Vital equivalent class.
Col = NewTag('Col', 'div', Col, class_='col')
# Col1 = NewTag('Col1', 'div', Col1, class_='col-1-12')
# Col2 = NewTag('Col2', 'div', Col2, class_='col-1-6')
Col3 = NewTag('Col3', 'div', Col3, class_='col-1-4')
Col4 = NewTag('Col4', 'div', Col4, class_='col-1-3')
# Col5 = NewTag('Col5', 'div', Col5, class_='col-5-12')
Col6 = NewTag('Col6', 'div', Col6, class_='col-1-2')
# Col7 = NewTag('Col7', 'div', Col7, class_='col-7-12')
Col8 = NewTag('Col8', 'div', Col8, class_='col-2-3')
Col9 = NewTag('Col9', 'div', Col9, class_='col-3-4')
# Col10 = NewTag('Col10', 'div', Col10, class_='col-5-6')
# Col11 = NewTag('Col11', 'div', Col11, class_='col-11-12')
# Col12 = NewTag('Col12', 'div', Col12, class_='col-1-1')
| [
"miyako.dev@gmail.com"
] | miyako.dev@gmail.com |
91f2d3d83f90775793a431658c4f158ea7eeeb1b | aea0837dd60da3e3746c5ab0970bde246db9493d | /allapps/profiles/signals.py | 4692cdce5b6e15e132221dfde54eb9745bd88e20 | [] | no_license | anykate/veryacademy-demo | 5f05707b1fd9085ef1a088b3fc02f48dfa0bd897 | 697d9ab6fffd169c80d6b3fcc0e838e4b5ae6739 | refs/heads/master | 2023-03-13T18:52:39.499619 | 2020-07-15T19:45:22 | 2020-07-15T19:45:22 | 279,959,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from ..profiles.models import UserProfile
@receiver(post_save, sender=get_user_model())
def create_user_profile(sender, instance, created, **kwargs):
    """Create a UserProfile automatically when a new user account is saved."""
    if created:
        UserProfile.objects.create(user=instance)
@receiver(post_save, sender=get_user_model())
def save_user_profile(sender, instance, **kwargs):
    """Re-save the user's profile every time the user object is saved."""
    # NOTE(review): relies on the reverse accessor being ``profiles`` --
    # confirm UserProfile.user declares related_name='profiles'.
    instance.profiles.save()
| [
"aryamane.aniket@gmail.com"
] | aryamane.aniket@gmail.com |
37550aa18fc249a080093d9df9dddf1746e817c1 | d721258b53f0f44b1010cb8e8efac8e2a5c96c26 | /adventure/migrations/0015_auto_20160503_2339.py | adf3848226a6d63e10f367b4eadfaf5402adac09 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | kdechant/eamon | a6662285f51a6cad5797bb9be92ca709ae36921c | 080a43aa80c3a1605c402e68616545a8e9c7975c | refs/heads/master | 2023-05-24T08:20:18.551604 | 2022-08-14T10:27:01 | 2023-04-08T07:31:45 | 49,559,304 | 28 | 7 | MIT | 2023-03-14T21:09:55 | 2016-01-13T08:07:28 | TypeScript | UTF-8 | Python | false | false | 1,252 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-04 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes null/choices on player and
    # playerartifact fields. Do not edit by hand.
    dependencies = [
        ('adventure', '0014_auto_20160419_2324'),
    ]
    operations = [
        migrations.AlterField(
            model_name='player',
            name='gender',
            field=models.CharField(choices=[('m', 'Male'), ('f', 'Female')], max_length=6),
        ),
        migrations.AlterField(
            model_name='playerartifact',
            name='dice',
            field=models.IntegerField(default=1, null=True),
        ),
        migrations.AlterField(
            model_name='playerartifact',
            name='odds',
            field=models.IntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='playerartifact',
            name='sides',
            field=models.IntegerField(default=1, null=True),
        ),
        migrations.AlterField(
            model_name='playerartifact',
            name='weapon_type',
            field=models.IntegerField(choices=[(1, 'Axe'), (2, 'Bow'), (3, 'Club'), (4, 'Spear'), (5, 'Sword')], default=0, null=True),
        ),
    ]
| [
"keith.dechant@gmail.com"
] | keith.dechant@gmail.com |
55f755b1f5a9ff18d933ab5ec6fe7a4c62a49b2c | 7e01c039f2427d434a4ef44a1b9dc0ea21db65ba | /venv/lib/python3.8/site-packages/django/contrib/gis/db/backends/postgis/adapter.py | 8f456ff7c0f6bac9b3fc69a9235e9221b9a73623 | [] | no_license | dmfranz/Spike-exercise | 09f8051163d2a63dfbc3f75da2de0a1bbbbb122d | 83971e95a72d504f629778fece2cdfb953e5d08b | refs/heads/main | 2023-08-23T04:18:43.934471 | 2021-10-11T04:54:28 | 2021-10-11T04:54:28 | 413,568,735 | 0 | 1 | null | 2021-10-11T04:36:22 | 2021-10-04T20:10:01 | Python | UTF-8 | Python | false | false | 2,257 | py | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
from django.contrib.gis.db.backends.postgis.pgraster import to_pgraster
from django.contrib.gis.geos import GEOSGeometry
class PostGISAdapter:
    """Psycopg2 adapter that quotes GEOS geometries/rasters for PostGIS."""
    def __init__(self, obj, geography=False):
        """
        Initialize on the spatial object.
        """
        self.is_geometry = isinstance(obj, (GEOSGeometry, PostGISAdapter))
        # Getting the WKB (in string form, to allow easy pickling of
        # the adaptor) and the SRID from the geometry or raster.
        if self.is_geometry:
            self.ewkb = bytes(obj.ewkb)
            self._adapter = Binary(self.ewkb)
        else:
            # Rasters are serialized to a hex WKB string instead.
            self.ewkb = to_pgraster(obj)
        self.srid = obj.srid
        self.geography = geography
    def __conform__(self, proto):
        """Does the given protocol conform to what Psycopg2 expects?"""
        if proto == ISQLQuote:
            return self
        else:
            raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
    def __eq__(self, other):
        # Equality is based solely on the serialized EWKB payload.
        return isinstance(other, PostGISAdapter) and self.ewkb == other.ewkb
    def __hash__(self):
        return hash(self.ewkb)
    def __str__(self):
        return self.getquoted()
    @classmethod
    def _fix_polygon(cls, poly):
        # Intentional no-op here; presumably an override point for backends
        # that need polygon ring fix-ups -- TODO confirm against subclasses.
        return poly
    def prepare(self, conn):
        """
        This method allows escaping the binary in the style required by the
        server's `standard_conforming_string` setting.
        """
        if self.is_geometry:
            self._adapter.prepare(conn)
    def getquoted(self):
        """
        Return a properly quoted string for use in PostgreSQL/PostGIS.
        """
        if self.is_geometry:
            # Psycopg will figure out whether to use E'\\000' or '\000'.
            return '%s(%s)' % (
                'ST_GeogFromWKB' if self.geography else 'ST_GeomFromEWKB',
                self._adapter.getquoted().decode()
            )
        else:
            # For rasters, add explicit type cast to WKB string.
            return "'%s'::raster" % self.ewkb
| [
"marmara@wisc.edu"
] | marmara@wisc.edu |
32cd827945206d96c0bf02543fc24863d50c006a | 7329f788dc8e48db398cee81da7ca9621d681878 | /mljar/client/result.py | 5d000930c285a3cba9dfb7dbb6912c5c7b5fc50d | [
"Apache-2.0"
] | permissive | armandoayudame/mljar-api-python | 87c810168d348bcc988aa816f782e8f32f6c2281 | b4843a59bb22060707da569b4aa569ab40669421 | refs/heads/master | 2020-03-09T06:33:33.006071 | 2017-07-20T15:22:34 | 2017-07-20T15:22:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from base import MljarHttpClient
from ..model.result import Result
from ..exceptions import NotFoundException
class ResultClient(MljarHttpClient):
    """
    Client to interact with MLJAR results (models).
    """

    def __init__(self, project_hid):
        self.project_hid = project_hid
        self.url = "/results/"
        super(ResultClient, self).__init__()

    def get_results(self, experiment_hid=None):
        """
        List all models, optionally restricted to a single experiment.
        """
        payload = {'project_id': self.project_hid}
        if experiment_hid is not None:
            payload['experiment_id'] = experiment_hid
        response = self.request("POST", self.url, data=payload)
        return [Result.from_dict(entry) for entry in response.json()]
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
e99861aa467364b0490888232da7c8bcbacdb2f1 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/diff_disk_settings.py | ac0d894264c9c6e558e3b2eb6af585ab1412167d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,184 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DiffDiskSettings(Model):
    """Describes the parameters of ephemeral disk settings that can be specified
    for operating system disk. <br><br> NOTE: The ephemeral disk settings can
    only be specified for managed disk.

    :param option: Specifies the ephemeral disk settings for operating system
     disk. Possible values include: 'Local'
    :type option: str or
     ~azure.mgmt.compute.v2018_10_01.models.DiffDiskOptions
    """

    _attribute_map = {
        'option': {'key': 'option', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DiffDiskSettings, self).__init__(**kwargs)
        # 'option' is optional; defaults to None when not supplied.
        self.option = kwargs.get('option', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
4b8702aa921be40726e62a3312c0bce0e233541d | 41a1b3d3491037000de0735823c3d8720f746af0 | /Lib/fontgoggles/mac/sliderGroup.py | ec445810098eb93da099699a4d8f63883e05ffa7 | [
"Apache-2.0"
] | permissive | chrissimpkins/fontgoggles | a8ea5391605c0197e85db211d81b1e290a1f6909 | 814eef1633e017da6fb9a48441e5860d4b5769a3 | refs/heads/master | 2023-03-21T02:22:11.102996 | 2021-03-26T15:45:40 | 2021-03-26T15:45:40 | 244,532,440 | 1 | 0 | Apache-2.0 | 2020-03-03T03:28:11 | 2020-03-03T03:28:10 | null | UTF-8 | Python | false | false | 7,019 | py | import AppKit
from vanilla import Button, EditText, Group, Slider, TextBox, VanillaBaseObject
from fontgoggles.misc.properties import weakrefCallbackProperty
class SliderGroup(Group):
    """A vertical stack of labeled sliders plus a 'Reset all axes' button.

    The group's state is a dict mapping tag -> current value; values equal
    to the tag's (single) default are omitted from get().
    """
    _callback = weakrefCallbackProperty()
    def __init__(self, width, sliderInfo, continuous=True, callback=None):
        super().__init__((0, 0, width, 0))
        self._callback = callback
        self._continuous = continuous
        self._tags = []
        self.setSliderInfo(sliderInfo)
    def _breakCycles(self):
        # Drop the callback reference to avoid retain cycles on teardown.
        self._callback = None
        super()._breakCycles()
    def setSliderInfo(self, sliderInfo):
        """Rebuild all sliders from sliderInfo {tag: (label, min, default, max)},
        preserving the currently-set values where tags survive."""
        savedState = self.get()
        # clear all subviews
        for attr, value in list(self.__dict__.items()):
            if isinstance(value, VanillaBaseObject):
                delattr(self, attr)
        margin = 10
        y = margin
        self._tags = []
        self._defaultValues = {}
        for tag, (label, minValue, defaultValue, maxValue) in sliderInfo.items():
            self._tags.append(tag)
            self._defaultValues[tag] = defaultValue
            attrName = f"slider_{tag}"
            slider = SliderPlus((margin, y, -margin, 40), label, minValue, defaultValue, maxValue,
                                continuous=self._continuous, callback=self._sliderChanged)
            setattr(self, attrName, slider)
            y += 50
        self.resetAllButton = Button((10, y, 120, 25), "Reset all axes", self._resetAllButtonCallback)
        self.resetAllButton.enable(False)
        y += 35
        # Grow the group to fit the sliders we just created.
        posSize = (0, 0, self.getPosSize()[2], y)
        self.setPosSize(posSize)
        self._updateState(savedState)
    def _sliderChanged(self, sender):
        self.resetAllButton.enable(True)
        callCallback(self._callback, self)
    def _resetAllButtonCallback(self, sender):
        # Restore every slider to its default, then notify once.
        self.resetAllButton.enable(False)
        for tag in self._tags:
            attrName = f"slider_{tag}"
            slider = getattr(self, attrName)
            slider.set(self._defaultValues[tag])
        callCallback(self._callback, self)
    def get(self):
        state = {}
        for tag in self._tags:
            attrName = f"slider_{tag}"
            slider = getattr(self, attrName)
            value = slider.get()
            if value is not None:
                # NOTE(review): len()/'in' here require the default to be a
                # container (a set elsewhere in this file); a plain-number
                # default would raise TypeError -- confirm callers.
                if len(self._defaultValues[tag]) != 1 or value not in self._defaultValues[tag]:
                    state[tag] = value
        return state
    def _updateState(self, state):
        # Silently skip tags that no longer have a slider.
        for tag, value in state.items():
            attrName = f"slider_{tag}"
            slider = getattr(self, attrName, None)
            if slider is not None:
                slider.set(value)
    def set(self, state):
        """Set all sliders from a {tag: value} dict; missing tags get defaults."""
        if state:
            self.resetAllButton.enable(True)
        for tag in self._tags:
            attrName = f"slider_{tag}"
            slider = getattr(self, attrName)
            value = state.get(tag)
            if value is None:
                value = self._defaultValues[tag]
            slider.set(value)
class SliderPlus(Group):
    """A labeled slider with an attached numeric edit field, kept in sync.

    Values may also be sets of numbers (multiple selection): the slider then
    shows the mean and the edit field is blank unless the set has one element.
    """
    _callback = weakrefCallbackProperty()
    def __init__(self, posSize, label, minValue, value, maxValue, continuous=True, callback=None):
        super().__init__(posSize)
        self._callback = callback
        self.label = TextBox((0, 0, 0, 20), label)
        self.slider = Slider((0, 18, -60, 20), value=minValue, minValue=minValue, maxValue=maxValue,
                             continuous=continuous, callback=self._sliderCallback)
        self.editField = EditText((-50, 16, 0, 24), "", continuous=False, callback=self._editFieldCallback)
        self.editField._nsObject.setAlignment_(AppKit.NSRightTextAlignment)
        self._setSliderFromValue(value)
        self._setEditFieldFromValue(value)
    def _breakCycles(self):
        # Drop the callback reference to avoid retain cycles on teardown.
        self._callback = None
        super()._breakCycles()
    def _sliderCallback(self, sender):
        value = sender.get()
        self._setEditFieldFromValue(value)
        callCallback(self._callback, self)
    def _editFieldCallback(self, sender):
        value = sender.get()
        if not value:
            # Empty field: report the change but leave the slider as-is.
            # self._setSliderFromValue(None)
            callCallback(self._callback, self)
            return
        # Accept a decimal comma as well as a decimal point.
        value = value.replace(",", ".")
        try:
            f = float(value)
        except ValueError:
            pass
        else:
            self.slider.set(f)
            # The slider clamps to its range; reflect the clamped value back.
            sliderValue = self.slider.get()
            if sliderValue != f:
                self._setEditFieldFromValue(sliderValue)
            callCallback(self._callback, self)
    def _setSliderFromValue(self, value):
        if isinstance(value, set):
            # Multiple values: show their mean.
            value = sum(value) / len(value)
        elif value is None:
            # No value: center the slider in its range.
            minValue = self.slider._nsObject.minValue()
            maxValue = self.slider._nsObject.maxValue()
            value = (minValue + maxValue) / 2
        self.slider.set(value)
    def _setEditFieldFromValue(self, value):
        if isinstance(value, set):
            if len(value) == 1:
                value = next(iter(value))
            else:
                # Ambiguous multi-value: leave the field blank.
                value = None
        if value is None:
            s = ""
        else:
            # Show integers without a decimal point, floats with one digit.
            if int(value) == value:
                s = str(int(value))
            else:
                s = f"{value:.1f}"
        self.editField.set(s)
    def get(self):
        # An empty edit field means "no value".
        if not self.editField.get():
            return None
        else:
            return self.slider.get()
    def set(self, value):
        self._setSliderFromValue(value)
        self._setEditFieldFromValue(value)
def callCallback(callback, sender):
    """Invoke *callback* with *sender*, tolerating an unset (None) callback."""
    if callback is None:
        return
    callback(sender)
if __name__ == "__main__":
    # Manual smoke test: opens a window with four sliders and a button that
    # randomizes their values. Not executed when imported as a module.
    from random import random
    from vanilla import Window
    class SliderTest:
        def __init__(self):
            self.w = Window((300, 400), "SliderTest", autosaveName="SliderTestttt")
            # self.w.slider1 = SliderPlus((10, 10, -10, 50), "Slider 1", 0, 50, 100)
            # self.w.slider2 = SliderPlus((10, 60, -10, 50), "Slider 2", 0, 50, 100)
            info = [("abcd", "The alphabet"),
                    ("xyz ", "The alphabet part 2"),
                    ("wdth", "Width"),
                    ("wght", "Weight")]
            self.sliderInfo = {}
            for tag, label in info:
                self.sliderInfo[tag] = (label, 0, 50, 100)
            self.w.sliderGroup = SliderGroup(300, self.sliderInfo, continuous=True, callback=self.sliderGroupCallback)
            self.w.mutateButton = Button((10, -40, 80, 20), "Mutate", callback=self.mutateCallback)
            self.w.open()
        def sliderGroupCallback(self, sender):
            print(sender.get())
        def mutateCallback(self, sender):
            # Assign each slider a uniformly random value within its range.
            state = {}
            for tag, (label, minValue, defaultValue, maxValue) in self.sliderInfo.items():
                v = minValue + (maxValue - minValue) * random()
                state[tag] = v
            self.w.sliderGroup.set(state)
    t = SliderTest()
| [
"justvanrossum@gmail.com"
] | justvanrossum@gmail.com |
868328ae66f047ce34cae69b20f7d9f0840e2497 | 0fd9cb5e8bfb26fa62b90e65f7fbaa2fd233d3d3 | /pyang/python/pyang/error.py | 2b7b3dee1c4d4fc1b2c1e6a73fb65c95005f3242 | [] | no_license | levensailor/pip4lambda | 0cff15b2dba3ba586652c6cc914252daf01a874b | 22a83a43141f9bf72fdd0cd5faee3b88cc7e49fa | refs/heads/master | 2022-03-02T05:31:48.894906 | 2022-02-11T16:38:00 | 2022-02-11T16:38:00 | 174,207,440 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,128 | py | import copy
### struct to keep track of position for error messages
class Position(object):
    """A source location (file reference + line number) for error messages.

    ``uses_pos``, when set to another Position, makes the rendered string
    take the form "USES_POS (at REF:LINE)".
    """
    __slots__ = (
        'ref',
        'line',
        'top',
        'uses_pos',
    )

    def __init__(self, ref):
        self.ref = ref
        self.line = 0
        self.top = None
        self.uses_pos = None

    def __str__(self):
        here = '{}:{}'.format(self.ref, self.line)
        if self.uses_pos is not None:
            return '{} (at {})'.format(self.uses_pos, here)
        return here
### Exceptions
class Abort(Exception):
    """Raised on non-recoverable errors to abort parsing immediately."""
    pass
class Eof(Exception):
    """Raised by the tokenizer when end of file is detected."""
    pass
class TransformError(Exception):
    """Raised by plugins to make the transform() step fail.

    Attributes:
        msg: human-readable description of the failure (may be empty).
        exit_code: process exit status to report (default 1).
    """
    def __init__(self, msg="", exit_code=1):
        # Deliberately does not pass msg to Exception.__init__, matching
        # the original behavior (str(exc) stays empty).
        self.exit_code = exit_code
        self.msg = msg
class EmitError(Exception):
    """Raised by plugins to make the emit() step fail.

    Attributes:
        msg: human-readable description of the failure (may be empty).
        exit_code: process exit status to report (default 1).
    """
    def __init__(self, msg="", exit_code=1):
        # Deliberately does not pass msg to Exception.__init__, matching
        # the original behavior (str(exc) stays empty).
        self.exit_code = exit_code
        self.msg = msg
### error codes
## level:
## 1: critical error, can not be made into a warning
## 2: major error, can not be made into a warning
## 3: minor error, can be made into warning with -W
## 4: warning
error_codes = \
{
'READ_ERROR':
(1,
'read error: %s'),
'EOF_ERROR':
(1,
'premature end of file'),
'EXPECTED_QUOTED_STRING':
(1,
'expected quoted string after \'+\' operator'),
'UNKNOWN_KEYWORD':
(1,
'unknown keyword "%s"'),
'INCOMPLETE_STATEMENT':
(1,
'unterminated statement definition for keyword "%s", looking at %s'),
'EXPECTED_KEYWORD':
(1,
'expected keyword "%s"'),
'EXPECTED_KEYWORD_2':
(1,
'expected keyword "%s" as child to "%s"'),
'EXPECTED_DATA_DEF':
(1,
'expected a data definition statement as child to "%s"'),
'UNEXPECTED_KEYWORD':
(1,
'unexpected keyword "%s"'),
'UNEXPECTED_KEYWORD_1':
(1,
'unexpected keyword "%s", expected "%s"'),
'UNEXPECTED_KEYWORD_N':
(1,
'unexpected keyword "%s", expected one of %s'),
'UNEXPECTED_KEYWORD_CANONICAL':
(1,
'keyword "%s" not in canonical order (see RFC 6020, Section 12)'),
'UNEXPECTED_KEYWORD_CANONICAL_1':
(1,
'keyword "%s" not in canonical order,'
'expected "%s" (see RFC 6020, Section 12)'),
'UNEXPECTED_KEYWORD_USES':
(1,
'unexpected keyword "%s" under "%s", defined at %s'),
'EXPECTED_ARGUMENT':
(1,
'expected an argument for keyword "%s"'),
'UNEXPECTED_ARGUMENT':
(1,
'did not expect an argument, got "%s"'),
'XML_IDENTIFIER':
(3,
'illegal identifier "%s", must not start with [xX][mM][lL] in'
'YANG version 1 (see RFC 6020, Section 12)'),
'TRAILING_GARBAGE':
(2,
'trailing garbage after module'),
'BAD_VALUE':
(1,
'bad value "%s" (should be %s)'),
'CIRCULAR_DEPENDENCY':
(1,
'circular dependency for %s "%s"'),
'MODULE_NOT_FOUND':
(1,
'module "%s" not found in search path'),
'MODULE_NOT_FOUND_REV':
(1,
'module "%s" revision "%s" not found in search path'),
'MODULE_NOT_IMPORTED':
(1,
'no module with the namespace "%s" is imported'),
'BAD_IMPORT':
(1,
'cannot import %s "%s", must be a module'),
'BAD_IMPORT_YANG_VERSION':
(1,
'a version %s module cannot import a version %s module by revision'),
'BAD_INCLUDE':
(1,
'cannot include %s "%s", must be a submodule'),
'BAD_INCLUDE_YANG_VERSION':
(1,
'cannot include a version %s submodule in a version %s module'),
'BAD_MODULE_NAME':
(2,
'unexpected modulename "%s" in %s, should be %s'),
'WBAD_MODULE_NAME':
(4,
'unexpected modulename "%s" in %s, should be %s'),
'FILENAME_BAD_MODULE_NAME':
(4,
'filename "%s" suggests invalid module name "%s", should match "%s"'),
'BAD_REVISION':
(3,
'unexpected latest revision "%s" in %s, should be %s'),
'WBAD_REVISION':
(4,
'unexpected latest revision "%s" in %s, should be %s'),
'FILENAME_BAD_REVISION':
(4,
'filename "%s" suggests invalid revision "%s", should match "%s"'),
'BAD_SUB_BELONGS_TO':
(1,
'module %s includes %s, but %s does not specify a correct belongs-to'),
'MISSING_INCLUDE':
(1,
'submodule %s is included by %s, but not by the module %s'),
'PREFIX_ALREADY_USED':
(1,
'prefix "%s" already used for module %s'),
'PREFIX_NOT_DEFINED':
(1,
'prefix "%s" is not defined (reported only once)'),
'WPREFIX_NOT_DEFINED':
(4,
'prefix "%s" is not defined'),
'NODE_NOT_FOUND':
(1,
'node %s::%s is not found'),
'BAD_NODE_IN_AUGMENT':
(1,
'node %s::%s of type %s cannot be augmented'),
'BAD_NODE_IN_REFINE':
(1,
'node %s::%s cannot be refined'),
'BAD_REFINEMENT':
(1,
'"%s" node "%s::%s" cannot be refined with "%s"'),
'BAD_DEVIATE_KEY':
(2,
'key node "%s::%s" cannot be deviated with "not-supported"'),
'BAD_DEVIATE_ADD':
(2,
'the %s property already exists in node "%s::%s"'),
'BAD_DEVIATE_DEL':
(2,
'the %s property does not exist in node "%s::%s"'),
'BAD_DEVIATE_TYPE':
(2,
'the %s property cannot be added'),
'BAD_DEVIATE_WITH_NOT_SUPPORTED':
(2,
'cannot have other deviate statement together with "not-supported"'),
'EXTENSION_NOT_DEFINED':
(1,
'extension "%s" is not defined in module %s'),
'TYPE_NOT_FOUND':
(1,
'type "%s" not found in module %s'),
'FEATURE_NOT_FOUND':
(1,
'feature "%s" not found in module %s'),
'IDENTITY_NOT_FOUND':
(1,
'identity "%s" not found in module %s'),
'GROUPING_NOT_FOUND':
(1,
'grouping "%s" not found in module %s'),
'DEFAULT_CASE_NOT_FOUND':
(1,
'the default case "%s" is not found"'),
'MANDATORY_NODE_IN_DEFAULT_CASE':
(1,
'mandatory node in default case'),
'MULTIPLE_REFINE':
(1,
'the node "%s" is already refined at %s'),
'RANGE_BOUNDS':
(2,
'range error: "%s" is not larger than "%s"'),
'LENGTH_BOUNDS':
(2,
'length error: "%s" is not larger than "%s"'),
'LENGTH_VALUE':
(2,
'length error: "%s" is too large'),
'TYPE_VALUE':
(2,
'the value "%s" does not match its base type %s- %s'),
'DUPLICATE_ENUM_NAME':
(1,
'the enum name "%s" has already been used for the ' \
'enumeration at %s'),
'DUPLICATE_ENUM_VALUE':
(1,
'the integer value "%d" has already been used for the ' \
'enumeration at %s'),
'ENUM_VALUE':
(1,
'the enumeration value "%s" is not an 32 bit integer'),
'BAD_ENUM_VALUE':
(1,
'the given value "%s" does not match the base enum value "%d"'),
'DUPLICATE_BIT_POSITION':
(1,
'the position "%d" has already been used for the bit at %s'),
'BIT_POSITION':
(1,
'the position value "%s" is not valid'),
'BAD_BIT_POSITION':
(1,
'the given position "%s" does not match the base bit position "%d"'),
'NEED_KEY':
(1,
'the list needs at least one key'),
'NEED_KEY_USES':
(1,
'the list at "%s" needs at least one key because it is used as config'),
'KEY_BAD_CONFIG':
(1,
'the key "%s" does not have same "config" as its list'),
'BAD_KEY':
(1,
'the key "%s" does not reference an existing leaf'),
'BAD_UNIQUE':
(1,
'the unique argument "%s" does not reference an existing leaf'),
'BAD_UNIQUE_PART':
(1,
'the identifier "%s" in the unique argument does not reference '
'an existing container'),
'BAD_UNIQUE_PART_LIST':
(1,
'the identifier "%s" in the unique argument references a list; '
'this is not legal'),
'BAD_UNIQUE_CONFIG':
(1,
'the identifer "%s" has not the same config property as the'
' other nodes in the unique expression'),
'ILLEGAL_ESCAPE':
(1,
'the escape sequence "\%s" is illegal in double quoted strings'),
'ILLEGAL_ESCAPE_WARN':
(4,
'the escape sequence "\%s" is unsafe in double quoted strings' \
' - pass the flag --lax-quote-checks to avoid this warning'),
'UNIQUE_IS_KEY':
(4,
'all keys in the list are redundantly present in the unique statement'),
'DUPLICATE_KEY':
(2,
'the key "%s" must not be listed more than once'),
'DUPLICATE_UNIQUE':
(3,
'the leaf "%s" occurs more than once in the unique expression'),
'PATTERN_ERROR':
(2,
'syntax error in pattern: %s'),
'PATTERN_FAILURE':
(4,
'could not verify pattern: %s'),
'LEAFREF_TOO_MANY_UP':
(1,
'the path for %s at %s has too many ".."'),
'LEAFREF_IDENTIFIER_NOT_FOUND':
(1,
'%s:%s in the path for %s at %s is not found'),
'LEAFREF_IDENTIFIER_BAD_NODE':
(1,
'%s:%s in the path for %s at %s references a %s node'),
'LEAFREF_BAD_PREDICATE':
(1,
'%s:%s in the path for %s at %s has a predicate, '
'but is not a list'),
'LEAFREF_BAD_PREDICATE_PTR':
(1,
'%s:%s in the path\'s predicate for %s at %s is compared '
'with a node that is not a leaf'),
'LEAFREF_NOT_LEAF':
(1,
'the path for %s at %s does not refer to a leaf'),
'LEAFREF_NO_KEY':
(1,
'%s:%s in the path for %s at %s is not the name of a key leaf'),
'LEAFREF_MULTIPLE_KEYS':
(1,
'%s:%s in the path for %s at %s is referenced more than once'),
'LEAFREF_BAD_CONFIG':
(1,
'the path for %s is config but refers to a '
'non-config leaf %s defined at %s'),
'LEAFREF_DEREF_NOT_LEAFREF':
(1,
'the deref argument refers to node %s at %s which is'
' not a leafref leaf'),
'LEAFREF_DEREF_NOT_KEY':
(1,
'the deref argument refers to node %s at %s which'
' does not refer to a key (%s at %s)'),
'LEAFREF_DEREF_NOT_LEAFREF':
(1,
'the deref argument for %s at %s does not refer to a leafref leaf'),
'DUPLICATE_CHILD_NAME':
(1,
'there is already a child node to "%s" at %s with the name "%s" '
'defined at %s'),
'BAD_TYPE_NAME':
(1,
'illegal type name "%s"'),
'TYPE_ALREADY_DEFINED':
(1,
'type name "%s" is already defined at %s'),
'GROUPING_ALREADY_DEFINED':
(1,
'grouping name "%s" is already defined at %s'),
'FEATURE_ALREADY_DEFINED':
(1,
'feature name "%s" is already defined at %s'),
'IDENTITY_ALREADY_DEFINED':
(1,
'identity name "%s" is already defined at %s'),
'EXTENSION_ALREADY_DEFINED':
(1,
'extension name "%s" is already defined at %s'),
'BAD_RESTRICTION':
(1,
'restriction %s not allowed for this base type'),
'BAD_DEFAULT_VALUE':
(1,
'the type "%s" cannot have a default value'),
'MISSING_TYPE_SPEC':
(1,
'a type %s must have at least one %s statement'),
'MISSING_TYPE_SPEC_1':
(1,
'a type %s must have a %s statement'),
'BAD_TYPE_IN_UNION':
(1,
'the type %s (defined at %s) cannot be part of a union'),
'BAD_TYPE_IN_KEY':
(1,
'the type %s cannot be part of a key, used by leaf %s'),
'KEY_BAD_SUBSTMT':
(1,
'the statement %s cannot be given for a key'),
'DEFAULT_AND_MANDATORY':
(1,
'a \'default\' value cannot be given when \'mandatory\' is "true"'),
'DEFAULT_AND_MIN_ELEMENTS':
(1,
'a \'default\' value cannot be given when \'min-elements\' is'
' greater than 0'),
'DUPLICATE_DEFAULT':
(1,
'the default value %s is given twice in the leaf list'),
'BAD_STATUS_REFERENCE':
(2,
'the "%s" definition is %s, but the "%s" it references is %s'),
'REVISION_ORDER':
(4,
'the revision statements are not given in reverse chronological order'),
'EXTENSION_ARGUMENT_PRESENT':
(1,
'unexpected argument for extension %s'),
'EXTENSION_NO_ARGUMENT_PRESENT':
(1,
'expected argument for extension %s'),
'SYNTAX_ERROR':
(1,
'syntax error: %s'),
'DUPLICATE_NAMESPACE':
(1,
'duplicate namespace uri %s found in module %s'),
'MISSING_ARGUMENT_ATTRIBUTE':
(1,
'missing argument attribute "%s" for "%s"'),
'MISSING_ARGUMENT_ELEMENT':
(1,
'missing argument element "%s" for "%s"'),
'UNEXPECTED_ATTRIBUTE':
(1,
'unexpected attribute %s'),
'INVALID_CONFIG':
(2,
'config true cannot be set when the parent is config false'),
'XPATH_SYNTAX_ERROR':
(2,
'XPath syntax error: %s'),
'XPATH_VARIABLE':
(2,
'XPath variable "%s" is not defined in the XPath context'),
'XPATH_FUNCTION':
(2,
'XPath function "%s" is not defined in the XPath context'),
'AUGMENT_MANDATORY':
(1,
'cannot augment with mandatory node %s'),
'LONG_IDENTIFIER':
(3,
'identifier %s exceeds %s characters'),
'CONFIG_IGNORED':
(4,
'explicit config statement is ignored'),
'UNUSED_IMPORT':
(4,
'imported module %s not used'),
'UNUSED_TYPEDEF':
(4,
'locally scoped typedef %s not used'),
'UNUSED_GROUPING':
(4,
'locally scoped grouping %s not used'),
'KEY_HAS_DEFAULT':
(4,
'default value for a key leaf is ignored'),
'KEY_HAS_MANDATORY_FALSE':
(4,
'"mandatory" statement for a key leaf is ignored'),
'LONG_LINE':
(4,
'line length %s exceeds %s characters'),
'STRICT_XPATH_FUNCTION':
(2,
'XPath function "%s" is not allowed for strict YANG compliance'),
}
def add_error_code(tag, level, fmt):
    """Add an error code to the framework.

    Can be used by plugins to add special errors.

    tag   -- symbolic error identifier
    level -- severity (1-3 error, 4 warning)
    fmt   -- %-style message format string
    """
    error_codes[tag] = (level, fmt)
def err_level(tag):
    """Return the severity level registered for *tag*, or 0 if unknown."""
    entry = error_codes.get(tag)
    if entry is None:
        return 0
    return entry[0]
def err_to_str(tag, args):
    """Render the message for *tag* with *args*.

    Falls back to a generic message when the tag is unknown (or when the
    %-formatting itself raises KeyError, preserving historical behavior).
    """
    try:
        level, fmt = error_codes[tag]
        return fmt % args
    except KeyError:
        return 'unknown error %s' % tag
def err_add(errors, pos, tag, args):
    """Append (pos, tag, args) to *errors*, skipping exact duplicates.

    A duplicate is an entry with the same tag and args whose position has
    the same line, ref and top. The position is shallow-copied on insert.
    """
    for existing_pos, existing_tag, existing_args in errors:
        if (existing_pos.line == pos.line and existing_pos.ref == pos.ref
                and existing_pos.top == pos.top
                and existing_tag == tag and existing_args == args):
            return
    errors.append((copy.copy(pos), tag, args))
def is_warning(level):
    # A warning is any level that is not an error level (i.e. level >= 4).
    return not is_error(level)
def is_error(level):
    """Levels 1-3 denote errors; level 4 denotes a warning."""
    warning_level = 4
    return level < warning_level
def allow_warning(level):
    """Only minor errors (level 3) and warnings (level 4) may be downgraded."""
    minor_threshold = 2
    return level > minor_threshold
| [
"jlevensailor@presidio.com"
] | jlevensailor@presidio.com |
72e698fc4e1fed404313ed161df45403fb8971a5 | 6411acb3828fec8cc79f6f2630a4d99e8352a9aa | /326.py | f29ee1980db2b5d3ee63d806ebccad065e793aa1 | [] | no_license | zhouliuling/Leetcode_Task | 95f55a3ca33b561a130fc15272e513d9af2c0317 | 0230d31351a4093e8ae6be5fe0c175f3f41e08ac | refs/heads/master | 2020-05-25T18:44:59.377953 | 2019-09-12T02:14:22 | 2019-09-12T02:14:22 | 187,935,297 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | ## 3的幂 超时
## 循环
class Solution(object):
    """LeetCode 326: decide whether an integer is a power of three."""

    def isPowerOfThree(self, n):
        """Return True if n == 3**k for some integer k >= 0.

        :type n: int
        :rtype: bool

        Fixes over the previous version: 1 (== 3**0) is now correctly
        accepted, non-multiples of 3 such as 5 no longer loop forever
        (the old ``while n >= 3`` body never changed n on that path),
        and integer division is used instead of Python 3's float ``/``.
        """
        # Powers of three are strictly positive.
        if n < 1:
            return False
        # Divide out factors of 3; a power of three reduces exactly to 1.
        while n % 3 == 0:
            n //= 3
        return n == 1
| [
"noreply@github.com"
] | zhouliuling.noreply@github.com |
16e8e3d398736c6e70c7d28c21ad2a6de16d3a9a | dae4ab4882080344e5f505def7e2e59e0ed888b4 | /polyaxon/scheduler/spawners/horovod_spawner.py | a7b8b56fea439cfa513c771759a4f92ac18a4103 | [
"MPL-2.0"
] | permissive | vfdev-5/polyaxon | 8c3945604e8eaa25ba8b3a39ed0838d0b9f39a28 | 3e1511a993dc1a03e0a0827de0357f4adcc0015f | refs/heads/master | 2021-07-09T22:27:23.272591 | 2018-11-01T23:44:44 | 2018-11-01T23:44:44 | 154,320,634 | 0 | 0 | MIT | 2018-10-23T12:01:34 | 2018-10-23T12:01:33 | null | UTF-8 | Python | false | false | 3,769 | py | from scheduler.spawners.experiment_spawner import ExperimentSpawner
from schemas.environments import HorovodClusterConfig
from schemas.specifications import HorovodSpecification
from schemas.tasks import TaskType
class HorovodSpawner(ExperimentSpawner):
    """Spawner for Horovod experiments: one master plus N worker replicas.

    Each scheduling property below returns a mapping of the form
    ``{task_type: {task_idx: value}}`` so pods can be configured per task.
    """

    MASTER_SERVICE = True
    WORKER_SERVICE = True

    @property
    def resources(self):
        """Per-task compute resources for master and workers."""
        cluster, is_distributed = self.spec.cluster_def
        worker_resources = HorovodSpecification.get_worker_resources(
            environment=self.spec.environment,
            cluster=cluster,
            is_distributed=is_distributed
        )
        return {
            TaskType.MASTER: {0: self.spec.master_resources},
            TaskType.WORKER: worker_resources,
        }

    @property
    def node_selectors(self):
        """Per-task Kubernetes node selectors for master and workers."""
        cluster, is_distributed = self.spec.cluster_def
        worker_node_selectors = HorovodSpecification.get_worker_node_selectors(
            environment=self.spec.environment,
            cluster=cluster,
            is_distributed=is_distributed
        )
        return {
            TaskType.MASTER: {0: self.spec.master_node_selector},
            TaskType.WORKER: worker_node_selectors,
        }

    @property
    def affinities(self):
        """Per-task Kubernetes affinities for master and workers."""
        cluster, is_distributed = self.spec.cluster_def
        worker_affinities = HorovodSpecification.get_worker_affinities(
            environment=self.spec.environment,
            cluster=cluster,
            is_distributed=is_distributed
        )
        return {
            TaskType.MASTER: {0: self.spec.master_affinity},
            TaskType.WORKER: worker_affinities,
        }

    @property
    def tolerations(self):
        """Per-task Kubernetes tolerations for master and workers."""
        cluster, is_distributed = self.spec.cluster_def
        worker_tolerations = HorovodSpecification.get_worker_tolerations(
            environment=self.spec.environment,
            cluster=cluster,
            is_distributed=is_distributed
        )
        return {
            # BUG FIX: was `self.spec.master_affinity` (copy/paste from the
            # `affinities` property above); master tolerations should come
            # from the spec's tolerations field, mirroring the worker side.
            # NOTE(review): assumes the spec exposes `master_tolerations`
            # parallel to master_affinity/master_node_selector — confirm.
            TaskType.MASTER: {0: self.spec.master_tolerations},
            TaskType.WORKER: worker_tolerations,
        }

    def get_resources(self, task_type, task_idx):
        return self.resources.get(task_type, {}).get(task_idx)

    def get_node_selector(self, task_type, task_idx):
        return self.node_selectors.get(task_type, {}).get(task_idx)

    def get_affinity(self, task_type, task_idx):
        return self.affinities.get(task_type, {}).get(task_idx)

    def get_tolerations(self, task_type, task_idx):
        return self.tolerations.get(task_type, {}).get(task_idx)

    def get_n_pods(self, task_type):
        """Number of pods declared for `task_type` in the cluster definition."""
        return self.spec.cluster_def[0].get(task_type, 0)

    def start_experiment(self):
        """Start the master (via the base class) then the worker jobs."""
        experiment = super().start_experiment()
        experiment[TaskType.WORKER] = self.create_multi_jobs(task_type=TaskType.WORKER,
                                                             add_service=self.WORKER_SERVICE)
        return experiment

    def stop_experiment(self):
        """Stop master and workers; return False if any deletion failed."""
        deleted = super().stop_experiment()
        if not self.delete_multi_jobs(task_type=TaskType.WORKER, has_service=self.WORKER_SERVICE):
            deleted = False
        return deleted

    def get_cluster(self):
        """Build the Horovod cluster spec: pod addresses for master and workers."""
        cluster_def, _ = self.spec.cluster_def

        job_name = self.pod_manager.get_job_name(task_type=TaskType.MASTER, task_idx=0)
        cluster_config = {
            TaskType.MASTER: [self._get_pod_address(job_name)]
        }

        workers = []
        for i in range(cluster_def.get(TaskType.WORKER, 0)):
            job_name = self.pod_manager.get_job_name(task_type=TaskType.WORKER, task_idx=i)
            workers.append(self._get_pod_address(job_name))

        cluster_config[TaskType.WORKER] = workers

        return HorovodClusterConfig.from_dict(cluster_config).to_dict()
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
80e587f59af889c72ed9b2e941144441a2eda60e | 12123592a54c4f292ed6a8df4bcc0df33e082206 | /py2/pgms/appx/timedeltas.py | a1e94beb0b7ea3d8b5472cc307de9f8b3ec91e80 | [] | no_license | alvinooo/advpython | b44b7322915f832c8dce72fe63ae6ac7c99ef3d4 | df95e06fd7ba11b0d2329f4b113863a9c866fbae | refs/heads/master | 2021-01-23T01:17:22.487514 | 2017-05-30T17:51:47 | 2017-05-30T17:51:47 | 92,860,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
# timedeltas.py - timedelta objects in datetime module
from datetime import timedelta, datetime
td1 = timedelta(days=2, hours=18)
td2 = timedelta(hours=8.5)
td3 = td1 + td2
days = td3.days
hours = td3.seconds / float(3600)
hoursahead = td3.total_seconds() / 3600
print days, hours, hoursahead
dt1 = datetime(2016, 2, 25)
dt2 = datetime(2016, 3, 6)
td4 = dt2 - dt1
print td4.days
now = datetime.today()
print now
print now + timedelta(minutes=10)
###############################################
#
# $ timedeltas.py
# 3 2.5 74.5
# 10
# 2016-07-09 14:34:36.785123
# 2016-07-09 14:44:36.785123
#
| [
"alvin.heng@teradata.com"
] | alvin.heng@teradata.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.