blob_id (string, 40) | directory_id (string, 40) | path (string, 2–616) | content_id (string, 40) | detected_licenses (list, 0–69 items) | license_type (2 classes) | repo_name (string, 5–118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (246 classes) | content (string, 2–10.3M) | authors (list, 1 item) | author_id (string, 0–212)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa0eb2304723651a84891f2939dba38fde4c68ce
|
90dc956890f6581424aa0925c86c5752a19ba618
|
/url_shortner.py
|
eb52b4d306b0277fdb7d4c8f3d576815288f4a62
|
[] |
no_license
|
Narentest/Applause-task
|
0579dc812c5c2304cab6a5af8ad629008a83d3e7
|
072d121578d2ffdb7f6a1cb9de1d7d1f74e8c945
|
refs/heads/main
| 2023-03-13T22:36:32.499849
| 2021-03-15T00:03:09
| 2021-03-15T00:03:09
| 347,784,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from __future__ import with_statement
import contextlib
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import sys
def make_tiny(url):
    # TinyURL's public API returns the shortened URL as the response body
    request_url = ('http://tinyurl.com/api-create.php?' +
        urlencode({'url': url}))
    with contextlib.closing(urlopen(request_url)) as response:
        return response.read().decode('utf-8')
def main():
for tinyurl in map(make_tiny, sys.argv[1:]):
print(tinyurl)
if __name__ == '__main__':
main()
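# Usage sketch: python url_shortner.py https://www.example.com/some/long/path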
|
[
"narendarmsctesting@gmail.com"
] |
narendarmsctesting@gmail.com
|
cd009ea532016e6d794b44635f9cf787d176f987
|
d374478ba42d027e730e2b9d378b0a08de9c23b5
|
/4. Building your Deep Neural Network/linear_backward.py
|
a7d5789fc96744d2b4c971623e11c58a98dfa9a2
|
[] |
no_license
|
kuangzijian/Neural-Networks-and-Deep-Learning
|
8ffe46e7b99611c033f54d553a897313b36ea22b
|
781d62679497e9dfa6e6556d2b49a6366c6f945f
|
refs/heads/master
| 2023-08-08T07:32:13.280785
| 2021-05-05T16:44:49
| 2021-05-05T16:44:49
| 217,354,065
| 0
| 0
| null | 2023-07-22T19:42:15
| 2019-10-24T17:20:55
|
Python
|
UTF-8
|
Python
| false
| false
| 990
|
py
|
# GRADED FUNCTION: linear_backward
import numpy as np
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = (1 / m) * dZ.dot(A_prev.T)
db = np.sum(dZ, axis=1, keepdims=True) / m
dA_prev = np.dot(W.T, dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
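# A quick shape sanity check (a minimal sketch with arbitrary layer sizes:
# 3 units feeding 4 units over a batch of 5 examples):
if __name__ == '__main__':
    np.random.seed(0)
    A_prev, W, b = np.random.randn(3, 5), np.random.randn(4, 3), np.random.randn(4, 1)
    dZ = np.random.randn(4, 5)
    dA_prev, dW, db = linear_backward(dZ, (A_prev, W, b))
    print(dA_prev.shape, dW.shape, db.shape)  # (3, 5) (4, 3) (4, 1)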
|
[
"kuangzijian1@hotmail.com"
] |
kuangzijian1@hotmail.com
|
6137c990285e636fdf82e210f75dee172cfa4c61
|
618eb6d8c9aca9a179ef252d1e4cb9ab13449e13
|
/String/Pattern Matcher.py
|
db791da8160afb25e4caeaa9c09768f531257c23
|
[] |
no_license
|
kotsky/programming-exercises
|
8966660986d8baf0467f1efc88b606b7d61a31f7
|
636b1f6b0ab28c6eef8f8900e68393f7b1fb931a
|
refs/heads/master
| 2023-04-16T22:40:21.461494
| 2021-05-02T03:48:56
| 2021-05-02T03:48:56
| 268,241,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
'''
patternMatcher("xxyxxy", "gogopowerrangergogopowerranger") => ["go", "powerranger"]
The task is to find substrings x = "go" and y = "powerranger" such that substituting
them into the pattern reproduces the given string. Count the occurrences of x and y,
derive the possible substring lengths from those counts, and test each candidate
substring for x in turn.
'''
def patternMatcher(pattern, string):
if pattern == "" or len(pattern) > len(string):
return []
    new_pattern = separation(pattern)
flag = None
if new_pattern[0] != 'x':
flag = 1
for idx in range(len(new_pattern)):
if new_pattern[idx] == 'x':
new_pattern[idx] = 'y'
else:
new_pattern[idx] = 'x'
count_x = 0
count_y = 0
position_y = None
for idx in range(len(new_pattern)):
if new_pattern[idx] == 'x':
count_x += 1
else:
count_y += 1
if position_y is None:
position_y = idx
if count_x == 0 or count_y == 0:
count = count_x if count_x != 0 else count_y
if len(string) % count != 0:
return []
length = len(string)
substring_range = length//count
substring = string[0:substring_range]
for i in range(1, count):
if substring != string[i*substring_range:(i*substring_range+substring_range)]:
return []
return [substring, ""] if flag is None else ["", substring]
else:
for end in range(1, len(string)):
x = string[0:end]
len_x = len(x)
len_y = (len(string) - len_x*count_x)//count_y if (len(string) - len_x*count_x) % count_y == 0 else -1
if len_y == -1:
continue
y = string[len_x*position_y:(len_y + len_x*position_y)]
check_list = new_pattern.copy()
for idx in range(len(check_list)):
if check_list[idx] == 'x':
check_list[idx] = x
else:
check_list[idx] = y
if "".join(check_list) == string:
return [x, y] if flag is None else [y, x]
return []
def separation(string):
    # split the pattern string into a list of its characters
    return list(string)
print(patternMatcher("xxyxxy", "gogopowerrangergogopowerranger"))
|
[
"noreply@github.com"
] |
kotsky.noreply@github.com
|
a6413ee47a04dd4a4c2b9d22eed87295184b3f8e
|
919156799a5dd4b8bfed059b30af5e98fa2686ed
|
/Clustering/hierarchical_clustering.py
|
b6902c0b8aafc203b622ee52fbdc2d1f7a192afc
|
[] |
no_license
|
SoniaCheung/Machine-Learning-Samples
|
6060d59ca287e3e91ca0aea9a2307e5f0dac5fad
|
a0cf9b15f3202bf9ed6cef73f8e09d2f33ad9e34
|
refs/heads/master
| 2020-03-29T22:55:31.035544
| 2018-10-04T14:21:28
| 2018-10-04T14:21:28
| 150,447,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 19:08:27 2018
@author: sonia
Hierarchical Clustering
"""
# Hierarchical Clustering
#import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import the mall dataset with pandas
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
#Using the dendrogram to find the optimal number of clusters
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean Distance')
plt.show()
#Fitting the hierarchical clustering algorithm to the dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(X)
#Visualizing the clusters
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, color = 'red', label = 'Cluster 1')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, color = 'blue', label = 'Cluster 2')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, color = 'green', label = 'Cluster 3')
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, color = 'cyan', label = 'Cluster 4')
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, color = 'magenta', label = 'Cluster 5')
plt.title('Cluster of clients')
plt.xlabel('Annual Income ($k)')
plt.ylabel('Spending score (1 - 100)')
plt.legend()
plt.show()
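# Note: recent scikit-learn releases renamed AgglomerativeClustering's `affinity`
# argument to `metric`; ward linkage requires Euclidean distances, so on newer
# versions the call would drop affinity='euclidean' or pass metric='euclidean'.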
|
[
"soniacym@gmail.com"
] |
soniacym@gmail.com
|
ebb70ae6174007de438a9ebda6abaf0d24309a98
|
76d4de4ec3ae352bdeb55bf216afab7984c6fd63
|
/server/main/api/security.py
|
5054ca6f0fd8ff14ef71b938dfc4511fd68a0bc6
|
[] |
no_license
|
krystofwoldrich/via-project
|
41e041df8b5120b9c99b30b6b52864752e88c785
|
d332710914c4d25c06caf4ecd9f041ddf5dcb119
|
refs/heads/main
| 2023-02-11T08:36:46.187582
| 2021-01-07T12:26:29
| 2021-01-07T12:28:35
| 303,415,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,157
|
py
|
import uuid
import jwt
from flask_restplus import Namespace, Resource, fields
from flask import request
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
from parameters import config
from custom_time.custom_time import get_now_utc_iso_string
from datetime import datetime, timedelta
from database.database import db
from bson import ObjectId
SECRET_KEY = config['api']['secret_key']
users_collection = db.users
security_namespace = Namespace('auth', description='Auth API functions')
registration_body_model = security_namespace.model('Registration', {
'username': fields.String(required=True, example='john_doe'),
'password': fields.String(required=True, example='myAwEsOmE_pass!1234'),
'email': fields.String(required=True, example='john_doe@example.com'),
})
login_body_model = security_namespace.model('Login', {
'username': fields.String(required=True, example='john_doe'),
'password': fields.String(required=True, example='myAwEsOmE_pass!1234'),
})
def token_required(f):
@wraps(f)
def decorated(ref, *args, **kwargs):
token = None
if 'x-access-token' in request.headers:
token = request.headers['x-access-token']
if not token:
return {'message' : 'Token is missing!'}, 401
try:
data = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
current_user = None
current_user = users_collection.find_one({'_id': ObjectId(data['id'])})
            if current_user is None:
                raise ValueError('User not found')
except Exception as e:
print(e)
return {'message' : 'Token is invalid!'}, 401
current_user['id'] = str(current_user['_id'])
return f(ref, current_user, *args, **kwargs)
return decorated
@security_namespace.route('/register')
class Register(Resource):
def options(self):
pass
@security_namespace.doc(body=registration_body_model, responses={201: 'Created'}, description="Register a new user")
def post(self):
data = request.get_json()
hashed_password = generate_password_hash(data['password'], method='sha256')
new_user = {
'username': data['username'],
'password': hashed_password,
'email': data['email'],
'admin': False,
'last_login_at': get_now_utc_iso_string(),
}
users_collection.insert_one(new_user)
return {'message' : 'New user created!'}, 201
@security_namespace.route('/login')
@security_namespace.header('X-Header', 'Some class header')
class Login(Resource):
def options(self):
pass
    @security_namespace.doc(body=login_body_model, responses={200: 'OK'}, description="Log in an existing user")
def put(self):
auth = request.get_json()
if not auth or not auth['username'] or not auth['password']:
return 'Could not verify', 401
current_user = None
current_user = users_collection.find_one(
{"username": auth['username']})
if not current_user:
return 'Could not verify', 401
if check_password_hash(current_user['password'], auth['password']):
token = jwt.encode(
{
'id': str(current_user['_id']),
'exp': datetime.utcnow() + timedelta(weeks=8),
},
SECRET_KEY,
algorithm='HS256'
)
return {'token' : token}
return 'Could not verify', 401
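# A minimal client-side sketch of the auth flow above, assuming the API is served
# at http://localhost:5000 and that '/some-protected-route' stands in for any
# endpoint wrapped with @token_required (both names are hypothetical):
import requests

BASE = 'http://localhost:5000'
creds = {'username': 'john_doe', 'password': 'myAwEsOmE_pass!1234'}
requests.post(BASE + '/auth/register', json=dict(creds, email='john_doe@example.com'))
token = requests.put(BASE + '/auth/login', json=creds).json()['token']
# protected resources expect the JWT in the x-access-token header
requests.get(BASE + '/some-protected-route', headers={'x-access-token': token})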
|
[
"krystof51@gmail.com"
] |
krystof51@gmail.com
|
5d3672c2a1c6bca4c072e56a67bdf929bf920c55
|
82eb24f0e9c9501727e339ec9cad9991a8ade618
|
/GREETINGBOOK/socailmedia/__init__.py
|
1349c1cd5b688fa8b10b1f8d831d3862c8b5b689
|
[] |
no_license
|
kundan1989/GreetingbookProject
|
4bf706e79b456fd0e98d745f9a8021e60d7c7ee2
|
73248f0925b5b8fbf6b6abe827213fa37a772573
|
refs/heads/master
| 2022-04-27T15:01:54.557678
| 2020-04-29T09:09:42
| 2020-04-29T09:09:42
| 259,877,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
default_app_config = 'socailmedia.apps.SocailmediaConfig'
|
[
"kundanrj3@gmil.com"
] |
kundanrj3@gmil.com
|
bcdcadd3fdf0e87272d4063922c370c1021fbee4
|
a055563d1fcc798db4c1a5aca01bcdc86fa95a5e
|
/config_file_create.py
|
45a3fe31a8d88cace8f880764c83ab6829ac914e
|
[] |
no_license
|
arun-618/cloud
|
8f7c13b4a2f49689dea68ebe669263821d7dde36
|
529cbdd6317ff6f677eb38351b17d029078f6b58
|
refs/heads/master
| 2021-04-08T02:40:15.594748
| 2020-05-12T05:28:47
| 2020-05-12T05:28:47
| 248,731,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
from configparser import ConfigParser
config = ConfigParser()
config['settings'] = {
'username': "arun",
'pwd': '618618618'
}
with open('./dev.ini', 'w') as f:
config.write(f)
|
[
"noreply@github.com"
] |
arun-618.noreply@github.com
|
8aba2942340cc5f1e675229a80ce52ff0a0f4244
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/M/martharotter/wikipediavisualiser.py
|
8077e845b760b04b80b2c4f75a79ad8f643a9261
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
import scraperwiki
scraperwiki.sqlite.attach("wikipedia_paper_scraper_until_jan_20")
data = scraperwiki.sqlite.select(
'''* from wikipedia_paper_scraper_until_jan_20.swdata
order by id desc limit 10'''
)
print "<table>"
print "<tr><th>ID</th><th>Tweet</th><th>User</th>"
for d in data:
print "<tr>"
print "<td>", d["id"], "</td>"
print "<td>", d["text"], "</td>"
print "<td>", d["from_user"], "</td>"
print "</tr>"
print "</table>"
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
21b55be9be866eef698898b832f2d7cd8f6c1b9a
|
d393e9b2feda1145a36a5d0963a75e7fa303d1dc
|
/week_5/dlp.py
|
cbcd74ac0c6c5a0448b28d7437aabae64638c723
|
[] |
no_license
|
c-ripper/coursera-crypto
|
84b8c9f325f187b90c331fa57a129e0df96368e4
|
3201adbd5b850cb7f01259dffac512fc7fd880ea
|
refs/heads/master
| 2021-07-24T03:37:19.054505
| 2017-11-04T12:16:16
| 2017-11-04T12:16:16
| 109,491,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
from gmpy2 import f_mod, invert, mpz, powmod
from multiprocessing import Pool, cpu_count
from timeit import default_timer as timer
###################################################################
# Programming Assignment for Week 5 - Solve DLP for G^X = H mod P #
###################################################################
G = mpz('11717829880366207009516117596335367088558084999998952205599979459063929499736583746670572176471460312928594829675428279466566527115212748467589894601965568')
H = mpz('3239475104050450443565264378728065788649097520952449527834792452971981976143292558073856937958553180532878928001494706097394108577585732452307673444020333')
P = mpz('13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084171')
B = mpz(2 ** 20)
def compute_x1(seq):
hash_map = {}
for x1 in seq:
left = f_mod(H * invert(powmod(G, x1, P), P), P)
hash_map[left] = x1
return hash_map
def find_x0(seq):
for x0 in seq:
right = powmod(powmod(G, B, P), x0, P)
if right in lookup_map:
x1 = lookup_map[right]
x = f_mod(x0 * B + x1, P)
print('Found x = {}'.format(x))
break
# splits a in n ~even subsets
def split(a, n):
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
print('Pre-computing table [H/(G^x1) mod P, x1]...')
t0 = timer()
with Pool() as pool:
hash_maps = pool.map(compute_x1, list(split(range(0, B), cpu_count())))
t1 = timer()
print("Time spent: {0:.3f}s".format(t1 - t0))
lookup_map = {}
# merge n hash maps into one
for hm in hash_maps:
for key in hm.keys():
lookup_map[key] = hm[key]
print('Searching for x0, so that: H/(G^x1) mod P = (G^B)^x0 mod P')
with Pool() as pool:
pool.map(find_x0, list(split(range(0, B), cpu_count())))
t2 = timer()
print("Total time spent: {0:.3f}s".format(t2 - t0))
|
[
"alexander.berezovsky@gmail.com"
] |
alexander.berezovsky@gmail.com
|
fa226e9c59edd983c0b72c84bcd9a980cb861f7c
|
148537b9709b5c1ee5ddd43c30430eccb338aec4
|
/keras_ana_mit_cent_sort.py
|
df2dd09a41e9a857cfc4d309b489bb498e5bd141
|
[] |
no_license
|
vheinitz/learn_keras
|
571137fe83b048c3e51bbe2cb6284fbefd5e404e
|
0efc894cbe30fad454a697f7ecc7c76497591dd1
|
refs/heads/master
| 2021-01-25T12:43:49.190862
| 2018-03-01T23:57:02
| 2018-03-01T23:57:02
| 123,503,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
# Valentin Heinitz, vheinitz@googlemail.com, 2018.01.01
# L E A R N I N G K E R A S WITH
# https://www.youtube.com/playlist?list=PLtPJ9lKvJ4oiz9aaL_xcZd-x0qd8G0VN_
# Using ANA-HEp2, ANCA, dDNA data sets
#
# Find mitosis in images of Centromere-pattern cells and sort them in different
# directories
import numpy as np
from keras.models import model_from_json
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import matplotlib.pyplot as plt
import os
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from shutil import copyfile
# dimensions of the images.
img_width, img_height = 64, 64
train_dir = 'c:/tmp/ana_mit_cent/train'
val_dir = 'c:/tmp/ana_mit_cent/val'
test_dir = 'c:/tmp/ana_mit_cent/val'
nb_train_samples = 2000
nb_validation_samples = 500
nb_test_samples = 5000
epochs = 5
batch_size = 25
classes = [ 'mit', 'not_mit']
directory = 'C:/tmp/ana/cells/test/cent/'
json_file = open("ana_mit_cent.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("ana_mit_cent.h5")
loaded_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
def get_class(prediction):
return 1 if prediction > 0.5 else 0
right=0
wrong=0
out_mit = 'C:/tmp/ana_mit_cent/out_cent/mit'
out_not_mit = 'C:/tmp/ana_mit_cent/out_cent/not_mit'
print(directory)
for filename in os.listdir(directory):
fn=os.path.join(directory, filename)
img = image.load_img(fn, target_size=(64, 64))
x = image.img_to_array(img)
x = x.astype('float32')
x /= 255
x = np.expand_dims(x, axis=0)
prediction = loaded_model.predict(x)
predicted = classes[get_class(prediction)]
print(fn, prediction )
if predicted == 'mit':
copyfile(fn, os.path.join(out_mit, filename) )
else:
copyfile(fn, os.path.join(out_not_mit, filename))
|
[
"vheinitz@googlemail.com"
] |
vheinitz@googlemail.com
|
0c0342a2bf22618d0cb8629199ad831bb482d324
|
84a3092c59f828651cc3608449a531ffbcb1a599
|
/main.py
|
4d6611c2f9d4708464ae2ed24e45c5635ec9d053
|
[] |
no_license
|
festivalle/PyTracker-MIDI
|
0009a8f70330f5090d1b5ee980650730cd7bb97e
|
f1443587758f388c729bc6dbae75d1a7bf306183
|
refs/heads/master
| 2022-04-25T05:45:17.676030
| 2020-04-25T03:44:58
| 2020-04-25T03:44:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,698
|
py
|
'''
CircuitPython DJ
Inspired by LSDJ and nanoloop gameboy trackers
Code snippets and libraries from the following Adafruit Learning Guides:
FruitBox Sequencer
PyBadge GamePad
Feather Waveform Generator in CircuitPython
Circuit Playground Express USB MIDI Controller and Synthesizer
'''
import time
import array
import math
import digitalio
import board
import busio
import neopixel
import displayio
import simpleio
import terminalio
from adafruit_display_shapes.rect import Rect
from adafruit_display_shapes.circle import Circle
from adafruit_display_shapes.roundrect import RoundRect
from adafruit_display_text import label
from digitalio import DigitalInOut, Direction, Pull
from adafruit_bus_device.i2c_device import I2CDevice
from gamepadshift import GamePadShift
from micropython import const
from analogio import AnalogOut
import shapes
#import pitches
from notevals import display_note
import usb_midi
import adafruit_lis3dh
import adafruit_midi
from adafruit_midi.note_on import NoteOn
from adafruit_midi.control_change import ControlChange
from adafruit_midi.pitch_bend import PitchBend
midi_note_C4 = 60
midi_cc_modwheel = 1 # was const(1)
velocity = 127
min_octave = -3
max_octave = +3
octave = 0
min_semitone = -11
max_semitone = +11
semitone = 0
# Button Constants
BUTTON_LEFT = const(128)
BUTTON_UP = const(64)
BUTTON_DOWN = const(32)
BUTTON_RIGHT = const(16)
BUTTON_SEL = const(8)
BUTTON_START = const(4)
BUTTON_A = const(2)
BUTTON_B = const(1)
pad = GamePadShift(digitalio.DigitalInOut(board.BUTTON_CLOCK),
digitalio.DigitalInOut(board.BUTTON_OUT),
digitalio.DigitalInOut(board.BUTTON_LATCH))
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.direction = digitalio.Direction.OUTPUT
speaker_enable.value = False
midi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1],
out_channel=(0))
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
OFF = (0,0,0)
PLAY = (0,10,0)
current_buttons = pad.get_pressed()
last_read = 0
bpm = 60 # quarter note beats per minute
beat = 15 / bpm # 16th note expressed as seconds, each beat is this long
#16 step sequence
speaker_enable.value = False
# We are going to send midi to another board or out over usb in this project
display = board.DISPLAY
# Set text, font, and color
text = "ChrisLeeWoo"
font = terminalio.FONT
color = 0x0000FF
# Create the text label
text_area = label.Label(font, text="ChrisLeeWoo", color=0x6F9FAF)
# Set the location
text_area.x = 52
text_area.y = 52
# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)
# Make a background color fill
#color_bitmap = displayio.Bitmap(160, 128, 1)
#color_palette = displayio.Palette(2)
#color_palette[0] = 0x000000
#bg_sprite = displayio.TileGrid(color_bitmap, x=50, y=50,
# pixel_shader=color_palette)
#splash.append(bg_sprite)
##########################################################################
#customwait(2)
# add my sprite
def customwait(wait_time):
start = time.monotonic()
while time.monotonic() < (start + wait_time):
pass
roundrect = RoundRect(40, 40, 90, 30, 10, fill=0x0, outline=0xAFAF00, stroke=6)
splash.append(roundrect)
splash.append(text_area)
# insert play startup sound here ######
customwait(1)
# Here are my screens to move through
song = displayio.Group(max_size=64)
mixgrid = displayio.Group(max_size=64)
instrument_screen = displayio.Group(max_size=64)
settings = displayio.Group(max_size=64)
# Song screen
lbl_song = label.Label(font, text='Song', color=0xff9F00, x=10, y=10)
song.append(lbl_song)
# Instruments screen
lbl_instruments = label.Label(font, text='Instruments', color=0xff9F00, x=10, y=10)
instrument_screen.append(lbl_instruments)
# Settings screen
lbl_settings = label.Label(font, text='Settings', color=0xff9F00, x=10, y=10)
settings.append(lbl_settings)
for m in range(64):
# This initializes my array for the display grid
blankness = label.Label(font, text=" ", color=0xff9Fff)
mixgrid.append(blankness)
screen_rects = -1
for g in range(4):
for h in range(4):
screen_rects += 1
gridsq = Rect( (51+25*g), (5+25*h), 25, 25, fill=0x0, outline=0x555555, stroke=1)
mixgrid.pop(screen_rects)
mixgrid.insert( (screen_rects) , gridsq)
# mixgrid values 0 to 15
display.show(mixgrid)
pixel_pin = board.D8
num_pixels = 8
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.3, auto_write=False)
def set_grid_disp(note,spot):
#be aware of overwriting a current note
# this changes the text in the box
# clear the screen starting at (54,7) with size 20
mixgrid.pop(spot+16)
thing = label.Label(font, text=note, color=0xff9F00)
thing.x = ( pixelocate_x(spot) )
thing.y = ( pixelocate_y(spot) )
#mixgrid 16 to 31
mixgrid.insert(spot+16, thing)
selection = Rect( (51), (5), 25, 25, outline=0xFFAA00, stroke=3)
mixgrid.pop(32)
mixgrid.insert(32,selection)
selected = 0
# mixgrid 32
gridbeat = Rect( (51), (5), 25, 25, outline=0xF00000, stroke=3)
mixgrid.pop(33)
mixgrid.insert(33, gridbeat)
#mixgrid 33
def set_note_playing(note):
# mixgrid 34
mixgrid.pop(34)
noteval = label.Label(font, text=display_note(note), color=0xff9F00) #initialize text in each box
noteval.x = 5
noteval.y = 119
mixgrid.insert(34, noteval)
def disp_bpm(bpm):
#mixgrid 35
mixgrid.pop(35)
bpm_val = label.Label(font, text=( "BPM: " + str(bpm) ), color=0x0f9Fff) #initialize text in each box
bpm_val.x = 5
bpm_val.y = 12 #59
mixgrid.insert(35, bpm_val)
mixgrid.pop(36)
screen_label = label.Label(font, text=( "PTN: 00" ), color=0x0f9Fff) #initialize text in each box
screen_label.x = 5
screen_label.y = 24 #12
mixgrid.insert(36, screen_label)
# mixgrid 36
mixgrid.pop(37)
screen_label = label.Label(font, text=( "INS: " + "0" ), color=0x0f9Fff) #initialize text in each box
screen_label.x = 5
screen_label.y = 36 #42
mixgrid.insert(37, screen_label)
# mixgrid 37 as INS number
mixgrid.pop(38)
screen_label = label.Label(font, text=( "EFF: " + "0" ), color=0x0f9Fff) #initialize text in each box
screen_label.x = 5
screen_label.y = 48 #42
mixgrid.insert(38, screen_label)
# mixgrid 37 as EFF number
# may allow more than one per step on the pattern page?
def sequencer(seq, beat, gridbeat, x):
# I have a feeling that each step needs to be iterated indiviually in the running loop
beatstep = x
#gridbeat = Rect( (52), (5), 24, 24, outline=0xF00000, stroke=3)
beatstep = selection_update('right',beatstep, gridbeat)
if seq[x][0] == 0:
customwait(beat)
else:
midi.send(NoteOn(seq[x][0], 127, channel = seq[x][1]))
# setting channel as x is just to prove we can switch channels or instruments for each step
# fully supporting this means passing seq[] to sequencer fn includes
# note number, midi channel, and any CC for that step as well
customwait(beat)
midi.send(NoteOn(seq[x][0], 0))
def selection_update(dir,current, type):
if dir == 'left':
if current % 4 != 0: #0, 4, 8, 12
type.x = type.x - 25
current -= 1
return current
elif current == 0:
return current
else:
type.x = type.x + 25 * 3
type.y = type.y - 25
current -= 1
return current
if dir == 'right':
if current % 4 != 3: #3, 7, 11, 15
type.x = type.x + 25
current += 1
return current
elif current == 15:
type.x = 51
type.y = 5
current = 0
return current
else:
type.x = type.x - 25 * 3
type.y = type.y + 25
current += 1
return current
if dir == 'up':
if current > 3 : #3, 7, 11, 15
type.y = type.y - 25
current -= 4
return current
elif current < 4:
return current
else:
type.x = type.x - 25 * 3
type.y = type.y + 25
current += 1
return current
if dir == 'down':
if current < 12: #3, 7, 11, 15
type.y = type.y + 25
current += 4
return current
elif current > 11:
return current
else:
type.x = type.x - 25 * 3
type.y = type.y + 25
current += 1
return current
def pixelocate_x(number):
return 55 + 25 * ( number % 4 )
def pixelocate_y(number):
if number < 4:
return 16
elif number < 8:
return 16 + 25
elif number < 12:
return 16 + 25*2
else: return 16 + 25*3
def change_note(position, amount):
note, channel, cc = seq[position]
note += amount
if note >=0 and note <=127:
seq[position][0] = note
set_grid_disp(display_note(note),position)
print('change_note @',position, 'amount',amount,'note', note)
print("playing")
print (display_note(10))
x = 0
seq = [[10,0,0], [20,0,0], [0,0,0], [40,0,0],
[50,0,0], [60,0,0], [70,0,0], [80,0,0],
[0,0,0], [20,0,0], [30,0,0], [40,0,0],
[50,0,0], [60,0,0], [70,0,0], [80,0,0]]
# seq array is [note (0-127), channel(0-15), CC ( 0xffff ) ]
# CC value is where the first byte (ff) is the message, the second byte is the value
# I may or may not need more values to pass, but this was a good start
for step in range(16):
# we are setting up an initial sequence in this demo program
set_grid_disp(display_note(seq[step][0]), step)
disp_bpm(bpm)
print("stopped")
playing = False
screen = 0 # I will need to create more display screens, probably best to put in separate .py files
def show_screen(screen_number):
if screen_number == 0:
# the main song screen, it should eventually show a pattern number in the grid
display.show(song)
elif screen_number == 1:
display.show(mixgrid)
elif screen_number == 2:
display.show(instrument_screen)
elif screen_number == 3:
display.show(settings)
def read_buttons():
# all the button states I want to read, then call fn depending on current screen
# maybe making ths a switch / case is best
return
def pattern_button():
return
def song_button():
return
def instrument_button():
return
def settings_button():
return
while True:
#x = 0
#y = 0
#z = 0
#COLOR = (x,y,z)
#for v in range (5):
# x = v+1 % 255
# y = v+1 % 255
# z = v+1 % 255
#COLOR = (x,y,z)
#pixels.fill(OFF)
#pixels.show()
if playing:
gridbeat.outline = 0x009900
sequencer (seq, beat, gridbeat, x)
x = (x+1) % 16
pixels.fill(PLAY)
pixels.show()
set_note_playing((seq[x][0]))
else:
gridbeat.outline = 0xff0000
pixels.fill(OFF)
pixels.show()
# Reading buttons too fast returns 0
if (last_read + 0.1) < time.monotonic():
buttons = pad.get_pressed()
last_read = time.monotonic()
if current_buttons != buttons:
# Respond to the buttons
# The A button is the one on the right side, like on NES!
#######################################
# Sel ________________ Start #
# | | #
# ^ | | A #
# < > | | B #
# v | | #
# |________________| #
# @ @ @ @ @ #
#######################################
read_buttons()
            if (buttons == BUTTON_SEL + BUTTON_A):  # SEL and A pressed together
customwait(.1)
print ("A + SEL", buttons)
elif (buttons == BUTTON_A + BUTTON_RIGHT > 0 ):
print ("A + Right", buttons)
elif (buttons == BUTTON_A + BUTTON_LEFT > 0 ):
print ("A + Left", buttons)
elif (buttons == BUTTON_A + BUTTON_UP > 0 ):
print ("A + Up", buttons)
elif (buttons == BUTTON_A + BUTTON_DOWN > 0 ):
print ("A + Down", buttons)
elif (buttons == BUTTON_B + BUTTON_RIGHT > 0 ):
print ("B + Right", buttons)
change_note(selected,12) # up an octave
elif (buttons == BUTTON_B + BUTTON_LEFT > 0 ):
print ("B + Left", buttons)
change_note(selected,-12) # down an octave
elif (buttons == BUTTON_B + BUTTON_UP > 0 ):
print ("B + Up", buttons) # up a MIDI note
change_note(selected,1)
elif (buttons == BUTTON_B + BUTTON_DOWN > 0 ):
print ("B + Down", buttons) # down a MIDI note
change_note(selected,-1)
elif (buttons & BUTTON_LEFT) > 0:
selected = selection_update('left', selected, selection)
print('Left', selected)
elif (buttons & BUTTON_RIGHT) > 0:
selected = selection_update('right', selected, selection)
print('Right', selected)
elif (buttons & BUTTON_UP) > 0 :
selected = selection_update('up', selected, selection)
print('Up', selected)
#print('Up', buttons)
elif (buttons & BUTTON_DOWN) > 0 :
selected = selection_update('down', selected, selection)
print('Down', selected)
elif (buttons & BUTTON_A) > 0 :
print('A', buttons)
elif (buttons & BUTTON_B) > 0 :
print('B', buttons)
elif (buttons & BUTTON_START) > 0 :
if playing == False:
playing = True
else:
playing = False
print('Start', buttons)
elif (buttons & BUTTON_SEL) > 0 :
print('Select', buttons)
screen += 1
if screen >3:
screen = 0
show_screen(screen)
current_buttons = buttons
|
[
"noreply@github.com"
] |
festivalle.noreply@github.com
|
8a7325c4058cee17ab12e9eb1c91d00c7cfcd48e
|
b144ca4e7e7e53d35b6c4d56567fc00b771b3e2d
|
/Scripts/stock_ts_loader.py
|
d57b9c879b46e9bc4ed3e1577cf4848300a17432
|
[] |
no_license
|
junbai94/quant_trading
|
02fb052a490e773365ab421b7cb2c4b1aebfaa51
|
3d20ddcc602a586461bd8efddafe5afeed01c416
|
refs/heads/master
| 2021-08-29T18:11:14.369913
| 2017-12-14T14:53:46
| 2017-12-14T14:53:46
| 110,840,452
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,704
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 27 09:42:12 2017
@author: Junbai
Load SZ50 data from TuShare
Add in get_k_data tomorrow
"""
import tushare as ts
import pandas as pd
import sqlite3
import time
import datetime
DATABASE_PATH = "C:/Users/user/quant_analysis/Database/cn_stock.db"
sql_unadjusted = "insert into cn_stocks_daily_ts_unadj (code, date, open, high, close, low, volume, price_change, p_change, ma5, ma10, ma20, v_ma5, v_ma10, v_ma20, turnover) \
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
sql_adjusted = "insert into cn_stocks_daily_ts_adj (code, date, open, high, close, low, volume, amount) values \
(?, ?, ?, ?, ?, ?, ?, ?)"
start_date = '2014-01-01'
end_date = datetime.datetime.now().strftime("%Y-%m-%d")
def ts_sz50_loader(adjustment=True, code_list=None, today_only=False, start_date=start_date, end_date=end_date):
print('----------------------------------------------------------------------')
print('loading commencing')
print('----------------------------------------------------------------------')
start = time.time()
# get sz50 codes and company names
sz50 = ts.get_sz50s()
if code_list:
codes = code_list
else:
codes = sz50['code']
if today_only:
start_date = end_date
failed = []
conn = sqlite3.connect(DATABASE_PATH)
c = conn.cursor()
for code in codes:
try:
if adjustment:
df = ts.get_h_data(code, retry_count=5, pause=10, start=start_date, end=end_date)
sql = sql_adjusted
else:
df = ts.get_hist_data(code, retry_count=5, pause=10, start=start_date, end=end_date)
sql = sql_unadjusted
        except Exception:
print (code + ' loading FAILED')
failed.append(code)
continue
for row in df.iterrows():
if adjustment:
date = row[0].isoformat()
else:
date = row[0]
data = row[1]
data = list(data)
data.insert(0, date)
data.insert(0, code)
c.execute(sql, tuple(data))
print (code + ' loaded')
conn.commit()
conn.close()
end = time.time()
print('----------------------------------------------------------------------')
print('loading completed. total run time: %.2fs' % (end-start))
    print('----------------------------------------------------------------------')
    return failed
if __name__ == '__main__':
failed = ts_sz50_loader(adjustment=False, start_date='2016-01-01')
|
[
"noreply@github.com"
] |
junbai94.noreply@github.com
|
2aa6bf269adef4a8e482ad5d9fd5320bbae3df96
|
21c5b922925922c5358075f1b11361cca022530e
|
/day5/day5a.py
|
df66bae926b0f43c32c5bcae941c65ec3788472e
|
[] |
no_license
|
sjstein/aoc2020
|
8efd6aeebfa6ccea4e95debe4d56d5eb88025d22
|
be024ad7daf1e9385a18e9263fed69be07d8a0d9
|
refs/heads/main
| 2023-01-31T16:51:58.907334
| 2020-12-19T03:54:29
| 2020-12-19T03:54:29
| 317,272,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,862
|
py
|
"""
--- Day 5: Binary Boarding ---
You board your plane only to discover a new problem: you dropped your boarding pass! You aren't sure which seat is
yours, and all of the flight attendants are busy with the flood of people that suddenly made it through passport
control.
You write a quick program to use your phone's camera to scan all of the nearby boarding passes (your puzzle input);
perhaps you can find your seat through process of elimination.
Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like
FBFBBFFRLR, where F means "front", B means "back", L means "left", and R means "right".
The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane (numbered 0
through 127). Each letter tells you which half of a region the given seat is in. Start with the whole list of rows;
the first letter indicates whether the seat is in the front (0 through 63) or the back (64 through 127).
The next letter indicates which half of that region the seat is in, and so on until you're left with exactly one row.
For example, consider just the first seven characters of FBFBBFFRLR:
Start by considering the whole range, rows 0 through 127.
F means to take the lower half, keeping rows 0 through 63.
B means to take the upper half, keeping rows 32 through 63.
F means to take the lower half, keeping rows 32 through 47.
B means to take the upper half, keeping rows 40 through 47.
B keeps rows 44 through 47.
F keeps rows 44 through 45.
The final F keeps the lower of the two, row 44.
The last three characters will be either L or R; these specify exactly one of the 8 columns of seats on the plane
(numbered 0 through 7). The same process as above proceeds again, this time with only three steps. L means to keep
the lower half, while R means to keep the upper half.
For example, consider just the last 3 characters of FBFBBFFRLR:
Start by considering the whole range, columns 0 through 7.
R means to take the upper half, keeping columns 4 through 7.
L means to take the lower half, keeping columns 4 through 5.
The final R keeps the upper of the two, column 5.
So, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5.
Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat has
ID 44 * 8 + 5 = 357.
Here are some other boarding passes:
BFFFBBFRRR: row 70, column 7, seat ID 567.
FFFBBBFRRR: row 14, column 7, seat ID 119.
BBFFBBFRLL: row 102, column 4, seat ID 820.
As a sanity check, look through your list of boarding passes. What is the highest seat ID on a boarding pass?
Your puzzle answer was 933.
"""
import math
def normal_round(n):
# Added this rounding function as we need to round 1/2 to 1 and python uses "banker's rounding" with the normal
# round method.
if n - math.floor(n) < 0.5:
return math.floor(n)
return math.ceil(n)
def find_row(rowstr):
maxrow = 127
minrow = 0
for i in range(7):
if rowstr[i] == 'B':
minrow = minrow + normal_round((maxrow - minrow) / 2)
else:
maxrow = maxrow - normal_round((maxrow + 1 - minrow) / 2)
return int(minrow)
def find_col(seatstr):
maxcol = 7
mincol = 0
for i in range(3):
if seatstr[i] == 'R':
mincol = mincol + normal_round((maxcol - mincol) / 2)
else:
maxcol = maxcol - normal_round((maxcol + 1 - mincol) / 2)
return int(mincol)
# Begin Main
highest = 0
inputfile = 'input.txt'
with open(inputfile, 'r') as fp:
    for line in fp:
        head = line[:7]
        tail = line[7:10]
        row = find_row(head)
        col = find_col(tail)
        sid = row * 8 + col
        print(f'Found sid: {sid} at row: {row} and col: {col}')
        if sid > highest:
            highest = sid
print(f'Found highest sid: {highest}')
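# The halving above is just binary: F/L pick the 0 bit and B/R the 1 bit, so the
# whole 10-character pass is the seat ID written in binary (an equivalent sketch):
def seat_id(boarding_pass):
    return int(boarding_pass.strip().translate(str.maketrans('FBLR', '0101')), 2)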
|
[
"s.joshua.stein@gmail.com"
] |
s.joshua.stein@gmail.com
|
f9fe3338792613d6f1e728f251acd0f70bd12db0
|
297d5aa6906eac1c05093516a033a6670a4ea88b
|
/src/haverster/streamtimeline.py
|
971de43c23d4f2d115086da1cadb97f93b7cce7b
|
[] |
no_license
|
xinzhel97/Project_Cloud_Computing_and_Cluster
|
05d3af735aec00bea6240efbec2671d926d419ce
|
e49515b708322753cc8d1ef75dbfa06f650f4291
|
refs/heads/master
| 2022-11-27T01:59:47.633324
| 2020-08-09T12:57:55
| 2020-08-09T12:57:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,004
|
py
|
from __future__ import absolute_import, print_function
import tweepy
from auth import consumer_key,consumer_secret,access_token,access_token_secret
from timelineThread import timelineThread
import json
import couchdb
from dblogin import user, password
import sys
class dbStreamListener(tweepy.StreamListener):
def __init__(self, api,db):
self.api = api
self.db = db
self.count = 0
def on_data(self, data):
try:
tweet = json.loads(data)
except Exception:
print("Failed to parse tweet data\n")
tweet = None
if tweet:
if "id" in tweet and "text" in tweet and "id_str" in tweet:
self.count += 1
user = tweet["user"]["screen_name"]
t = timelineThread(self.count,self.api,user,self.db)
t.start()
try:
print("%s: %s\n" % (tweet["user"]["screen_name"], tweet["full_text"]))
except Exception:
print("%s: %s\n" % (tweet["user"]["screen_name"], tweet["text"]))
else:
print("Received a responce that is not a tweet\n")
print(tweet)
if self.count >= 10:
print("finish\n")
            sys.exit(0)
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# select database
server = couchdb.Server("http://%s:%s@localhost:5984/" % (user,password))
dbname = "test4"
if dbname in server:
db = server[dbname]
else:
db = server.create(dbname)
listener = dbStreamListener(api,db)
stream = tweepy.Stream(auth, listener,tweet_mode="extended")
stream.filter(locations=[112.6233053121, -44.1178998761, 154.0490928206, -10.6805030025])
|
[
"phyjeremy@outlook.com"
] |
phyjeremy@outlook.com
|
608b68e973e8bfe9e3922fe3c69dc0ff9a731796
|
bf97a169e18a256294018c0a81837e59680859d8
|
/ValidBST.py
|
ce7107a9e086187d0b79f13a34fc2573f8a6d14c
|
[] |
no_license
|
oskip/IB_Algorithms
|
780904842372a608362528758377344e126d3012
|
094d871ac4b808d883d5af5430bac47782132c6b
|
refs/heads/master
| 2021-01-19T04:25:13.948498
| 2016-07-04T16:55:09
| 2016-07-04T16:55:09
| 50,581,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
# Given a binary tree, determine if it is a valid binary search tree (BST).
#
# Assume a BST is defined as follows:
#
# The left subtree of a node contains only nodes with keys less than the node's key.
# The right subtree of a node contains only nodes with keys greater than the node's key.
# Both the left and right subtrees must also be binary search trees.
# Example :
#
# Input :
# 1
# / \
# 2 3
#
# Output : 0 or False
#
#
# Input :
# 2
# / \
# 1 3
#
# Output : 1 or True
# Return 0 / 1 ( 0 for false, 1 for true ) for this problem
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param A : root node of tree
# @return an integer
def isValidBST(self, A):
return self.isValidBSTIter(A, -2**31, 2**31-1)
def isValidBSTIter(self, A, minVal, maxVal):
if A is None: return True
if minVal <= A.val < maxVal:
            return self.isValidBSTIter(A.left, minVal, A.val) and self.isValidBSTIter(A.right, A.val, maxVal)
else: return False
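# A small usage sketch based on the examples in the comments above:
root = TreeNode(2)
root.left, root.right = TreeNode(1), TreeNode(3)
print(Solution().isValidBST(root))  # True for the tree 2 / (1, 3)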
|
[
"oskipet@gmail.com"
] |
oskipet@gmail.com
|
396ea4c1da477abeef83272f049b43b9c2305edb
|
ac94172f245ee73e56e4711f8663a8c466f1fa23
|
/110.平衡二叉树/solution.py
|
f890b70c2bf6ad0fad59fe35a57a901cb7450e07
|
[] |
no_license
|
QtTao/daily_leetcode
|
7aaed0f161f27cd950774fae6d966b04d7e3b76f
|
52756b30e9d51794591aca030bc918e707f473f1
|
refs/heads/main
| 2023-06-05T05:58:36.214308
| 2021-06-28T11:18:05
| 2021-06-28T11:18:05
| 347,343,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
# author : Tao Qitian
# email : taoqt@mail2.sysu.edu.cn
# datetime : 2021/5/15 01:01
# filename : solution.py
# description : LC 110 Balanced Binary Tree
# TreeNode is normally provided by LeetCode; a minimal definition is included
# here so the annotations below resolve when the file runs standalone:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def max_depth(self, root: TreeNode) -> int:
        """ Maximum depth of a binary tree """
        if not root:
            return 0
        return max(self.max_depth(root.left), self.max_depth(root.right)) + 1
    def isBalancedTop2Bottom(self, root: TreeNode) -> bool:
        """ Top-down check """
        if not root:
            return True
        elif abs(self.max_depth(root.left) - self.max_depth(root.right)) > 1:
            return False
        return self.isBalancedTop2Bottom(root.left) and self.isBalancedTop2Bottom(root.right)
    def isBalanced(self, root: TreeNode) -> bool:
        """ Bottom-up check (the approach worth mastering) """
        is_balanced = True
        def get_height(root: TreeNode) -> int:
            nonlocal is_balanced
            # Optimization: once the tree is known to be unbalanced, return
            # immediately and stop traversing.
            if not root or not is_balanced:
                return 0
            # compute the heights of the left and right subtrees bottom-up
            left_height = get_height(root.left) + 1
            right_height = get_height(root.right) + 1
            # while propagating heights upward, a left/right height difference
            # greater than 1 means the tree is not balanced
            if abs(left_height - right_height) > 1:
                is_balanced = False
            return max(left_height, right_height)
        get_height(root)
        return is_balanced
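# A tiny usage sketch: a two-leaf tree is balanced, a left chain of depth 3 is not.
balanced = TreeNode(1, TreeNode(2), TreeNode(3))
skewed = TreeNode(1, TreeNode(2, TreeNode(3)))
print(Solution().isBalanced(balanced))  # True
print(Solution().isBalanced(skewed))    # False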
|
[
"taoqt@mail2.sysu.edu.cn"
] |
taoqt@mail2.sysu.edu.cn
|
b0d98af14e50f4bbabae044085c11c80ef37c9df
|
81c9bd683398dcd959e623acb3eb5208e5b20ca1
|
/classify_images.py
|
24fb1e02decc958b4c2b46a98c2e15c4ca507668
|
[] |
no_license
|
HienPhanVN/Use-a-Pre-trained-Image-Classifier-to-Identify-Dog-Breeds
|
de71bd31d68d0bcac146e12571a4847a662b5c56
|
a08091a047ef8e08c054e650d67f1046ce524737
|
refs/heads/master
| 2023-03-21T15:45:58.253372
| 2019-06-29T22:56:59
| 2019-06-29T22:56:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,936
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/classify_images.py
#
# PROGRAMMER: Shamil Jamion
# DATE CREATED: 17/04/2019
# REVISED DATE:
# PURPOSE: Create a function classify_images that uses the classifier function
# to create the classifier labels and then compares the classifier
# labels to the pet image labels. This function inputs:
# -The Image Folder as image_dir within classify_images and function
# and as in_arg.dir for function call within main.
# -The results dictionary as results_dic within classify_images
# function and results for the function call within main.
# -The CNN model architecture as model within classify_images function
# and in_arg.arch for the function call within main.
# This function uses the extend function to add items to the list
# that's the 'value' of the results dictionary. You will be adding the
# classifier label as the item at index 1 of the list and the comparison
# of the pet and classifier labels as the item at index 2 of the list.
#
##
# Imports classifier function for using CNN to classify images
from classifier import classifier
# TODO 3: Define classify_images function below, specifically replace the None
# below by the function definition of the classify_images function.
# Notice that this function doesn't return anything because the
# results_dic dictionary that is passed into the function is a mutable
# data type so no return is needed.
#
def classify_images(images_dir, results_dic, model):
"""
Creates classifier labels with classifier function, compares pet labels to
the classifier labels, and adds the classifier label and the comparison of
the labels to the results dictionary using the extend function. Be sure to
format the classifier labels so that they will match your pet image labels.
The format will include putting the classifier labels in all lower case
letters and strip the leading and trailing whitespace characters from them.
For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese'
so the classifier label = 'maltese dog, maltese terrier, maltese'.
Recall that dog names from the classifier function can be a string of dog
names separated by commas when a particular breed of dog has multiple dog
names associated with that breed. For example, you will find pet images of
a 'dalmatian'(pet label) and it will match to the classifier label
'dalmatian, coach dog, carriage dog' if the classifier function correctly
classified the pet images of dalmatians.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
in test_classifier.py Please refer to this program prior to using the
classifier() function to classify images within this function
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by the classifier function (string)
results_dic - Results Dictionary with 'key' as image filename and 'value'
as a List. Where the list will contain the following items:
index 0 = pet image label (string)
--- where index 1 & index 2 are added by this function ---
NEW - index 1 = classifier label (string)
NEW - index 2 = 1/0 (int) where 1 = match between pet image
and classifier labels and 0 = no match between labels
model - Indicates which CNN model architecture will be used by the
classifier function to classify the pet images,
values must be either: resnet alexnet vgg (string)
Returns:
None - results_dic is mutable data type so no return needed.
"""
    for key in results_dic:
        # run the classifier on the image, then normalize its label to
        # lower case with leading/trailing whitespace stripped
        classified = classifier(images_dir + '/' + key, model)
        model_label = classified.lower().strip()
        # index 0 of the results list holds the pet image (truth) label
        truth = results_dic[key][0]
# Classifier Label
if truth in model_label:
results_dic[key].extend((model_label,1))
else:
results_dic[key].extend((model_label,0))
print(results_dic)
|
[
"noreply@github.com"
] |
HienPhanVN.noreply@github.com
|
50e0d7ba43a2d2eddaf378c06555d32d6b5d604f
|
a28e1e659e4dd82be5e253443b0c7a808cdcee92
|
/SortAlgorithm/QuickSort.py
|
ff67210431d9014a59585e9e5f3016511d39ca00
|
[] |
no_license
|
LeBron-Jian/BasicAlgorithmPractice
|
b2af112e8f1299fe17cf456111276fce874586cb
|
51943e2c2c4ec70c7c1d5b53c9fdf0a719428d7a
|
refs/heads/master
| 2023-06-07T19:12:16.362428
| 2023-05-27T06:58:12
| 2023-05-27T06:58:12
| 217,682,743
| 13
| 14
| null | 2020-09-12T01:50:35
| 2019-10-26T08:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,569
|
py
|
# -*- coding: utf-8 -*-
'''
Quick sort
Place a chosen element in its final position: everything to its left is smaller,
everything to its right is larger. Placing every element this way sorts the list.
In the normal case, quick sort runs in O(nlogn).
There is a worst case: when a placement never splits the list into two parts,
the complexity degrades to O(n**2).
To avoid designing for this worst case, do not take the first element as the
pivot; take a random element of the list instead.
'''
# placement (partition) function
def partition(data, left, right):  # left and right point at the two ends
    # save the leftmost element in tmp; the slot at left is now free
    tmp = data[left]
    # keep going as long as the two pointers have not met
    while left < right:
        # while the element on the right is >= tmp, it stays where it is
        while left < right and data[right] >= tmp:
            right -= 1  # move the right pointer one step to the left
        # data[right] is smaller than tmp: move it into the free slot on the left
        data[left] = data[right]
        print('left:', data)
        # while the element on the left is <= tmp, it stays where it is
        while left < right and data[left] <= tmp:
            left += 1  # move the left pointer one step to the right
        # data[left] is larger than tmp: move it into the free slot on the right
        data[right] = data[left]
        print('right:', data)
    # finally, put the saved value into the slot where the two pointers met
    data[left] = tmp
    return left  # return the pivot's final position
# With the placement function written, recursion completes the sort
def quick_sort(data, left, right):
    if left < right:
        # place the pivot and get its final position
        mid = partition(data, left, right)
        # sort the elements to its left
        quick_sort(data, left, mid - 1)
        # sort the elements to its right
        quick_sort(data, mid + 1, right)
    return data
li = [5, 7, 4, 6, 3, 1, 2, 9, 8]
print('start:', li)
quick_sort(li, 0, len(li) - 1)
print('end:', li)
'''
start: [5, 7, 4, 6, 3, 1, 2, 9, 8]
left: [2, 7, 4, 6, 3, 1, 2, 9, 8]
right: [2, 7, 4, 6, 3, 1, 7, 9, 8]
left: [2, 1, 4, 6, 3, 1, 7, 9, 8]
right: [2, 1, 4, 6, 3, 6, 7, 9, 8]
left: [2, 1, 4, 3, 3, 6, 7, 9, 8]
right: [2, 1, 4, 3, 3, 6, 7, 9, 8]
left: [1, 1, 4, 3, 5, 6, 7, 9, 8]
right: [1, 1, 4, 3, 5, 6, 7, 9, 8]
left: [1, 2, 3, 3, 5, 6, 7, 9, 8]
right: [1, 2, 3, 3, 5, 6, 7, 9, 8]
left: [1, 2, 3, 4, 5, 6, 7, 9, 8]
right: [1, 2, 3, 4, 5, 6, 7, 9, 8]
left: [1, 2, 3, 4, 5, 6, 7, 9, 8]
right: [1, 2, 3, 4, 5, 6, 7, 9, 8]
left: [1, 2, 3, 4, 5, 6, 7, 8, 8]
right: [1, 2, 3, 4, 5, 6, 7, 8, 8]
end: [1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
# ***************** Method 2 **********************
def quick_sort1(array, left, right):
    if left >= right:
        return
    low = left
    high = right
    key = array[low]  # take the first value as the pivot
    while low < high:  # as long as the two pointers have not met
        while low < high and array[high] > key:  # scan right-to-left until a value <= key is found
            high -= 1
        # swap key (array[low]) with the smaller array[high] found above
        array[low] = array[high]
        array[high] = key
        # think about why this is <= rather than <
        while low < high and array[low] <= key:  # scan left-to-right until a value > key is found
            low += 1
        # swap array[high] (which now holds key) with the larger array[low] found above
        array[high] = array[low]
        array[low] = key
    # recurse on the left partition in the same way
    quick_sort1(array, left, low-1)
    # then recurse on the right partition in the same way
    quick_sort1(array, low+1, right)
# li = [5, 7, 4, 6, 3, 1, 2, 9, 8]
# print('start:', li)
# quick_sort1(li, 0, len(li) - 1)
# print('end:', li)
def quick_sort(data):
    if len(data) >= 2:  # recursion entry; the else branch is the exit
        mid = data[len(data) // 2]  # pick the middle element as pivot (first or last would also work)
        left, right = [], []  # lists for the values on either side of the pivot
        data.remove(mid)  # remove the pivot from the original list
        for num in data:
            if num >= mid:
                right.append(num)
            else:
                left.append(num)
        return quick_sort(left) + [mid] + quick_sort(right)
    else:
        return data
li = [3, 2, 4, 5, 6, 7, 1]
print(quick_sort(li))
# [1, 2, 3, 4, 5, 6, 7]
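# The docstring at the top suggests avoiding the worst case by choosing a random
# pivot instead of the first element; a minimal sketch reusing partition() above:
import random

def random_pivot_partition(data, left, right):
    i = random.randint(left, right)            # pick a random pivot index
    data[left], data[i] = data[i], data[left]  # move it to the front
    return partition(data, left, right)        # then partition as before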
|
[
"1171737614@qq.com"
] |
1171737614@qq.com
|
711780889d1840fe7188c450ea316d8925f126f2
|
01c9a54caea0d20454a9c3d91a5a6757ca78b94d
|
/chapter8/formatted_name.py
|
5109974d00e59fa8219164c35b1a58b899cbd2bf
|
[] |
no_license
|
cogoming/astartpy
|
4c231e00df80d5d5bcdad63f3e6c36e6a1037edd
|
c60bc5324c6683b01389e218d678b549a91be0c4
|
refs/heads/master
| 2021-08-22T21:14:21.967097
| 2017-12-01T09:27:18
| 2017-12-01T09:27:18
| 111,042,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
def get_formatted_name(first_name, last_name):
    """Return a neatly formatted full name."""
    full_name = first_name + ' ' + last_name
    return full_name.title()
musician = get_formatted_name('jimi', 'hendrix')
print(musician)
# Making an argument optional
def get_formatted_name(first_name, last_name, middle_name=''):
    """Return a neatly formatted full name."""
    full_name = first_name + ' ' + middle_name + ' ' + last_name
    return full_name.title()
musician = get_formatted_name('john', 'lee', 'hooker')
print(musician)
musician = get_formatted_name('jimi', 'hendrix')
print(musician)
musician = get_formatted_name('john', 'hooker', 'lee')
print(musician)
|
[
"qiuming@mq.com"
] |
qiuming@mq.com
|
1c2e6faec149da7d7536536c274983a294122359
|
1f5cf4f2d2b48ec347a26122d5e121eaa9ccf64e
|
/tasks1/task1.py
|
4b9ecbff41c5e5218ff6a4616094b0c564e2bc53
|
[] |
no_license
|
entick/training_it_cloud
|
52c49c1c6cadb62d9c6500fece2f1f698ee043b6
|
57c5759883718de357425938b0c42f609e9da490
|
refs/heads/ivan
| 2021-06-07T19:24:27.291478
| 2017-07-12T15:10:22
| 2017-07-12T15:10:22
| 96,106,290
| 0
| 0
| null | 2017-07-03T12:02:08
| 2017-07-03T12:02:08
| null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
text = raw_input()
text = text.upper()
repllist = [",",".",":",";","!","?"]
for x in repllist:
text=text.replace(x,"")
words = text.split(" ")
count = 0
for x in words:
    if len(x) > 0:
        if x == x[::-1]:
            count += 1
print(count)
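# e.g. the input "Anna, meet Otto!" prints 2: after upper-casing and stripping
# punctuation, ANNA and OTTO each read the same reversed.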
|
[
"noreply@github.com"
] |
entick.noreply@github.com
|
c564cb8a4f8fb15ca5244ece24f0664747b45e2e
|
2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5
|
/archive/513FindBottomLeftTreeValue.py
|
b9b3b6ac780a028c6dda69b1deef818d7aa4d7fd
|
[] |
no_license
|
doraemon1293/Leetcode
|
924b19f840085a80a9e8c0092d340b69aba7a764
|
48ba21799f63225c104f649c3871444a29ab978a
|
refs/heads/master
| 2022-10-01T16:20:07.588092
| 2022-09-08T02:44:56
| 2022-09-08T02:44:56
| 122,086,222
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 1,254
|
py
|
# coding=utf-8
'''
Created on 2017-02-15
@author: Administrator
'''
from collections import deque
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def LevelTraverseFromRoot(root):
ans = []
if root:
current_level = 0
q = deque()
q.append((root, 0))
temp = []
while q:
node, level = q.popleft()
if level > current_level:
ans.append(temp)
temp = []
current_level += 1
temp.append(node.val)
if node.left:
q.append((node.left, current_level + 1))
if node.right:
q.append((node.right, current_level + 1))
ans.append(temp)
return ans
return LevelTraverseFromRoot(root)[-1][0]
|
[
"yanhuang1293@gmail.com"
] |
yanhuang1293@gmail.com
|
f62bc2a10938517435c706a53739b1d178c7a593
|
dd5c169a925ada08c053723b8c85b3d0ae2c126e
|
/QuadraticEquation.py
|
36043f06a1595542a79c988a00dcc8e3301858be
|
[
"Apache-2.0"
] |
permissive
|
rajeshjaava/python
|
01f8a38d9436cfd43839533fd32286c95ae84cc5
|
fefcd92a6ea2db3dddb05365a299636bb6bf50b6
|
refs/heads/main
| 2023-07-01T21:26:38.526043
| 2021-08-02T15:05:01
| 2021-08-02T15:05:01
| 313,374,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# Solve a*x**2 + b*x + c = 0 using the quadratic formula
import cmath
a=1
b=5
c=6
d=(b**2)-(4*a*c)
sol1=(-b-cmath.sqrt(d))/(2*a)
sol2=(-b+cmath.sqrt(d))/(2*a)
print('The solutions are {0} and {1}'.format(sol1,sol2))
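# Because cmath.sqrt is used, the same code handles a negative discriminant:
# e.g. a=1, b=2, c=5 gives d=-16 and prints the complex roots (-1-2j) and (-1+2j).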
|
[
"rajeshthokala10@gmail.com"
] |
rajeshthokala10@gmail.com
|
24993c1168307724a1daf85bbc352dab641caf90
|
aff612112744272579ec081b189d087e636d923a
|
/website/app.py
|
789fc0ef8f5bd290937b4740b7d49ae7270b8eda
|
[] |
no_license
|
pythonguru101/CrpytoCurrentMarket
|
a76bfe9ab647058fcedfd63bbe41f8f421ac385e
|
b207f54e15883db277b31f60bc8bbca42c9f61b5
|
refs/heads/master
| 2022-12-16T00:50:02.828610
| 2020-02-27T04:04:38
| 2020-02-27T04:04:38
| 236,615,768
| 0
| 1
| null | 2022-12-08T03:35:04
| 2020-01-27T23:10:02
|
Python
|
UTF-8
|
Python
| false
| false
| 36,685
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import os
import commands
# import subprocess
import base64
import csv
import re
import json
import libdb_mysql_web
import decimal
from operator import itemgetter
from flask import Flask, render_template, request, jsonify, redirect, url_for, send_file
from datetime import datetime
app = Flask(__name__)
db = libdb_mysql_web.libdb_mysql()
##########################################################################
### MAIN WEB PAGE FUNCTIONS
def chart(coin_name):
price_list = []
item = []
condition = "ticker='"+coin_name+"'"
capitalizations = db.generic_select([condition], "capitalization")
for i in capitalizations:
item.append(i['secs'])
item.append(i['last'])
price_list.append(item)
item = []
return price_list
@app.route('/coinmarketcap.html', methods=['GET'])
def get_cmc():
price_in_usd = 0
btc = 0
eth = 0
price_in_btc = 0
price_in_eth = 0
market_cap = 0
volume24 = 0
secs_list = []
selected_coin_prefix = 'CM-'
coin_sel = 'CM-BTC'
capitalizations = db.generic_select(["last_flag=1"], "capitalization")
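    # Find the newest snapshot time and detect whether its tickers carry
    # the CM- or MK- source prefix.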
for item in capitalizations:
secs_list.append(item['secs'])
latest_secs = max(secs_list)
for item in capitalizations:
if item['secs'] == latest_secs and "CM-" in item['ticker']:
selected_coin_prefix = 'CM-'
if item['secs'] == latest_secs and "MK-" in item['ticker']:
selected_coin_prefix = 'MK-'
if request.args.get('coin'):
coin_sel = selected_coin_prefix + str(request.args.get('coin'))
price = chart(coin_sel)
for item in capitalizations:
if item['secs'] == latest_secs and item['ticker'] == coin_sel:
market_cap = item['market_cap']
market_cap = format(market_cap, '.1f')
volume24 = item['volume']
if item['secs'] == latest_secs and item['ticker'] == selected_coin_prefix + "BTC":
btc = item['last']
if item['secs'] == latest_secs and item['ticker'] == selected_coin_prefix + "ETH":
eth = item['last']
day_range = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 24 * 60 * 60 \
and item['ticker'] == coin_sel:
day_range.append(item['last'])
for item in capitalizations:
if item['secs'] == latest_secs and item['ticker'] == coin_sel:
price_in_usd = item['last']
if btc != 0:
price_in_btc = '%.12f' % (price_in_usd / btc)
if eth != 0:
price_in_eth = '%.12f' % (price_in_usd / eth)
range_1d = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 24 * 60 * 60 \
and item['ticker'] == coin_sel:
range_1d.append(item['last'])
range_7d = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 7 * 24 * 60 * 60 \
and item['ticker'] == coin_sel:
range_7d.append(item['last'])
range_52w = []
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 52 * 7 * 24 * 60 * 60 \
and item['ticker'] == coin_sel:
range_52w.append(item['last'])
vol_list_52w = []
average_vol_52w = 0
for item in capitalizations:
if 0 <= latest_secs - item['secs'] \
and latest_secs - item['secs'] <= 52 * 7 * 24 * 60 * 60 \
and item['ticker'] == coin_sel:
vol_list_52w.append(item['volume'])
if len(vol_list_52w) != 0:
average_vol_52w = sum(vol_list_52w) / len(vol_list_52w)
whole_range = []
for item in capitalizations:
if item['ticker'] == coin_sel:
whole_range.append(item['last'])
secs_7d_last = 0
basic_secs_7d = max(secs_list) - 7 * 24 * 60 * 60
secs_7d = min(secs_list, key=lambda x: abs(x - basic_secs_7d))
for item in capitalizations:
if item['secs'] == secs_7d and item['ticker'] == coin_sel:
secs_7d_last = item['last']
secs_1m_last = 0
basic_secs_1m = max(secs_list) - 30 * 24 * 60 * 60
secs_1m = min(secs_list, key=lambda x: abs(x - basic_secs_1m))
for item in capitalizations:
if item['secs'] == secs_1m and item['ticker'] == coin_sel:
secs_1m_last = item['last']
secs_6m_last = 0
basic_secs_6m = max(secs_list) - 6 * 30 * 24 * 60 * 60
secs_6m = min(secs_list, key=lambda x: abs(x - basic_secs_6m))
for item in capitalizations:
if item['secs'] == secs_6m and item['ticker'] == coin_sel:
secs_6m_last = item['last']
ticker_list = []
for item in capitalizations:
if item['secs'] == latest_secs:
item['market_cap'] = format(item['market_cap'], '.1f')
item['ticker'] = item['ticker'][3:]
ticker_list.append(item)
alldata = {'market_cap': market_cap,
'day_range_max': max(day_range),
'day_range_min': min(day_range),
'volume24': volume24,
'circulating_supply': '-',
'price_in_usd': price_in_usd,
'price_in_btc': price_in_btc,
'price_in_eth': price_in_eth,
'range_max_1d': max(range_1d),
'range_min_1d': min(range_1d),
'range_max_7d': max(range_7d),
'range_min_7d': min(range_7d),
'range_max_52w': max(range_52w),
'range_min_52w': min(range_52w),
'average_vol_52w': average_vol_52w,
'all_time_high': max(whole_range),
'percent_from_ath': max(whole_range) - price_in_usd,
'cap_in_btc': float(market_cap) / float(btc),
'pro_7d': (secs_7d_last - price_in_usd) / price_in_usd * 100,
'pro_1m': (secs_1m_last - price_in_usd) / price_in_usd * 100,
'pro_6m': (secs_6m_last - price_in_usd) / price_in_usd * 100,
'ticker_list': ticker_list,
'coin_sel': coin_sel[3:],
'updated': datetime.fromtimestamp(latest_secs).strftime("%d/%m/%Y %H:%M:%S"),
'price_list': price
}
return render_template("coinmarketcap.html", data=alldata)
@app.route('/download.html', methods=['POST'])
def download():
values = {}
ticker_list = []
csv_list = []
option = None
controller = request.form.get('controller')
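    # -- parse the requested date range into inclusive epoch-second bounds --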
tmpsecs = time.strptime(request.form.get('dini') + " 00:00:00", "%Y-%m-%d %H:%M:%S")
ini_secs = int(time.mktime(tmpsecs))
tmpsecs = time.strptime(request.form.get('dend') + " 23:59:59", "%Y-%m-%d %H:%M:%S")
end_secs = int(time.mktime(tmpsecs))
if request.form.get('option').lower() == "prices":
option = "last"
if request.form.get('option').lower() == "dollars":
option = "dollars"
if request.form.get('option').lower() == "volume":
option = "volume"
# -- get involved tickers --
values['time'] = []
params = [
"controller='" + controller + "'",
"active=1",
"ORDER BY localticker"
]
tickers = db.generic_select(params, "tickers")
ticker_list.append('time')
for i in tickers:
ticker_list.append(i['remoteticker'])
for i in tickers:
data_table = "prices"
if controller == "COINMARKETCAP":
data_table = "capitalization"
params = [
"SELECT secs, " + option,
"ticker='" + i['localticker'] + "'",
"secs>=" + str(ini_secs),
"secs<=" + str(end_secs),
"ORDER BY secs"
]
data = db.generic_select(params, data_table)
if len(data) > 0:
if values['time'] == []:
for n in data:
values['time'].append(n['secs'])
values[i['remoteticker']] = []
for n in data:
if option == "last":
values[i['remoteticker']].append(n['last'])
if option == "dollars":
values[i['remoteticker']].append(n['dollars'])
if option == "volume":
values[i['remoteticker']].append(n['dayvol'])
# -- fill data for CSV --
for i in range(0, len(values['time'])):
tmp = {}
for n in values.keys():
if n == "time":
tmpdate = time.localtime(values[n][i])
tmp[n] = time.strftime("%Y-%m-%d %H:%M:%S", tmpdate)
for n in values.keys():
            if n != "time":
try:
tmp[n] = ('%16.8f' % values[n][i]).strip()
except:
tmp[n] = ('%16.8f' % values[n][-1]).strip()
csv_list.append(tmp)
# -- write to CSV file on /tmp --
if option == "last":
option = "prices"
dini = re.sub("-", "", request.form.get('dini'))
dend = re.sub("-", "", request.form.get('dend'))
csv_file = controller + "_" + dini + "_" + dend + "_" + option + ".csv"
fp = open("/tmp/" + csv_file, 'wb')
writer = csv.DictWriter(fp, fieldnames=ticker_list, extrasaction='ignore', delimiter=',',
quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for i in csv_list:
writer.writerow(i)
fp.close()
return send_file("/tmp/" + csv_file, mimetype="text/csv", attachment_filename=csv_file,
as_attachment=True)
@app.route('/csv-download.html', methods=['GET'])
def csv_download():
controllers = []
# -- get data --
controllers = db.generic_select([], "controllers")
today = time.strftime("%Y-%m-%d", time.localtime())
alldata = {'controllers': controllers, 'date': today}
return render_template("csv-download.html", data=alldata)
@app.route('/save-bot-config.html', methods=['POST'])
def savebotconfig():
bot_config = {
'volume': 0.00,
}
# -- process arguments --
if request.method == 'POST':
volume = request.form['volume']
if volume != "":
bot_config['volume'] = float(volume)
params1 = bot_config
params2 = ["id=1"]
db.generic_update(params1, params2, "bot_config")
# -- reinitialize allstream --
db.path = os.getcwd() + "/"
db.stopper = db.path + "../sd-allstream.py"
print("Stopping allstream...")
os.system(db.stopper + " &")
time.sleep(1)
return redirect(url_for('configbot'))
@app.route('/config-bot.html', methods=['GET'])
def configbot():
alldata = {}
tmp = db.generic_select([], "bot_config")
bot_config = tmp[0]
bot_config['vol10'] = ""
if bot_config['volume'] == 10:
bot_config['vol10'] = " checked"
bot_config['vol25'] = ""
if bot_config['volume'] == 25:
bot_config['vol25'] = " checked"
bot_config['vol50'] = ""
if bot_config['volume'] == 50:
bot_config['vol50'] = " checked"
bot_config['vol75'] = ""
if bot_config['volume'] == 75:
bot_config['vol75'] = " checked"
bot_config['vol100'] = ""
if bot_config['volume'] == 100:
bot_config['vol100'] = " checked"
alldata = {'bot_config': bot_config}
return render_template("config-bot.html", data=alldata)
@app.route('/csv-operations.html', methods=['GET'])
def csv_operations():
args_filter = 1
argums = {
'start_date': "",
'end_date': "",
'ticker': "",
'op_type': "",
'status_type': "",
}
# -- get arguments --
argums['start_date'] = request.args.get('start_date')
argums['end_date'] = request.args.get('end_date')
argums['ticker'] = request.args.get('ticker')
if argums['ticker'] is not None:
argums['ticker'] = "BI-" + request.args.get('ticker')
argums['op_type'] = request.args.get('op_type')
argums['status_type'] = request.args.get('status_type')
if argums['start_date'] is None \
and argums['end_date'] is None \
and argums['ticker'] is None \
and argums['op_type'] is None \
and argums['status_type'] is None:
args_filter = 0
# -- get data --
params = []
params.append("ORDER BY timestamp DESC")
operations = db.generic_select(params, "operations")
    if len(operations) != 0:
for i in range(0, len(operations)):
operations[i]['price1'] = ('%16.8f' % operations[i]['price1']).strip()
operations[i]['price2'] = ('%16.8f' % operations[i]['price2']).strip()
# -- make csv file --
# return render_template("csv-operations.html", data=alldata)
csv_data = []
csv_file = "operations.csv"
titles = ['Time', 'Ticker', 'Operation', 'Price', 'Status']
status = ['Success', 'Failed', 'No Funds']
for i in operations:
txt_status = status[i['end_status']]
tmp = {
'Time': str(i['pdate']) + " " + str(i['ptime']),
'Ticker': i['ticker'],
'Operation': i['operation'],
'Price': i['price'],
'Status': txt_status
}
csv_data.append(tmp)
fp = open(csv_file, 'wb')
writer = csv.DictWriter(fp, fieldnames=titles, extrasaction='ignore', delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for i in csv_data:
writer.writerow(i)
fp.close()
return send_file(csv_file, mimetype="text/csv", attachment_filename=csv_file, as_attachment=True)
@app.route('/operation-review.html', methods=['GET'])
def op_review():
op_id = request.args.get('id')
params = [
"op_id=" + str(op_id),
"ORDER BY status"
]
alldata = db.generic_select(params, "op_tracking")
return render_template("operation-review.html", data=alldata)
@app.route('/operations.html', methods=['GET'])
def operations():
alldata = []
args_filter = 1
argums = {
'start_date': "",
'end_date': "",
'ticker': "",
'op_type': "",
'status_type': "",
}
# -- get arguments --
tmp = request.args.get('page')
curr_page = 1
if tmp != "" \
and tmp is not None:
curr_page = int(tmp)
if curr_page < 1:
curr_page = 1
argums['start_date'] = request.args.get('start_date')
argums['end_date'] = request.args.get('end_date')
argums['ticker'] = request.args.get('ticker')
if argums['ticker'] is not None:
argums['ticker'] = "BI-" + request.args.get('ticker')
argums['op_type'] = request.args.get('op_type')
argums['status_type'] = request.args.get('status_type')
if argums['start_date'] is None \
and argums['end_date'] is None \
and argums['ticker'] is None \
and argums['op_type'] is None \
and argums['status_type'] is None:
args_filter = 0
# -- get all filter data --
params = ["SELECT DISTINCT(ticker)"]
tickers = db.generic_select(params, "operations")
show_tickers = [{'ticker': 'All', 'selected': ""}]
for i in tickers:
if argums['ticker'] == i['ticker']:
tmp = {'ticker': i['ticker'][3:], 'selected': " selected"}
else:
tmp = {'ticker': i['ticker'][3:], 'selected': ""}
show_tickers.append(tmp)
op_types = []
for i in ['All', 'Buy', 'Sell']:
if argums['op_type'] == i:
tmp = {'op_type': i, 'selected': " selected"}
else:
tmp = {'op_type': i, 'selected': ""}
op_types.append(tmp)
status_types = []
for i in ['All', 'Success', 'Failed']:
if argums['status_type'] == i:
tmp = {'status_type': i, 'selected': " selected"}
else:
tmp = {'status_type': i, 'selected': ""}
status_types.append(tmp)
# -- make filter query --
params = []
if argums['start_date'] is not None \
and argums['start_date'] != "":
start_date = time.strftime("%Y-%m-%d", time.strptime(argums['start_date'], "%d-%b-%Y"))
params.append("pdate>='" + start_date + "'")
if argums['end_date'] is not None \
and argums['end_date'] != "":
end_date = time.strftime("%Y-%m-%d", time.strptime(argums['end_date'], "%d-%b-%Y"))
params.append("pdate<='" + end_date + "'")
if argums['ticker'] is not None \
and argums['ticker'] != "BI-":
params.append("ticker='" + argums['ticker'] + "'")
if argums['op_type'] is not None:
if argums['op_type'] == "Buy":
params.append("operation='B'")
if argums['op_type'] == "Sell":
params.append("operation='S'")
if argums['status_type'] is not None:
if argums['status_type'] == "Success":
params.append("end_status=0")
if argums['status_type'] == "Failed":
params.append("end_status=1")
if argums['status_type'] == "No Funds":
params.append("end_status=2")
params.append("ORDER BY timestamp DESC")
operations = db.generic_select(params, "operations")
# -- correct date arguments --
if argums['start_date'] is None:
argums['start_date'] = ""
if argums['end_date'] is None:
argums['end_date'] = ""
# -- compose operations for page --
if len(operations):
for i in range(0, len(operations)):
operations[i]['price'] = ('%16.8f' % operations[i]['price']).strip()
operations[i]['status'] = "Success"
if operations[i]['end_status'] == 1:
operations[i]['status'] = "Failed"
if operations[i]['end_status'] == 2:
operations[i]['status'] = "No Funds"
# -- compose paginator --
pages_url = "/operations.html"
if request.query_string != "":
end = request.query_string.find("&page=")
pages_url += "?" + request.query_string[0:end]
print("PAGES_URL:")
print(pages_url)
page_list = db.paginator(operations, curr_page, 25, pages_url)
alldata = {'operations': page_list['rows'], 'pages': page_list['pages'], 'argums': argums, 'tickers': show_tickers,
'op_types': op_types, 'status_types': status_types, 'args_filter': args_filter}
return render_template("operations.html", data=alldata)
@app.route('/stop.html')
def stop_bot():
# if not session.get('logged_in'):
# return redirect(url_for('login'))
comando = "ps auwx | grep -i 'python' | grep -i 'botcommand' | grep -v 'grep' | awk {'print $2'}"
result = commands.getoutput(comando).split("\n")
# result = subprocess.getoutput(comando).split("\n")
for i in result:
if len(i) > 1:
comando = "kill -9 " + str(i)
os.system(comando)
return redirect(url_for('bot_control'))
@app.route('/run.html')
def run_bot():
# if not session.get('logged_in'):
# return redirect(url_for('login'))
# -- start bot --
comando = "cd ../ && " + db.path + "botcommand.py &"
print("COMANDO:")
print(comando)
os.system(comando)
return redirect(url_for('bot_control'))
@app.route('/bot-control.html')
def bot_control():
log_text = ""
running_state = 0
stopping_state = 0
log_file = db.path + "bot_log.log"
# -- get current state --
comando = "ps auwx | grep -i 'python' | grep -i 'botcommand' | grep -v 'vi ' | grep -v 'grep'"
lines = commands.getoutput(comando).split("\n")
# lines = subprocess.getoutput(comando).split("\n")
if len(lines) > 0:
if len(lines[0]) > 1:
running_state = 1
if os.path.isfile(db.path + "stop_bot.ctl"):
stopping_state = 1
print("RUNNING:")
print(running_state)
print("STOPPING:")
print(stopping_state)
print("---------------------------")
# -- if bot not running prepare or create log file for reading --
if running_state == 0:
if not os.path.isfile(log_file):
fp = open(log_file, "w")
fp.write("")
fp.close()
# -- if bot is running, get log file and check stopping --
# if running_state == 1:
fp = open(log_file, "r")
log_text = fp.read().split("\n")
fp.close()
alldata = {'running_state': running_state, 'log_text': log_text, 'path': db.path, 'stopping_state': stopping_state}
return render_template("bot-control.html", data=alldata)
@app.route('/save-ticker.html', methods=['GET'])
def save_ticker():
active = 0
    active_selected = request.args.getlist('active')
    if active_selected:
        active = 1
# -- update data --
params1 = {
'active': active,
'name': request.args.get('name'),
'localticker': request.args.get('localticker'),
'remoteticker': request.args.get('remoteticker'),
'controller': request.args.get('controller')
}
params2 = ["id=" + str(request.args.get('id'))]
db.generic_update(params1, params2, "tickers_tmp")
return redirect(url_for('setup'))
@app.route('/edit-ticker.html', methods=['GET'])
def edit_ticker():
ticker_id = request.args.get('ticker')
controllers = []
# -- get arguments --
controller = request.args.get('controller')
if controller is None:
controller = ""
# -- get data --
params = ["id=" + str(ticker_id)]
ticker = db.generic_select(params, "tickers_tmp")[0]
if ticker['active'] == 1:
ticker['active'] = " checked"
tmp = db.generic_select([], "controllers")
# -- add default for controller list --
for i in tmp:
if i['controller'] == ticker['controller']:
controllers.append({'controller': i['controller'], 'selected': " selected"})
else:
controllers.append({'controller': i['controller'], 'selected': ""})
alldata = {'controllers': controllers, 'ticker': ticker}
return render_template("edit-ticker.html", data=alldata)
@app.route('/get-controller-ids.html', methods=['GET'])
def get_controller_ids():
ticker_ids = []
controller = ""
controller = request.args.get('controller')
search = request.args.get('search')
ticker_ids = db.get_ticker_id_search(controller, search)
alldata = {'tickers': ticker_ids}
return jsonify(alldata)
@app.route('/update-controller.html', methods=['GET'])
def update_controller():
controller = str(request.args.get('controller'))
search = str(request.args.get('search'))
arg_active = str(request.args.get('active'))
referer = ""
if controller == "None":
controller = ""
if search == "None":
search = ""
db.update_controller_active(controller, search, arg_active)
return redirect(url_for('setup', controller=controller, search=search))
@app.route('/update-list.html')
def update_list():
ticker_id = ""
active = 0
ticker_id = request.args.get('ticker')
arg_active = int(request.args.get('active'))
params1 = {'active': arg_active}
params2 = ["id=" + str(ticker_id)]
db.generic_update(params1, params2, "tickers_tmp")
return redirect(url_for('setup'))
@app.route('/apply.html', methods=['GET'])
def apply_changes():
referer = None
controller = str(request.args.get('controller'))
search = str(request.args.get('search'))
if controller == "None":
controller = ""
if search == "None":
search = ""
return_function = "setup"
db.generic_delete([], "tickers")
db.fill_tickers_tmp("tickers_tmp", "tickers")
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
if referer == "config-exchanges.html":
return_function = "setup_exchanges"
if referer == "config-times.html":
return_function = "setup_times"
if referer == "config-fees.html":
return_function = "setup_fees"
if referer == "config-keys.html":
return_function = "setup_keys"
# -- reinitialize allstream --
db.path = os.getcwd() + "/../"
db.launcher = db.path + "allstream.py"
# db.histo_launcher = db.path+"allhistorical.py"
db.stopper = db.path + "sd-allstream.py"
print("Stopping allstream and allhistorical...")
os.system(db.stopper + " &")
time.sleep(1)
# print("Relaunching allstream...")
# os.system(db.launcher+" &")
# print("Relaunching allhistorical...")
# os.system(db.histo_launcher+" &")
return redirect(url_for(return_function, message='saved', controller=controller, search=search))
@app.route('/save-keys.html', methods=['GET'])
def save_keys():
referer = ""
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
controller = str(request.args.get('controller'))
key = str(request.args.get('key'))
secret = str(request.args.get('secret'))
passphrase = str(request.args.get('passphrase'))
params1 = {
'api_key': key,
'api_secret': secret,
'passphrase': passphrase
}
params2 = ["controller='" + controller + "'"]
db.generic_update(params1, params2, "controllers")
return redirect(url_for('setup_keys', message='saved'))
@app.route('/save-fees.html', methods=['GET'])
def save_fees():
referer = ""
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
controller = str(request.args.get('controller'))
fee = str(request.args.get('fee'))
params1 = {'fee': fee}
params2 = ["controller='" + controller + "'"]
db.generic_update(params1, params2, "controllers")
return redirect(url_for('setup_fees', message='saved'))
@app.route('/save-times.html', methods=['GET'])
def save_times():
referer = ""
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
controller = str(request.args.get('controller'))
timepace = str(request.args.get('timepace'))
params1 = {'timepace': timepace}
params2 = ["controller='" + controller + "'"]
db.generic_update(params1, params2, "controllers")
return redirect(url_for('setup_times', message='saved'))
@app.route('/config-keys.html')
def setup_keys():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
controllers = db.generic_select([], "controllers")
# -- conditions for modified --
if referer.find("save-keys.html") == 0:
modified = 1
# -- create data representation for page --
alldata = {'controllers': controllers, 'modified': modified, 'finished': finished}
return render_template("config-keys.html", data=alldata)
@app.route('/config-fees.html')
def setup_fees():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
controllers = db.generic_select([], "controllers")
# -- conditions for modified --
if referer.find("save-fees.html") == 0:
modified = 1
# -- create data representation for page --
alldata = {'controllers': controllers, 'modified': modified, 'finished': finished}
return render_template("config-fees.html", data=alldata)
@app.route('/config-times.html')
def setup_times():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
controllers = db.generic_select([], "controllers")
# -- conditions for modified --
if referer.find("save-times.html") == 0:
modified = 1
# -- create data representation for page --
alldata = {'controllers': controllers, 'modified': modified, 'finished': finished}
return render_template("config-times.html", data=alldata)
@app.route('/config-tickers.html', methods=['GET'])
def setup():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
controller = str(request.args.get('controller'))
search = str(request.args.get('search'))
if controller == "None":
controller = ""
if search == "None":
search = ""
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
# -- delete tickers_tmp, refill, get controllers and tickers --
if referer.find("config-tickers.html") < 0 \
and referer.find("edit-ticker.html") < 0:
db.generic_delete([], "tickers_tmp")
db.fill_tickers_tmp("tickers", "tickers_tmp")
# -- get controllers --
controllers = db.generic_select([], "controllers")
if controller == "":
controller = controllers[0]['controller']
# -- if there is a search argument go to search --
# if search != "":
# return redirect(url_for('search', controller=controller, search=search))
# -- load data --
for i in range(0, len(controllers)):
if controllers[i]['controller'] == controller:
controllers[i]['selected'] = " selected"
else:
controllers[i]['selected'] = ""
# -- get tickers --
tickers = db.get_search_tickers(-1, "tickers_tmp", controller, search)
orig_tickers = db.get_search_tickers(-1, "tickers", controller, search)
# -- conditions for modified --
if referer.find("save-ticker.html") == 0 \
or tickers != orig_tickers:
modified = 1
alldata = {'search': search, 'controllers': controllers, 'controller': controller, 'modified': modified,
'finished': finished, 'settings': {'total': 0, 'selected': 0, 'allsel': ""}, 'tickers': []}
# -- create data representation for page --
numtickers = 0
seltickers = 0
for i in range(0, len(tickers)):
tickers[i]['checked'] = ""
if tickers[i]['active'] == 1:
tickers[i]['checked'] = " checked"
seltickers += 1
numtickers += 1
alldata['tickers'] = tickers
alldata['settings']['total'] = numtickers
alldata['settings']['selected'] = seltickers
if alldata['settings']['total'] == alldata['settings']['selected']:
alldata['settings']['allsel'] = " checked"
return render_template("config-tickers.html", data=alldata)
@app.route('/config-exchanges.html')
def setup_exchanges():
alldata = []
modified = 0
referer = ""
finished = 0
# -- get origin page --
if "HTTP_REFERER" in request.environ.keys():
referer = request.environ['HTTP_REFERER'].split("/")[-1]
# -- get arguments --
tmp = str(request.args.get('message'))
if tmp == "saved":
finished = 1
# -- delete tickers_tmp, refill, get controllers and tickers --
if referer == "index.html" or referer == "":
db.generic_delete([], "tickers_tmp")
db.fill_tickers_tmp("tickers", "tickers_tmp")
controllers = db.generic_select([], "controllers")
params = ["ORDER BY controller, remoteticker"]
tickers = db.generic_select(params, "tickers_tmp")
orig_tickers = db.generic_select(params, "tickers")
# -- conditions for modified --
if referer == "save-ticker.html" \
or tickers != orig_tickers:
modified = 1
# -- create data representation for page --
for i in range(0, len(controllers)):
active = ""
activated = "0"
for n in tickers:
if n['controller'] == controllers[i]['controller']:
if str(n['active']) == "1":
active = " checked"
activated = "1"
alldata.append(
{'controller': controllers[i]['controller'], 'active': active, 'activated': activated, 'modified': modified,
'finished': finished})
return render_template("config-exchanges.html", data=alldata)
@app.route('/view-log.html')
def view_log():
alldata = {}
# -- get arguments --
secs = str(request.args.get('secs'))
# -- get listing --
params = ["secs=" + secs]
listing = db.generic_select(params, "opportunities")
# -- date hour --
tmpdate = time.localtime(listing[0]['secs'])
log_date = time.strftime("%d-%m-%Y %H:%M:%S", tmpdate)
for i in range(0, len(listing)):
listing[i]['operation1'] = "B"
listing[i]['operation2'] = "S"
if listing[i]['op_type'] == "S-B":
listing[i]['operation1'] = "S"
listing[i]['operation2'] = "B"
listing[i]['ticker1'] = listing[i]['ticker1'][3:]
listing[i]['ticker2'] = listing[i]['ticker2'][3:]
listing[i]['price1'] = '%16.8f' % listing[i]['price1']
listing[i]['price2'] = '%16.8f' % listing[i]['price2']
listing[i]['pot_profit'] = '%16.8f' % listing[i]['pot_profit']
listing = sorted(listing, key=itemgetter("pot_profit"))
listing.reverse()
alldata = {'datehour': log_date, 'log': listing}
return render_template("view-log.html", data=alldata)
@app.route('/index.html')
@app.route('/screener.html')
@app.route('/')
def screener():
data = []
prices = []
lastsecs = int(time.time())
controller = None
# -- get controller --
controller_sel = str(request.args.get('controller'))
# -- get controllers and tickers --
controllers = db.generic_select([], "controllers")
if controller_sel == "None":
controller = controllers[0]
for i in range(0, len(controllers)):
if controllers[i]['controller'] == controller_sel:
controller = controllers[i]
controllers[i]['selected'] = " selected"
else:
controllers[i]['selected'] = ""
params = [
"controller='" + controller['controller'] + "'",
"active=1",
"ORDER BY localticker"
]
tickers = db.generic_select(params, "tickers")
params = [
"last_flag=1",
"ticker LIKE '" + controller['preintticker'] + "%'"
]
prices_values = db.generic_select(params, "prices")
# -- create data representation for page --
for i in prices_values:
for n in tickers:
if i['ticker'] == n['localticker']:
name = n['name']
# -- add to list --
secsdiff = lastsecs - i['secs']
if secsdiff < 3600:
updated = "Recently"
else:
hours = int(secsdiff / 3600)
updated = "More than " + str(hours) + " hours"
last = i['last']
volume = i['dayvol']
price = {
'name': name,
'localticker': i['ticker'],
'ticker': i['ticker'][3:],
'last': format(last, 'f'),
'volume': volume,
'updated': updated
}
prices.append(price)
data.append({'controller': controller, 'prices': prices})
# -- create last date and group all data for template --
tmpfecha = time.localtime(lastsecs)
fecha = time.strftime("%Y-%m-%d %H:%M:%S", tmpfecha)
alldata = {'last_updated': fecha, 'controllers': controllers, 'controller': controller['controller'], 'data': data}
return render_template("screener.html", data=alldata)
#############################################################################
## MAIN
# db = libdb_mysql.libdb_mysql()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
|
[
"pythonguru101@gmail.com"
] |
pythonguru101@gmail.com
|
c20db2103395c74594a0bf40d5860f88f87a367a
|
cf01f91631bf79c9ad101f271fa12db18b30a336
|
/example/theRoom/cgi-bin/home.py
|
d55605e54662bc7c5565466f56f58018ca496dfe
|
[
"Apache-2.0"
] |
permissive
|
fallen-geko/QuickPYSER
|
69b803dba734c5510dc018fd4fc0f0c2172eb33c
|
f8e4fb18934f8896794efd4609eef15c88e34d9e
|
refs/heads/master
| 2022-08-13T15:26:12.270353
| 2020-05-25T18:46:44
| 2020-05-25T18:46:44
| 266,401,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,066
|
py
|
#!/usr/bin/env python
'''
------------------------------------------------------------------------------------------
Copyright 2020 Romeo Dabok
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------------------
Notes: For the main page. Almost all the main JS functions are defined here. This page is split
into divisions (which update themselves). It's a really messy script (to humans) and
I keep forgetting to use comments. Actually, I think I may have already forgotten what some of
these functions do...
That's it, time for a smoke.
'''
#import modules for CGI handling
import cgi, cgitb
from os import environ as enn
import theroom
cgitb.enable()
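# Start in the sign-in portal; a valid session cookie switches the start room to the lobby.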
decider = "portal"
logged = "false"
if theroom.checkCookie():
decider = "lobby"
logged = "true"
home = '''
//Logged in and stuff
let userName = "";
let passWord = "";
let userToken = "";
let isLoggedIn = %s;
let currRoom = "%s";
let refreshTime = 10000; //10 seconds
// Okay, forum room and pages here
let forumRoom = 0;
let forumPage = 0;
// Okay this guy is loaded everytime the room changes
let mainExtra = "";
// Sets onload to our lil refresh function
//document.getElementById("output").inner="JQuery Rocks!";
//window.onload = setupRefresh;
// Arrays for room stuff... sorry I dont know what else to call em
const freeRooms = new Array("portal", "signup", "signinProcess", "signupProcess");
const staticRooms = new Array("portal", "signup", "signinProcess", "signupProcess","roomMakeProcess","lobby","createRoom","profile");
const datInputRooms = new Array("room","message");
//AJAX
var aRequests = new Array();
if (window.XMLHttpRequest) {
aRequests.push(new XMLHttpRequest());
aRequests.push(new XMLHttpRequest());
aRequests.push(new XMLHttpRequest());
} else if (window.ActiveXObject) {
aRequests.push(new ActiveXObject("Microsoft.XMLHttp"));
aRequests.push(new ActiveXObject("Microsoft.XMLHttp"));
aRequests.push(new ActiveXObject("Microsoft.XMLHttp"));
}
var Request = false;
if (window.XMLHttpRequest) {
Request = new XMLHttpRequest();
} else if (window.ActiveXObject) {
Request = new ActiveXObject("Microsoft.XMLHTTP");
}
// This function checks an array for a value
function hasItem(array,item) {
var lengthOfArray = array.length;
for (var i = 0; i < lengthOfArray; i++) {
if (item == array[i]) {
return true;
}
}
return false;
}
function setupRefresh()
{
setInterval("refreshBlock(0);",refreshTime);
refreshBlock(1);
}
//Sends message
function forumPost(post) {
if (hasItem(datInputRooms,currRoom)) {
//$('#inputdata').load('inputDat.py?postm='+post+mainExtra);
changeRoom('room','&postm='+post);
}
}
function refreshBlock(flag,extra)
{
if (!extra) {
extra = "";
}
//If there is some extra, make sure they are added nicely
var stoks = "";
//Adds main extra
extra = extra + mainExtra;
if (extra) {
stoks = "?";
var slen = extra.length;
extra = extra.slice(1,slen);
}
//If user is not logged and and is in a room he is not supposed to be in
//send em to the portal
if (isLoggedIn == false && (!hasItem(freeRooms,currRoom))) {
currRoom = "portal";
}
//If this is not a static room okay fresh it
if ((!hasItem(staticRooms,currRoom)) || flag == 1) {
$('#rooms').load("".concat(currRoom,'.py',stoks,extra));
}
//And this is for text input and the lil nav bar
if (flag == 1 && (!hasItem(freeRooms,currRoom))) {
$('#lnabar').load('lilnavbar.py');
if (hasItem(datInputRooms,currRoom)) {
$('#inputdata').load('inputDat.py');
} else {
$('#inputdata').load('../nullbar.html');
}
} else if (flag == 1) {
$('#lnabar').load('../nullbar.html');
$('#inputdata').load('../nullbar.html');
}
}
//Change the room
function changeRoom(room,extra) {
if (!extra) {
extra = "";
}
currRoom = room;
refreshBlock(1,extra);
}
//Creates new user
function signUp(uname,pword,pword2,firstname,lastname,phone,email) {
if (currRoom !== 'signup') {
return false;
}
var error = 0;
var erp1 = document.getElementById('errorplace1');
var erp2 = document.getElementById('errorplace2');
var erp3 = document.getElementById('errorplace3');
erp1.innerHTML = "";
    erp2.innerHTML = "";
    erp3.innerHTML = "";
if (uname == "" || uname.length < 4) {
erp1.innerHTML="Username field must be at least 4 characters long.";
error += 1;
}
if (pword == "" || pword.length < 5) {
erp2.innerHTML="Password field must be at least 5 characters long.";
error += 1;
}
if (pword2 !== pword) {
erp3.innerHTML="Password 2 does not match Password 1";
error += 1;
}
if (error < 1) {
//If no errors, okay go to the processing room with this long-assed samtin ya
//changeRoom('signupProcess',"".concat('&uname=',uname,'&pword=',pword,'&fname=',firstname,'&lname=',lastname,'&phone=',phone,'&email=',email));
if (aRequests[1]) {
aRequests[1].open("POST","signupProcess.py");
aRequests[1].setRequestHeader('Content-Type','application/x-www-form-urlencoded');
aRequests[1].onreadystatechange = function() {
if (aRequests[1].readyState == 4 && aRequests[1].status == 200) {
var rt = aRequests[1].responseText;
console.log(rt);
setTimeout(rt,5);
}
}
var req = "".concat('uname=',uname,'&pword=',pword,'&fname=',firstname,'&lname=',lastname,'&phone=',phone,'&email=',email);
aRequests[1].send(req);
}
}
}
function signIn(uname,pword) {
if (currRoom !== 'portal') {
return false;
}
var error = 0;
var erp1 = document.getElementById('errorplace1');
var erp2 = document.getElementById('errorplace2');
erp1.innerHTML = "";
erp2.innerHTML = "";
if (uname == "" || uname.length < 4) {
erp1.innerHTML="Username field must be at least 4 characters long.";
error += 1;
}
if (pword == "" || pword.length < 5) {
erp2.innerHTML="Password field must be at least 5 characters long.";
error += 1;
}
if (error < 1) {
//TODO: Sign In
//changeRoom('signinProcess',"".concat('&uname=',uname,'&pword=',pword));
if(aRequests[0]) {
aRequests[0].open("POST", 'signinProcess.py');
aRequests[0].setRequestHeader('Content-Type','application/x-www-form-urlencoded');
aRequests[0].onreadystatechange = function() {
if (aRequests[0].readyState == 4 && aRequests[0].status == 200) {
//Saves the response text in a variable so I do not stain myself
//typing it with this ruined keyboard of mine.
var rt = aRequests[0].responseText;
setTimeout(rt,5);
}
}
aRequests[0].send("uname="+uname+'&pword='+pword);
}
}
}
function updateInfo(firstname,lastname,phone,email,gend,ppic) {
changeRoom('profile',"".concat('&uinfc=true&fname=',firstname,'&lname=',lastname,'&phone=',phone,'&email=',email,'&gend=',gend+'&pic=',ppic));
}
function makeRoom(rname,rdes) {
if (currRoom !== 'createRoom') {
return false;
}
var error = 0;
var erp1 = document.getElementById('errorplace1');
erp1.innerHTML = "";
if (rname == "" || rname.length < 4) {
erp1.innerHTML="Room name must be at least 4 characters long.";
error += 1;
}
if (error < 1) {
//TODO: Make Room
//changeRoom('roomMakeProcess',"".concat('&radmin=',userName,'&rname=',encodeURI(rname),'&rdes='+encodeURI(rdes)));
if (aRequests[2]) {
aRequests[2].open("POST","roomMakeProcess.py");
aRequests[2].setRequestHeader('Content-Type','application/x-www-form-urlencoded');
aRequests[2].onreadystatechange = function() {
if (aRequests[2].readyState == 4 && aRequests[2].status == 200) {
var rt = aRequests[2].responseText;
setTimeout(rt,5);
}
}
aRequests[2].send("".concat('radmin=',encodeURI(userName),'&rname=',encodeURI(rname),'&rdes=',encodeURI(rdes)))
}
}
}
function changePic(pic) {
if (currRoom !== 'profile') {
return false;
}
changeRoom('profile',"".concat('&uppic=',pic));
}
//For navigating the forum
function sealHeader() {
mainExtra = "".concat("&forRoom=",forumRoom,"&forPage=",forumPage);
changeRoom('room');
}
function forumMaxPage() {
forumPage = -1;
sealHeader();
}
function forumNextPage() {
forumPage += 1;
sealHeader();
}
function forumPrePage() {
forumPage -= 1;
if (forumPage < 0) {
forumPage = 0;
}
sealHeader();
}
function forumMinPage() {
forumPage = 0;
sealHeader();
}
function delRoom(rmid) {
changeRoom('lobby',"".concat('&klroom=',rmid));
}
function delUser(uid) {
changeRoom('portal',"".concat('&klusr=',uid));
}
''' % (logged, decider)
total = '''
<div><h1 align="center">The Room</h1></div>
<p class="parago">Welcome to Romeo's Odd Open Messenger (ROOM).</p>
<hr>
<div id = "lnabar"></div>
<div id = "rooms"></div>
<div id = "inputdata"></div>
<div id = "output"></div>
<script>
$(document).ready(function() {
try {
window.onload=setupRefresh;
} catch (e){
//changeRoom = function(room,extra) {
//}
        rooms.innerHTML = '<h1>This Browser is incompatible with The Room</h1>';
//window.onload = startInterval;
}
});
</script>
'''
rpage = theroom.templateLoad('Home',total,jsscript=home)
print("Content-Type: text/html\r\n")
print(rpage)
|
[
"noreply@github.com"
] |
fallen-geko.noreply@github.com
|
89e0aa1d612cb9446065b4b76594e32c3d855cd6
|
3fb8b4712ad22f862921c2641fd11743d0a2dbb8
|
/tourbag/urls.py
|
2a7a492f5332a3a89e4cbfe7f2c752df63a04251
|
[] |
no_license
|
klimt1722/tourbag
|
57fe76d77d45987bd2f4cf5886c1ad3ba05cf598
|
95b4c8f109596058cfd250ec688dc8c88b3f737d
|
refs/heads/master
| 2021-01-21T06:46:32.893270
| 2017-03-22T14:57:48
| 2017-03-22T14:57:48
| 83,257,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
"""tourbag URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"TELL@jangtaeilui-MacBook-Pro.local"
] |
TELL@jangtaeilui-MacBook-Pro.local
|
54e636829957d06459dc60b547fb5f2dd688a85e
|
0e34eb0fd1327c4663ad0b47add223b3ee4764e3
|
/rename.py
|
0a73cf276b876dc4f6121501ee11e50172521bb5
|
[] |
no_license
|
morphIsmail/date-rename
|
fb19194d03b2a2f16abd0bd7b14d2920df543a36
|
15885874c4190a339991fb24072356c30a933c95
|
refs/heads/main
| 2023-04-21T19:47:28.475063
| 2021-05-14T07:50:55
| 2021-05-14T07:50:55
| 367,283,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
# import modules
import os, re, time, datetime
# check whether the OS is Windows
from sys import platform
if platform == 'win32':
    from win32_setctime import setctime
def finder():
    # all files in the current directory
    names = os.listdir(os.getcwd())
    # loop over every file
    for name in names:
        # full path to the current file
        fullname = os.path.join(os.getcwd(), name)
        if os.path.isfile(name):
            # if the file name matches the IMG/VID date pattern
            # (grouped alternation: the prefix, 8-digit date and extension are all required)
            if re.search(r'(IMG|VID).\d{8}.*\.(jpg|mp4)', name):
                # extract year, month and day from the name
                y = int(name[4:8])
                m = int(name[8:10])
                d = int(name[10:12])
                # build a date and turn it into a timestamp
                date = datetime.datetime(y, m, d).timestamp()
                # change the modification time (but not the creation time)
                os.utime(fullname, times=(date, date))
                # on Windows
                if platform == 'win32':
                    # also change the creation time
                    setctime(fullname, date)
                # print the creation time
                print(os.path.getctime(fullname))
# run the function
if __name__ == '__main__':
    finder()
|
[
"ismail_2016@mail.ru"
] |
ismail_2016@mail.ru
|
34b90bf1c2adda3444693f646a3b71fa1829dbb0
|
2939265a3ebaabd483aea301d8d7a95469ba417a
|
/DatabaseCreator_OLD.py
|
3354e0887a8fbb3d7ce14ea0867ab98fee702a09
|
[] |
no_license
|
sahilshah379/AIAllergy_Database_Creator
|
51ad02f2d9a8c20fee92d1c0b5b6f0f8db224eb2
|
dd08378ad29d17f1f60ee4195970ea0aa1bd763b
|
refs/heads/master
| 2020-07-21T14:33:02.911897
| 2019-09-08T02:53:15
| 2019-09-08T02:53:15
| 206,896,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
import selenium
import requests
import re
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
HEADERS = {'user-agent': ('Chrome/45.0.2454.101 Safari/537.36'), 'referer': 'https://www.food2fork.com/api'}
url_search = 'https://www.food2fork.com/api/search'
url_get = 'https://www.food2fork.com/api/get'
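# Food2Fork API endpoints: /search lists matching recipe IDs, /get returns one recipe's details.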
def getRecipeID(recipe_name, api_key):
    recipe_name = recipe_name.lower()
recipe_array = recipe_name.split(' ')
recipe_name = recipe_array[0]
for i in range(1, len(recipe_array)):
recipe_name = recipe_name + '%20'
recipe_name = recipe_name + recipe_array[i]
recipe_id_array = []
for page in range(1):
url_recipe_search = url_search + '?key=' + api_key + '&q=' + recipe_name + '&page=' + str(page+1)
search_request = requests.get(url_recipe_search, headers = HEADERS)
search_text = search_request.text
search_start_key = '"recipe_id": "'
search_end_key = '"'
search_array = search_text.split(search_start_key)
for i in range(1, len(search_array)):
recipe_id = search_array[i].split(search_end_key)[0]
recipe_id_array.append(recipe_id)
    print('\n'.join(map(str, recipe_id_array)))
    print(len(recipe_id_array))
    # the caller (getIngredients) iterates over this list, so it must be returned
    return recipe_id_array
def getIngredients(recipe_name, api_key):
recipes = []
recipe_id_array = getRecipeID(recipe_name, api_key)
for recipe_id in recipe_id_array:
url_ingredients_get = url_get + '?key=' + api_key + '&rId=' + recipe_id
get_request = requests.get(url_ingredients_get, headers = HEADERS)
get_text = get_request.text
get_start_key = '"ingredients": ['
get_end_key = '], '
get_ingredients_list = get_text.split(get_start_key)[1].split(get_end_key)[0] + ' '
get_ingredients_list = get_ingredients_list.replace(',', '')
recipe_ingredients_list = get_ingredients_list.split('" ')
recipe_ingredients_list = map(lambda foo: foo.replace('"', ''), recipe_ingredients_list)
recipe_data = []
recipe_data.append(recipe_id)
recipe_data.append(recipe_ingredients_list)
        recipes.append(recipe_data)
    return recipes
def getAPIKey():
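    # Register a throwaway account with Selenium, then scrape the API key
    # from the account's API page.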
email = getEmail()
options = Options()
# options.add_argument('--headless')
driver = webdriver.Chrome(options=options)
driver.get('https://www.food2fork.com/default/user/register')
first_name_element = driver.find_element_by_name('first_name')
first_name_element.send_keys('Pennapps')
last_name_element = driver.find_element_by_name('last_name')
last_name_element.send_keys('Nibbas')
email_element = driver.find_element_by_name('email')
email_element.send_keys(email)
password_element = driver.find_element_by_name('password')
password_element.send_keys('weneedmoremoney')
password_two_element = driver.find_element_by_name('password_two')
password_two_element.send_keys('weneedmoremoney')
driver.find_element_by_class_name("g-recaptcha").click()
driver.find_element_by_xpath('//*[@id="submit_record__row"]/td[2]/input').click()
if driver.current_url == 'https://www.food2fork.com/default/user/register#':
recaptcha = input('Recaptcha')
driver.find_element_by_xpath('//*[@id="submit_record__row"]/td[2]/input').click()
driver.get('https://www.food2fork.com/user/api')
api_key_element = driver.find_element_by_xpath('//*[@id="main"]/div/div/div[2]/span')
api_key = api_key_element.text
driver.quit()
return api_key
def getEmail():
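    # Fetch a disposable address by scraping the generator.email landing page.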
emailText = requests.get('http://generator.email').text
email = emailText.split('id="email_ch_text">')[1].split('</span></b><p>')[0]
return email
def main():
category = 'Cookie'
api_key = getAPIKey()
print(api_key)
getIngredients(category, api_key)
if __name__ == '__main__':
main()
|
[
"sahilshah379@gmail.com"
] |
sahilshah379@gmail.com
|
e278af9ffb8d2abc6f279bbc2b85c83a0d7b416e
|
91c39b4ff7198a513d5f11a9d45c75bbf73cf204
|
/polls/migrations/0001_initial.py
|
d3e35778dc0fe7bf6490effbe0382aeaa6037a7d
|
[
"MIT"
] |
permissive
|
blackway/django-gentelella
|
9a238f9576cffdd64e7b9e64af181a597269a5d7
|
63e4b888c4ffa655c4b1d09b3b61c31b06f5c990
|
refs/heads/master
| 2020-07-07T16:41:25.069642
| 2019-09-28T01:06:55
| 2019-09-28T01:06:55
| 203,409,006
| 0
| 0
|
MIT
| 2019-08-20T15:59:07
| 2019-08-20T15:59:07
| null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
# Generated by Django 2.0 on 2017-12-28 00:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"blackway76@gmail.com"
] |
blackway76@gmail.com
|
34bffbc478f8a443ab31759de1082a30eb40f037
|
7a84e43b2805fcbbbba1ac6bb3fad2b5f0f4a358
|
/douyin/settings.py
|
53fbe6c4b071935cde8a1b77fc7259678589813d
|
[] |
no_license
|
te3/douyin-scrapy
|
fe45ab37927068e3074ea04f6040fb425e76fd03
|
25a7f538b5ec96a190061728110fbfef1862c163
|
refs/heads/master
| 2022-11-28T01:36:14.876493
| 2020-08-06T09:47:11
| 2020-08-06T09:47:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,224
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for douyin project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# AUTOTHROTTLE_ENABLED = True
BOT_NAME = 'douyin'
SPIDER_MODULES = ['douyin.spiders']
NEWSPIDER_MODULE = 'douyin.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'douyin (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 32
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'douyin.middlewares.ProxyMiddleware': 543,
# 'douyin.middlewares.DouyinSpiderMiddleware': 544,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'douyin.middlewares.DouyinDownloaderMiddleware': 543,
# 'douyin.middlewares.DuplicatesPipeline': 444,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'douyin.pipelines.UserInfoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 1
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"982781738@qq.com"
] |
982781738@qq.com
|
f61d8e2fbef99b67676e23d077048c449257ecbb
|
49aa7daaa00b7c0fbf8cd35653e1a58cb4be5ba5
|
/pydom/pydom.py
|
0de2d476cdcef5591d6d3d8a38d9e3054229fe6a
|
[] |
no_license
|
arxd/pyweb
|
74ca0d02c791396a38b94c472f302a1639717acf
|
0777b811f4c81a81b9890484af3f0a98b7bb1bde
|
refs/heads/master
| 2020-03-27T16:56:06.001450
| 2019-06-28T10:20:00
| 2019-06-28T10:20:00
| 146,817,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,857
|
py
|
from aiohttp import web, WSMsgType  # WSMsgType is used in the websocket() message loop
import mimetypes, asyncio
import logging, os, json
from datetime import datetime
HTML = """
<!doctype html>
<html>
<head>
<title>{title}</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1,user-scalable=no">
<meta name="description" content="{desc}">
<link rel="icon" type="image/png" href="/favicon.png">
<link href="https://fonts.googleapis.com/icon?family=Material+Icons"
rel="stylesheet">
<style>
body, html {{margin:0; padding:0; width: 100%; height: 100%;}}
</style>
{head}
<script type="module">
import {{WebMain}} from './web.js';
window.main = new WebMain();
</script>
</head>
<body onload="window.main.onload()">
</body>
</html>
"""
class AppServer(web.Application):
def __init__(self, app):
super().__init__()
self.clients = {}
self.app = app
self.log = logging.getLogger()
self.router.add_route('*', '/ws', self.websocket)
self.router.add_route('GET', '/', self.root_page)
self.router.add_route('GET', '/{rsc}', self.file)
self.on_startup.append(self.start_background_tasks)
self.on_shutdown.append(self.cleanup_background_tasks)
def get_client(self, request, auth):
self.clients[auth['id']] = {'user':auth['id'], 'key':'abcd', 'ws':[]}
return self.clients[auth['id']]
async def websocket(self, request):
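        # Upgrade to a WebSocket, expect a JSON auth frame within 3 seconds,
        # register the client, then pump incoming messages until the socket closes.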
if "Upgrade" not in request.headers:
return web.Response(text="", status=404)
ws = web.WebSocketResponse(heartbeat=50.0)
await ws.prepare(request)
try:
auth = (await ws.receive(timeout=3.0)).json()
self.log.info(f"User Connected:{auth} {len(self.clients)} other users")
client = self.get_client(request, auth)
except:
self.log.error("User didn't send the right info")
await ws.close(code=4000)
return ws
client['ws'].append(ws)
client['health'] = self.now()
self.log.info("%s: WS connect %d", client['user'], len(client['ws']))
        await ws.send_str(json.dumps({'html': self.app.html()}))  # Element defines html(); html_str() does not exist
async for msg in ws:
if msg.type == WSMsgType.TEXT:
client['health'] = self.now()
#data = json.loads(msg.data)
print("MSG", msg)
#await ws.send_str(json.dumps(reply))
else:
self.log.error("%s:Strange message: %s: %r", client['user'], msg.type, msg.data)
await ws.close(code=5001)
self.log.info("%s:WS Close (%s)", client['user'], ws.closed)
if ws in client['ws']:
self.log.info("Removed")
client['ws'].remove(ws)
return ws
def now(self):
return datetime.utcnow().timestamp() - self.startup_time
async def root_page(self, request):
tmpl = {
'title':'No Title',
'desc':'No Description',
'head': '',
}
#tmpl.update(self.main())
#print(tmpl)
return web.Response(content_type="text/html", text=HTML.format(**tmpl))
async def file(self, request):
filename = os.getcwd()+'/'+request.match_info['rsc']
if not os.path.exists(filename):
raise web.HTTPNotFound()
with open(filename, 'rb') as f:
body = f.read()
return web.Response(content_type=mimetypes.guess_type(filename)[0], body=body)
def run(self):
web.run_app(self)
async def ws_health_check(self):
check_interval = 5.1 #seconds
try:
while True:
self.log.info(f"{self.now():.1f}")
await asyncio.sleep(check_interval)
except asyncio.CancelledError:
pass
self.log.info("Health check Cancelled")
async def start_background_tasks(self, app):
self.startup_time = datetime.utcnow().timestamp()
self.ws_health_check = app.loop.create_task(self.ws_health_check())
async def cleanup_background_tasks(self, app):
self.log.info("Cleanup_background_tasks")
self.ws_health_check.cancel()
await self.ws_health_check
for k, v in self.clients.items():
self.log.info(f"{v['user']}:close {len(v['ws'])} WS sockets")
while v['ws']: # actually get removed in at the end of websocket()
await v['ws'][0].close(code=5000)
self.log.info("Good Bye...")
class Element(object):
def __init__(self, **kwargs):
self.props = kwargs.get('props',{})
self.tag = kwargs.get('tag', 'div')
self.children = []
self.handlers = set()
def addEventListener(self, client):
self.handlers.append(client)
def html(self):
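        # Recursively serialize this element and its children to an HTML string;
        # 'cdata' nodes emit their children as raw text.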
if self.tag == 'cdata':
return ''.join(map(str, self.children))
attrs = [f' {k}="{v}"' for k,v in self.props.items()]
s = f"<{self.tag}{''.join(attrs)}>"
for child in self.children:
s += child.html()
s += f"</{self.tag}>"
return s
class Button(Element):
def __init__(self, **kwargs):
kwargs['tag'] = 'input'
kwargs.setdefault('props',{})
kwargs['props'].setdefault('type', 'button')
        super().__init__(**kwargs)  # 'tag' is already in kwargs; passing tag= again would raise TypeError
class App(Element):
def __init__(self, **kwargs):
kwargs['tag'] = 'body'
super().__init__(**kwargs)
def log(self, str):
print(str)
def route(self, url):
print(f"ROUTE: {url}")
def run(self, port=8080):
AppServer(self).run()
|
[
"aaron@framelunch.jp"
] |
aaron@framelunch.jp
|
2c9401d55b683553a7c4272e8a1b38cb68ba1393
|
ecaff4997482b58af6aacd79d5942583fe4d1bd5
|
/data/1VII/addlnzref.py
|
47c3527c4264c74b76f859739896ee09ff426610
|
[] |
no_license
|
3ki5tj/wham
|
3380d8175e55e23935d886bd332b18d023e32c0f
|
c0d03f4158b29d64824af44366bf403b3f2e3752
|
refs/heads/master
| 2021-01-17T13:30:31.919446
| 2017-09-14T21:26:15
| 2017-09-14T21:26:15
| 35,853,200
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
#!/usr/bin/env python
''' add reference lnz from MBAR '''
import sys, os, getopt, shutil, re, glob
fninp = None
fnref = "mbar.out"
verbose = 0
def usage():
''' print usage and die '''
print """ Usage:
%s [OPTIONS] file""" % sys.argv[0]
print """
OPTIONS:
--ref= set the reference file
-v be verbose
--verbose= set verbocity
-h, --help help
"""
exit(1)
def doargs():
''' handle input arguments '''
try:
opts, args = getopt.gnu_getopt(sys.argv[1:],
"hv",
[ "ref=",
"help", "verbose=",
] )
except getopt.GetoptError, err:
print str(err)
usage()
    global fninp, fnref, verbose  # verbose is assigned below, so it must be declared global too
for o, a in opts:
if o in ("--ref",):
fnref = a
elif o in ("-v",):
verbose += 1 # such that -vv gives verbose = 2
elif o in ("--verbose",):
verbose = int(a)
elif o in ("-h", "--help"):
usage()
if len(args) > 0:
fninp = args
else:
fninp = glob.glob("*wham*.out") + glob.glob("*mbar*.out") + glob.glob("est*.out")
def main(fn, fnref):
# load the reference array
s = open(fnref).readlines()
n = len(s)
betref = [0]*n
arrref = [0]*n
for i in range(n):
ln = s[i].strip()
x = ln.split()
try:
betref[i] = float(x[1])
# do not convert to float
arrref[i] = x[2]
except ValueError:
print ln, x
raw_input()
# load the input data
s = open(fn).readlines()
nn = len( [ln for ln in s if not ln.startswith("#")] )
if nn != n:
print "number of lines mismatch %s(%s) vs %s " % (nn, fn, n)
raise Exception
bet = [-0]*n
arr = [-0]*n
maxcol = 6
if fn.startswith("est"):
maxcol = 11
ii = 0
for i in range(n):
# find the ith noncomment line
while s[ii].startswith("#"):
ii += 1
ln = s[ii].rstrip()
arr = ln.split()
if len(arr) > maxcol:
# assuming every column beyond column `maxcol` is added
# by addlnzref, so we can remove them safely
p = ln.rfind( arr[maxcol] )
if p >= 0:
ln = ln[:p].rstrip()
s[ii] = ln + "\t" + arrref[i] + "\n"
ii += 1
print "updating %s" % fn
open(fn, "w").writelines(s)
if __name__ == "__main__":
doargs()
for fn in fninp:
if fn != fnref:
main(fn, fnref)
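# Example invocation (editor's note, illustrative): with no file arguments the
# script globs *wham*.out, *mbar*.out and est*.out; files can also be named
# explicitly, e.g.
#   python addlnzref.py --ref=mbar.out wham.out est.out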
|
[
"3ki5tj@gmail.com"
] |
3ki5tj@gmail.com
|
d05898717da5b0bdb2eb3a4e61da5f70552a7ed7
|
52a1c705883a283b43587dd993825ae16ace2cd9
|
/locallibrary/catalog/urls.py
|
62839d6b6cbdeccc89bad0936d830b89ca54b8a1
|
[] |
no_license
|
chitkokooo/LocalLibrary
|
db6643a42178f00e430641334a941e7ffe5010b6
|
f99325f529834294f830b67007ca96a302d51a96
|
refs/heads/master
| 2023-08-25T00:12:26.572477
| 2020-07-18T10:08:27
| 2020-07-18T10:08:27
| 277,484,143
| 3
| 3
| null | 2021-09-22T19:27:33
| 2020-07-06T08:22:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('books/', views.BookListView.as_view(), name='books'),
path('book/<int:pk>/', views.BookDetailView.as_view(), name='book-detail'),
# Challenge yourself (3)
path('authors/', views.AuthorListView.as_view(), name="authors"),
path('author/<int:pk>/', views.AuthorDetailView.as_view(), name="author-detail"),
path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'),
# Challenge yourself
path('allborrowedbooks/', views.AllLoanedBooksListView.as_view(), name='all-borrowed'),
path('book/<uuid:pk>/renew/', views.renew_book_librarian, name='renew-book-librarian'),
path('author/create/', views.AuthorCreate.as_view(), name='author_create'),
    path('author/<int:pk>/update/', views.AuthorUpdate.as_view(), name='author_update'),
path('author/<int:pk>/delete/', views.AuthorDelete.as_view(), name='author_delete'),
# Challenge yourself
path('book/create/', views.BookCreate.as_view(), name="book_create"),
path('book/<int:pk>/update/', views.BookUpdate.as_view(), name="book_update"),
path('book/<int:pk>/delete/', views.BookDelete.as_view(), name="book_delete"),
]
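# Editor's note (illustrative): the names above allow reverse URL lookup in
# views and templates; assuming this URLconf is included under 'catalog/':
#   from django.urls import reverse
#   reverse('book-detail', args=[3])   # -> '/catalog/book/3/'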
|
[
"chitkokooo.cu@gmail.com"
] |
chitkokooo.cu@gmail.com
|
e0f12a86fb901108bf9f2a170c3607667e2e6e21
|
22215eae0bf31c1021cf927155d310ba57adf0fb
|
/automaton/state_machine.py
|
913aa3461a9dcbe5fe1166eb378e5cace0ed6ba7
|
[
"MIT"
] |
permissive
|
pombredanne/automaton-5
|
37e466f26d55e3ed34c8780e63b0b9ce1cac66dc
|
3f5b6dc4521bc8ee284f732daa7883e49b3433e2
|
refs/heads/master
| 2020-12-31T03:42:52.956371
| 2013-12-28T01:02:37
| 2013-12-28T01:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
# -*- coding: utf-8 -*-
def to_state_machine(node):
state_machine = {}
nodes = set([node])
visited_nodes = set()
accepting_states = set()
while True:
start_node = nodes.pop()
for edge in node.alphabet():
end_node = start_node.derive(edge).reduce()
if end_node.accepts_lambda:
accepting_states.add(end_node)
if end_node and end_node not in visited_nodes:
visited_nodes.add(end_node)
nodes.add(end_node)
state_machine[(start_node, edge)] = end_node
if not nodes:
break
return node, state_machine, accepting_states
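# --- Usage sketch (editor's addition, illustrative only) ---
# `matches` is a hypothetical helper showing how the returned triple can be
# used to test a word against the machine; it is not part of the original
# module.
def matches(word, node):
    start, transitions, accepting = to_state_machine(node)
    state = start
    for symbol in word:
        state = transitions.get((state, symbol))
        if state is None:  # no transition defined for this symbol
            return False
    return state in accepting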
|
[
"anler86@gmail.com"
] |
anler86@gmail.com
|
f6704266b1482dc5b366cbdc3d16fd83c464f0b3
|
bdfa1601b8da83f7a21da0a9ccda18277ff6c614
|
/back-end/website/dist-packages/boto/gs/bucket.py
|
f07b9029605b439d22abe8269b50a82f5831c304
|
[] |
no_license
|
dchang00/keekaa-back-end
|
e66bd242018c477d42abb81da547707d44312eab
|
698e027b7f6f4db5c2e9b9a899ba74f4ad4daf8e
|
refs/heads/master
| 2021-05-16T04:21:05.352393
| 2013-11-07T07:11:02
| 2013-11-07T07:11:02
| 14,196,360
| 0
| 1
| null | 2020-07-25T20:30:01
| 2013-11-07T06:43:43
|
Python
|
UTF-8
|
Python
| false
| false
| 17,838
|
py
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto import handler
from boto.exception import InvalidAclError
from boto.gs.acl import ACL, CannedACLStrings
from boto.gs.acl import SupportedPermissions as GSPermissions
from boto.gs.cors import Cors
from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
import xml.sax
# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
STANDARD_ACL = 'acl'
CORS_ARG = 'cors'
class Bucket(S3Bucket):
WebsiteBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<WebsiteConfiguration>%s%s</WebsiteConfiguration>')
WebsiteMainPageFragment = '<MainPageSuffix>%s</MainPageSuffix>'
WebsiteErrorFragment = '<NotFoundPage>%s</NotFoundPage>'
def __init__(self, connection=None, name=None, key_class=GSKey):
super(Bucket, self).__init__(connection, name, key_class)
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
"""sets or changes a bucket's or key's acl (depending on whether a
key_name was passed). We include a version_id argument to support a
polymorphic interface for callers, however, version_id is not relevant
for Google Cloud Storage buckets and is therefore ignored here."""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers)
else:
self.set_canned_acl(acl_or_str, key_name, headers=headers)
def set_def_acl(self, acl_or_str, key_name='', headers=None):
"""sets or changes a bucket's default object acl"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_def_xml_acl(acl_or_str.to_xml(), key_name, headers=headers)
else:
self.set_def_canned_acl(acl_or_str, key_name, headers=headers)
def get_acl_helper(self, key_name, headers, query_args):
"""provides common functionality for get_acl() and get_def_acl()"""
response = self.connection.make_request('GET', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
if response.status == 200:
acl = ACL(self)
h = handler.XmlHandler(acl, self)
xml.sax.parseString(body, h)
return acl
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_acl(self, key_name='', headers=None, version_id=None):
"""returns a bucket's acl. We include a version_id argument
to support a polymorphic interface for callers, however,
version_id is not relevant for Google Cloud Storage buckets
and is therefore ignored here."""
return self.get_acl_helper(key_name, headers, STANDARD_ACL)
def get_def_acl(self, key_name='', headers=None):
"""returns a bucket's default object acl"""
return self.get_acl_helper(key_name, headers, DEF_OBJ_ACL)
def set_canned_acl_helper(self, acl_str, key_name, headers, query_args):
"""provides common functionality for set_canned_acl() and
set_def_canned_acl()"""
assert acl_str in CannedACLStrings
if headers:
headers[self.connection.provider.acl_header] = acl_str
else:
headers={self.connection.provider.acl_header: acl_str}
response = self.connection.make_request('PUT', self.name, key_name,
headers=headers, query_args=query_args)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_canned_acl(self, acl_str, key_name='', headers=None,
version_id=None):
"""sets or changes a bucket's acl to a predefined (canned) value.
We include a version_id argument to support a polymorphic
interface for callers, however, version_id is not relevant for
Google Cloud Storage buckets and is therefore ignored here."""
return self.set_canned_acl_helper(acl_str, key_name, headers,
STANDARD_ACL)
def set_def_canned_acl(self, acl_str, key_name='', headers=None):
"""sets or changes a bucket's default object acl to a predefined
(canned) value"""
return self.set_canned_acl_helper(acl_str, key_name, headers,
query_args=DEF_OBJ_ACL)
def set_def_xml_acl(self, acl_str, key_name='', headers=None):
"""sets or changes a bucket's default object ACL"""
return self.set_xml_acl(acl_str, key_name, headers,
query_args=DEF_OBJ_ACL)
def get_cors(self, headers=None):
"""returns a bucket's CORS XML"""
response = self.connection.make_request('GET', self.name,
query_args=CORS_ARG,
headers=headers)
body = response.read()
if response.status == 200:
# Success - parse XML and return Cors object.
cors = Cors()
h = handler.XmlHandler(cors, self)
xml.sax.parseString(body, h)
return cors
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors(self, cors, headers=None):
"""sets or changes a bucket's CORS XML."""
cors_xml = cors.encode('UTF-8')
response = self.connection.make_request('PUT', self.name,
data=cors_xml,
query_args=CORS_ARG,
headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
# Method with same signature as boto.s3.bucket.Bucket.add_email_grant(),
# to allow polymorphic treatment at application layer.
def add_email_grant(self, permission, email_address,
recursive=False, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the GS
                              account you are granting the permission to.
:type recursive: boolean
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_email_grant(permission, email_address, headers=headers)
# Method with same signature as boto.s3.bucket.Bucket.add_user_grant(),
# to allow polymorphic treatment at application layer.
def add_user_grant(self, permission, user_id, recursive=False, headers=None):
"""
Convenience method that provides a quick way to add a canonical user
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUTs the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ|WRITE|FULL_CONTROL)
:type user_id: string
:param user_id: The canonical user id associated with the GS account
you are granting the permission to.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_user_grant(permission, user_id)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_user_grant(permission, user_id, headers=headers)
def add_group_email_grant(self, permission, email_address, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|WRITE|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_group_email_grant(permission, email_address,
headers=headers)
# Method with same input signature as boto.s3.bucket.Bucket.list_grants()
# (but returning different object type), to allow polymorphic treatment
# at application layer.
def list_grants(self, headers=None):
acl = self.get_acl(headers=headers)
return acl.entries
def disable_logging(self, headers=None):
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>'
self.set_subresource('logging', xml_str, headers=headers)
def enable_logging(self, target_bucket, target_prefix=None, headers=None):
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>'
xml_str = (xml_str + '<LogBucket>%s</LogBucket>' % target_bucket)
if target_prefix:
xml_str = (xml_str +
'<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix)
xml_str = xml_str + '</Logging>'
self.set_subresource('logging', xml_str, headers=headers)
def configure_website(self, main_page_suffix=None, error_key=None,
headers=None):
"""
Configure this bucket to act as a website
:type suffix: str
:param suffix: Suffix that is appended to a request that is for a
"directory" on the website endpoint (e.g. if the suffix
is index.html and you make a request to
samplebucket/images/ the data that is returned will
be for the object with the key name images/index.html).
The suffix must not be empty and must not include a
slash character. This parameter is optional and the
property is disabled if excluded.
:type error_key: str
        :param error_key: The object key name to use when a 404
error occurs. This parameter is optional and the
property is disabled if excluded.
"""
if main_page_suffix:
main_page_frag = self.WebsiteMainPageFragment % main_page_suffix
else:
main_page_frag = ''
if error_key:
error_frag = self.WebsiteErrorFragment % error_key
else:
error_frag = ''
body = self.WebsiteBody % (main_page_frag, error_frag)
response = self.connection.make_request('PUT', self.name, data=body,
query_args='websiteConfig',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_website_configuration(self, headers=None):
"""
Returns the current status of website configuration on the bucket.
:rtype: dict
:returns: A dictionary containing a Python representation
of the XML response from GCS. The overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to request that
is for a "directory" on the website endpoint
* NotFoundPage: name of an object to serve when site visitors
encounter a 404
"""
        return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_with_xml(self, headers=None):
"""
Returns the current status of website configuration on the bucket as
unparsed XML.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing a Python representation
of the XML response from GCS. The overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to request that
is for a "directory" on the website endpoint
* NotFoundPage: name of an object to serve when site visitors
encounter a 404
2) unparsed XML describing the bucket's website configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='websiteConfig', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def delete_website_configuration(self, headers=None):
self.configure_website(headers=headers)
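# --- Usage sketch (editor's addition, illustrative only) ---
# Hypothetical driving code for the helpers above; the bucket names are made
# up and boto credentials are assumed to be configured (e.g. via ~/.boto).
#
#   from boto.gs.connection import GSConnection
#   conn = GSConnection()
#   bucket = conn.get_bucket('example-bucket')
#   bucket.configure_website(main_page_suffix='index.html',
#                            error_key='404.html')
#   bucket.enable_logging('example-log-bucket', target_prefix='access-')
#   print(bucket.get_website_configuration())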
|
[
"dchang00@stanford.edu"
] |
dchang00@stanford.edu
|
7dfc746f506e5f5fa900920650cbc6ab587c17b2
|
9a0a732fe4d7be0ee57e003671e7f6b198945b3e
|
/pyjos/request/service/promotion/__init__.py
|
12a111c69c365c8c7ce8ec7db6285379ba8fe8ae
|
[] |
no_license
|
tcztzy/pyjos
|
14f28b240e30a29841e368c2df0b9065ec92a511
|
ba531e7cd61a727a69ffbcb4bc71774344c003e2
|
refs/heads/master
| 2021-01-09T20:46:16.196181
| 2017-03-05T07:03:50
| 2017-03-05T07:03:50
| 60,248,520
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
from . import app, batch
from ... import BaseRequest, parameter
class getcode(BaseRequest):
"""自定义链接转换接口"""
def __init__(self, **kwargs):
super(getcode, self).__init__(**kwargs)
@property
def api_method_name(self):
return 'jingdong.service.promotion.getcode'
    promotionType = parameter(attr='promotionType', default=1, validators=[], doc='Promotion type: 1 product, 2 shop, 3 campaign, 4 channel page, 5 search, 0 other')  # trailing comma removed: it turned the attribute into a 1-tuple
materialId = parameter('materialId', validators=[], doc='')
class appReport(object):
pass
class goodsInfo(object):
pass
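# Editor's note (illustrative): a request might be constructed as
#   req = getcode(promotionType=1, materialId='https://item.jd.com/123.html')
# assuming BaseRequest accepts parameter values as keyword arguments; that
# behavior lives in the (unshown) BaseRequest class.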
|
[
"tcztzy@gmail.com"
] |
tcztzy@gmail.com
|
c733a026e5521d8614c95845121a8e2524ae5ac9
|
fd21360b894987cb097cc9381d863ef01aeac833
|
/object_tracking/bytetrack/tracker/byte_tracker.py
|
bc1589124a14572d4776ee61b72062cce32669d6
|
[
"MIT"
] |
permissive
|
rodrigoheck/ailia-models
|
b46aaf0f136eb0ca69bad7c2797e63078d2e03c5
|
82cca9912ff77003aa0e6da37ac3970c04f25652
|
refs/heads/master
| 2023-08-28T05:02:34.908334
| 2021-11-16T04:11:30
| 2021-11-16T04:11:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,571
|
py
|
import numpy as np
from .kalman_filter import KalmanFilter
from .basetrack import BaseTrack, TrackState
from . import matching
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
def __init__(self, tlwh, score):
# wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float64)  # np.float alias was removed in NumPy 1.24
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
def activate(self, kalman_filter, frame_id):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
if frame_id == 1:
self.is_activated = True
# self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
self.score = new_track.score
def update(self, new_track, frame_id):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:type update_feature: bool
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
@property
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
@staticmethod
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class BYTETracker(object):
def __init__(
self, track_thresh=0.6, track_buffer=30,
match_thresh=0.9, frame_rate=30,
mot20=False):
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.track_thresh = track_thresh
self.match_thresh = match_thresh
self.det_thresh = track_thresh + 0.1
self.buffer_size = int(frame_rate / 30.0 * track_buffer)
self.max_time_lost = self.buffer_size
self.mot20 = mot20
self.kalman_filter = KalmanFilter()
def update(self, output_results):
self.frame_id += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
if output_results.shape[1] == 5:
scores = output_results[:, 4]
bboxes = output_results[:, :4]
else:
scores = output_results[:, 4] * output_results[:, 5]
bboxes = output_results[:, :4] # x1y1x2y2
remain_inds = scores > self.track_thresh
inds_low = scores > 0.1
inds_high = scores < self.track_thresh
inds_second = np.logical_and(inds_low, inds_high)
dets_second = bboxes[inds_second]
dets = bboxes[remain_inds]
scores_keep = scores[remain_inds]
scores_second = scores[inds_second]
if len(dets) > 0:
'''Detections'''
detections = [
STrack(STrack.tlbr_to_tlwh(tlbr), s) for (tlbr, s) in zip(dets, scores_keep)
]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with high score detection boxes'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
STrack.multi_predict(strack_pool)
dists = matching.iou_distance(strack_pool, detections)
if not self.mot20:
dists = matching.fuse_score(dists, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
''' Step 3: Second association, with low score detection boxes'''
# association the untrack to the low score detections
if len(dets_second) > 0:
'''Detections'''
detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for
(tlbr, s) in zip(dets_second, scores_second)]
else:
detections_second = []
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
if not self.mot20:
dists = matching.fuse_score(dists, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_starcks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.kalman_filter, self.frame_id)
activated_starcks.append(track)
""" Step 5: Update state"""
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Ramained match {} s'.format(t4-t3))
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
self.removed_stracks.extend(removed_stracks)
self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
# get scores of lost tracks
output_stracks = [track for track in self.tracked_stracks if track.is_activated]
return output_stracks
def joint_stracks(tlista, tlistb):
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
def sub_stracks(tlista, tlistb):
stracks = {}
for t in tlista:
stracks[t.track_id] = t
for t in tlistb:
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
return list(stracks.values())
def remove_duplicate_stracks(stracksa, stracksb):
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist < 0.15)
dupa, dupb = list(), list()
for p, q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
return resa, resb
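# --- Usage sketch (editor's addition, illustrative only) ---
# Feeding per-frame detections to the tracker; the detection array is made up
# and uses the (x1, y1, x2, y2, score) layout handled by update().
if __name__ == '__main__':
    tracker = BYTETracker(track_thresh=0.6, track_buffer=30)
    detections = np.array([
        [100.0, 100.0, 200.0, 300.0, 0.90],   # high-score box -> first association
        [400.0, 120.0, 470.0, 280.0, 0.40],   # low-score box -> second association
    ])
    for frame in range(3):
        for t in tracker.update(detections):
            print(frame, t.track_id, t.tlwh, t.score)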
|
[
"ooe1123@gmail.com"
] |
ooe1123@gmail.com
|
032d2486569c16c3c75a7ad16d6ce1f0bbec64df
|
e484e9b3be09d0e9122e92d0f3a2fbcb494a22e5
|
/app.py
|
bfaed1de0733a8f155f2d29eb053c32d5e988b2c
|
[] |
no_license
|
yjz111/xjzx
|
86c6298fab48cea7b224942130e4fea3b598eaf3
|
87fb8c42a3e31fa64c0fccaf954eeb3de54a8c0b
|
refs/heads/master
| 2020-04-25T16:31:33.202855
| 2019-04-09T13:56:53
| 2019-04-09T13:56:53
| 172,915,341
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
from flask import Flask
from models import db
import logging
from logging.handlers import RotatingFileHandler
from flask_wtf.csrf import CSRFProtect
from flask_session import Session
import redis
from views_news import news_blueprint
from views_user import user_blueprint
from views_admin import admin_blueprint
def create(config):
    app = Flask(__name__)
    # Load configuration
    app.config.from_object(config)
    # Initialize the database connection
    db.init_app(app)
    # Enable CSRF protection
    CSRFProtect(app)
    # Store sessions in redis
    Session(app)
    # Logging setup
    # Set the log level (DEBUG for development)
    logging.basicConfig(level=logging.DEBUG)
    # Create a rotating file handler: log path, max size per file, and the
    # number of backup files to keep
    file_log_handler = RotatingFileHandler(config.BASE_DIR + "/logs/xjzx.log", maxBytes=1024 * 1024 * 100, backupCount=10)
    # Log record format: level, source file name, line number, message
    formatter = logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')
    # Attach the format to the file handler
    file_log_handler.setFormatter(formatter)
    # Register the handler on the global logger (the one the flask app uses)
    logging.getLogger().addHandler(file_log_handler)
    app.logger_xjzx = logging
    # Attach a redis client to the app
    app.redis_cli = redis.StrictRedis(config.REDIS_HOST, config.REDIS_PORT, config.REDIS_DB)
    # Register blueprints
    app.register_blueprint(news_blueprint)
    app.register_blueprint(user_blueprint)
    app.register_blueprint(admin_blueprint)
    return app
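# --- Usage sketch (editor's addition, illustrative only) ---
# `DevelopmentConfig` is a hypothetical config class exposing the attributes
# used above (BASE_DIR, REDIS_HOST, REDIS_PORT, REDIS_DB, ...).
#
#   from config import DevelopmentConfig
#   app = create(DevelopmentConfig)
#   app.run(debug=True)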
|
[
"1206604668@qq.com"
] |
1206604668@qq.com
|
51e3944307e72804a1265a356f3d0a5f606b613f
|
c49590eb7f01df37c8ec5fef00d0ffc7250fa321
|
/openapi_client/models/existing_normal_order.py
|
a13bb994a8a5c32393770f610b14317804a73b72
|
[] |
no_license
|
harshad5498/ks-orderapi-python
|
373a4b85a56ff97e2367eebd076f67f972e92f51
|
237da6fc3297c02e85f0fff1a34857aaa4c1d295
|
refs/heads/master
| 2022-12-09T19:55:21.938764
| 2020-09-03T05:22:51
| 2020-09-03T05:22:51
| 293,533,651
| 0
| 0
| null | 2020-09-07T13:19:25
| 2020-09-07T13:19:24
| null |
UTF-8
|
Python
| false
| false
| 7,092
|
py
|
# coding: utf-8
"""
KS Trade API's
The version of the OpenAPI document: 1.0
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class ExistingNormalOrder(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'orderId': 'str',
'quantity': 'int',
'price': 'float',
'disclosedQuantity': 'int',
'triggerPrice': 'float'
}
attribute_map = {
'orderId': 'orderId',
'quantity': 'quantity',
'price': 'price',
'disclosedQuantity': 'disclosedQuantity',
'triggerPrice': 'triggerPrice'
}
def __init__(self, orderId=None, quantity=None, price=None, disclosedQuantity=None, triggerPrice=None, local_vars_configuration=None): # noqa: E501
"""ExistingNormalOrder - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._orderId = None
self._quantity = None
self._price = None
self._disclosedQuantity = None
self._triggerPrice = None
self.discriminator = None
self.orderId = orderId
if quantity is not None:
self.quantity = quantity
if price is not None:
self.price = price
if disclosedQuantity is not None:
self.disclosedQuantity = disclosedQuantity
if triggerPrice is not None:
self.triggerPrice = triggerPrice
@property
def orderId(self):
"""Gets the orderId of this ExistingNormalOrder. # noqa: E501
Order ID of the order to be modified # noqa: E501
:return: The orderId of this ExistingNormalOrder. # noqa: E501
:rtype: str
"""
return self._orderId
@orderId.setter
def orderId(self, orderId):
"""Sets the orderId of this ExistingNormalOrder.
Order ID of the order to be modified # noqa: E501
:param orderId: The orderId of this ExistingNormalOrder. # noqa: E501
:type orderId: str
"""
if self.local_vars_configuration.client_side_validation and orderId is None: # noqa: E501
raise ValueError("Invalid value for `orderId`, must not be `None`") # noqa: E501
self._orderId = orderId
@property
def quantity(self):
"""Gets the quantity of this ExistingNormalOrder. # noqa: E501
Order quantity - specified in same unit as quoted in market depth # noqa: E501
:return: The quantity of this ExistingNormalOrder. # noqa: E501
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this ExistingNormalOrder.
Order quantity - specified in same unit as quoted in market depth # noqa: E501
:param quantity: The quantity of this ExistingNormalOrder. # noqa: E501
:type quantity: int
"""
self._quantity = quantity
@property
def price(self):
"""Gets the price of this ExistingNormalOrder. # noqa: E501
Order Price, non zero positive for limit order and zero for market order # noqa: E501
:return: The price of this ExistingNormalOrder. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this ExistingNormalOrder.
Order Price, non zero positive for limit order and zero for market order # noqa: E501
:param price: The price of this ExistingNormalOrder. # noqa: E501
:type price: float
"""
self._price = price
@property
def disclosedQuantity(self):
"""Gets the disclosedQuantity of this ExistingNormalOrder. # noqa: E501
Quantity to be disclosed in order # noqa: E501
:return: The disclosedQuantity of this ExistingNormalOrder. # noqa: E501
:rtype: int
"""
return self._disclosedQuantity
@disclosedQuantity.setter
def disclosedQuantity(self, disclosedQuantity):
"""Sets the disclosedQuantity of this ExistingNormalOrder.
Quantity to be disclosed in order # noqa: E501
:param disclosedQuantity: The disclosedQuantity of this ExistingNormalOrder. # noqa: E501
:type disclosedQuantity: int
"""
self._disclosedQuantity = disclosedQuantity
@property
def triggerPrice(self):
"""Gets the triggerPrice of this ExistingNormalOrder. # noqa: E501
Trigger price, required for stoploss or supermultiple order # noqa: E501
:return: The triggerPrice of this ExistingNormalOrder. # noqa: E501
:rtype: float
"""
return self._triggerPrice
@triggerPrice.setter
def triggerPrice(self, triggerPrice):
"""Sets the triggerPrice of this ExistingNormalOrder.
Trigger price, required for stoploss or supermultiple order # noqa: E501
:param triggerPrice: The triggerPrice of this ExistingNormalOrder. # noqa: E501
:type triggerPrice: float
"""
self._triggerPrice = triggerPrice
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExistingNormalOrder):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ExistingNormalOrder):
return True
return self.to_dict() != other.to_dict()
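# --- Usage sketch (editor's addition, illustrative only; values are made up) ---
#   order = ExistingNormalOrder(orderId='1100', quantity=5, price=101.5)
#   print(order.to_dict())   # {'orderId': '1100', 'quantity': 5, 'price': 101.5, ...}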
|
[
"thebhushanp@gmail.com"
] |
thebhushanp@gmail.com
|
e63d5ee85f51bfe2e1f5687b3c76cef9d7f939f3
|
6712aec3e2f8984ee7a748eeed7e6b7fc46d2404
|
/polls/views.py
|
52f20195e926b66028b87b15cbfe0f4d807e6567
|
[] |
no_license
|
zakaria391/django_example
|
7e309a85755da89cb6da211fbcfc9f5942c5fd2b
|
201cc80427abc04560cd42fe5619e50e9dc5bb93
|
refs/heads/main
| 2023-08-28T12:30:49.247453
| 2021-11-09T05:05:41
| 2021-11-09T05:05:41
| 426,097,950
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Question, Choice
# Create your views here.
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions (not including those set to be published in the future)."""
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
    question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
|
[
"harryjason@mailna.co"
] |
harryjason@mailna.co
|
5823a62afd3d08698685ab28c56917e64b1a3011
|
21818228cb62d31b9685de44deb27cfd90430573
|
/ccxt/async/bter.py
|
9945a46c62c3d907e8cc92124daada92fbb822ac
|
[] |
no_license
|
mico/cryptoArbitrage
|
d9d5d2f89e3fccc0b84d9c13b771edef0f2b00a1
|
ea9ef03e79f302b36948746c77e4acbb3d6f01b7
|
refs/heads/master
| 2021-03-22T00:17:30.448593
| 2018-05-28T05:08:21
| 2018-05-28T05:08:21
| 108,232,310
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,931
|
py
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class bter (Exchange):
def describe(self):
return self.deep_extend(super(bter, self).describe(), {
'id': 'bter',
'name': 'Bter',
'countries': ['VG', 'CN'], # British Virgin Islands, China
'version': '2',
'hasCORS': False,
'hasFetchTickers': True,
'hasWithdraw': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980479-cfa3188c-6387-11e7-8191-93fc4184ba5c.jpg',
'api': {
'public': 'https://data.bter.com/api',
'private': 'https://api.bter.com/api',
},
'www': 'https://bter.com',
'doc': 'https://bter.com/api2',
},
'api': {
'public': {
'get': [
'pairs',
'marketinfo',
'marketlist',
'tickers',
'ticker/{id}',
'orderBook/{id}',
'trade/{id}',
'tradeHistory/{id}',
'tradeHistory/{id}/{tid}',
],
},
'private': {
'post': [
'balances',
'depositAddress',
'newAddress',
'depositsWithdrawals',
'buy',
'sell',
'cancelOrder',
'cancelAllOrders',
'getOrder',
'openOrders',
'tradeHistory',
'withdraw',
],
},
},
})
async def fetch_markets(self):
response = await self.publicGetMarketinfo()
markets = response['pairs']
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
base, quote = id.split('_')
base = base.upper()
quote = quote.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': details['decimal_places'],
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': None,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
result = self.parse_order_book(orderbook)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high24hr']),
'low': float(ticker['low24hr']),
'bid': float(ticker['highestBid']),
'ask': float(ticker['lowestAsk']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': float(ticker['percentChange']),
'percentage': None,
'average': None,
'baseVolume': float(ticker['quoteVolume']),
'quoteVolume': float(ticker['baseVolume']),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = self.parse8601(trade['date'])
return {
'id': trade['tradeID'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['rate'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
await self.load_markets()
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
order = {
'currencyPair': self.market_id(symbol),
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['orderNumber'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostCancelOrder({'orderNumber': id})
async def withdraw(self, currency, amount, address, params={}):
await self.load_markets()
response = await self.privatePostWithdraw(self.extend({
'currency': currency.lower(),
'amount': amount,
'address': address, # Address must exist in you AddressBook in security settings
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
if response['result'] != 'true':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
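# --- Usage sketch (editor's addition, illustrative only) ---
# Driving the async exchange above; the symbol is made up and the public
# endpoints need no API keys.
#
#   import asyncio
#
#   async def main():
#       exchange = bter()
#       print(await exchange.fetch_ticker('BTC/USDT'))
#
#   asyncio.get_event_loop().run_until_complete(main())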
|
[
"artur.komarov@gmail.com"
] |
artur.komarov@gmail.com
|
ac940193d2f3f1caa457b04b24282a0ee5161495
|
5c1be2da807efc7ce3470dac24654b9bd8b4131a
|
/asreval/word_uttr_scores.py
|
338580dbb5d9ec387232c3fc3d7870396db1c997
|
[] |
no_license
|
messiaen/asreval
|
bb8e95bbb2cc9688c653bfb7d4f37ac4592dcd21
|
54656ce4b8abb5764fb1832067a9999a03fa07b8
|
refs/heads/master
| 2020-03-07T11:35:30.770876
| 2017-12-15T15:36:46
| 2017-12-15T15:36:46
| 127,459,600
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,841
|
py
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
import math
from collections import namedtuple
__all__ = [
'word_lst_uttr_scores',
'word_uttr_scores',
'max_word_score'
]
WordUttrScore = namedtuple(
'WordUttrScore',
'audio_id channel start_time end_time word score truth')
score_converters = {
'raw': lambda x: x,
'posterior': lambda x: x,
'log10': lambda x: 10 ** x,
'log2': lambda x: 2 ** x,
'ln': lambda x: math.e ** x
}
def word_lst_uttr_scores(
words,
ref_uttrs,
hypothesis,
convert_fn=None,
default_score=0.0):
for word in words:
for row in word_uttr_scores(
word,
ref_uttrs,
hypothesis,
convert_fn=convert_fn,
default_score=default_score):
yield row
def word_uttr_scores(
word,
ref_uttrs,
hypothesis,
convert_fn=None,
default_score=0.0):
for ref in ref_uttrs:
score = max_word_score(
word,
ref,
hypothesis,
convert_fn=convert_fn,
default_score=default_score)
truth = 1 if word in ref else 0
yield WordUttrScore(
ref.audio_id,
ref.channel,
ref.start_time,
ref.end_time,
word,
score,
truth)
def max_word_score(word, ref, hypothesis, convert_fn=None, default_score=0.0):
score = None
for hyp in hypothesis[word]:
if hyp.audio_id == ref.audio_id and hyp.channel == ref.channel:
for edge in hyp[word]:
if ref.time_match_ratio(edge.start_time, edge.end_time) > 0.5:
if (isinstance(convert_fn, str)
and convert_fn in score_converters):
curr_score = score_converters[convert_fn](edge.score)
elif callable(convert_fn):
curr_score = convert_fn(edge.score)
else:
curr_score = edge.score
if score is None:
score = curr_score
else:
score = max(score, curr_score)
if score is None:
return default_score
return score
# TODO for now we just output word uttr csv rows
# def truth_and_scores(word_scores):
# truths = []
# scores = []
# for _, _, _, _, _, score, truth in word_scores:
# truths.append(truth)
# scores.append(score)
#
# return np.array(truths, dtype='int'), np.array(scores, dtype='float64')
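# --- Usage sketch (editor's addition, illustrative only) ---
# `refs` and `hyps` are hypothetical objects satisfying the interfaces used
# above (utterances with audio_id/channel/time fields and `word in ref`
# membership, plus a word-indexed hypothesis lattice).
#
#   for row in word_lst_uttr_scores(['hello', 'world'], refs, hyps,
#                                   convert_fn='log10'):
#       print(row)   # WordUttrScore(..., score=<posterior>, truth=0 or 1)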
|
[
"gregclark0130@gmail.com"
] |
gregclark0130@gmail.com
|
c11aaea5e8819db91521374f5e392cdd7fe91e17
|
334eb80df42753ab52ef3fef484689d5db383dde
|
/knowledge/similarity.py
|
0c4f1fd9121ee181bdaa0d184dd055ee7e35d4f1
|
[] |
no_license
|
malab/test-python
|
db8b12b8ea468a4ffc37b240fb044654d0bd3ce1
|
17134674189dd84c00da7a7234abc325ae4439c5
|
refs/heads/master
| 2021-01-19T06:56:34.537787
| 2014-12-22T15:27:21
| 2014-12-22T15:27:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# from nltk.corpus import wordnet as wn
documents = (
"The time of being young, early life",
"a young person (especially a young man or boy) ",
"young people collectively",
"the time of life between childhood and maturity",
"early maturity; the state of being young or immature or inexperienced",
"an early period of development",
"the freshness and vitality characteristic of a young person"
)
"""
ndocs = []
for doc in documents:
ndoc = []
for w in doc.split():
if wn.synsets(w):
new = wn.synsets(w)[0].lemmas()[0].name()
ndoc.append(new)
else:
new = w
ndoc = ' '.join(ndoc)
ndocs.append(ndoc)
"""
tfidf_vectorizer = TfidfVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
sims = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix)
sims = list(sims[0])
for i, cada in enumerate(sims):
    print((cada, " for sentence ", documents[i]))  # i is already an int
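# Editor's note (illustrative): sims[0] is the self-similarity (1.0); the most
# similar *other* document can be picked with, e.g.:
#   import numpy as np
#   best = int(np.argmax(sims[1:])) + 1
#   print(documents[best])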
|
[
"aurelio@germes.com"
] |
aurelio@germes.com
|
1ab29118ed618df82a690d46d51b918b38c9c595
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/hubspot/crm/objects/communications/models/error.py
|
ee0e84e240b0fae81b427767c8273d3d7c978c2e
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 9,156
|
py
|
# coding: utf-8
"""
Communications
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from hubspot.crm.objects.communications.configuration import Configuration
class Error(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"message": "str", "correlation_id": "str", "category": "str", "sub_category": "str", "errors": "list[ErrorDetail]", "context": "dict[str, list[str]]", "links": "dict[str, str]"}
attribute_map = {"message": "message", "correlation_id": "correlationId", "category": "category", "sub_category": "subCategory", "errors": "errors", "context": "context", "links": "links"}
def __init__(self, message=None, correlation_id=None, category=None, sub_category=None, errors=None, context=None, links=None, local_vars_configuration=None): # noqa: E501
"""Error - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._message = None
self._correlation_id = None
self._category = None
self._sub_category = None
self._errors = None
self._context = None
self._links = None
self.discriminator = None
self.message = message
self.correlation_id = correlation_id
self.category = category
if sub_category is not None:
self.sub_category = sub_category
if errors is not None:
self.errors = errors
if context is not None:
self.context = context
if links is not None:
self.links = links
@property
def message(self):
"""Gets the message of this Error. # noqa: E501
A human readable message describing the error along with remediation steps where appropriate # noqa: E501
:return: The message of this Error. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this Error.
A human readable message describing the error along with remediation steps where appropriate # noqa: E501
:param message: The message of this Error. # noqa: E501
:type message: str
"""
if self.local_vars_configuration.client_side_validation and message is None: # noqa: E501
raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501
self._message = message
@property
def correlation_id(self):
"""Gets the correlation_id of this Error. # noqa: E501
A unique identifier for the request. Include this value with any error reports or support tickets # noqa: E501
:return: The correlation_id of this Error. # noqa: E501
:rtype: str
"""
return self._correlation_id
@correlation_id.setter
def correlation_id(self, correlation_id):
"""Sets the correlation_id of this Error.
A unique identifier for the request. Include this value with any error reports or support tickets # noqa: E501
:param correlation_id: The correlation_id of this Error. # noqa: E501
:type correlation_id: str
"""
if self.local_vars_configuration.client_side_validation and correlation_id is None: # noqa: E501
raise ValueError("Invalid value for `correlation_id`, must not be `None`") # noqa: E501
self._correlation_id = correlation_id
@property
def category(self):
"""Gets the category of this Error. # noqa: E501
The error category # noqa: E501
:return: The category of this Error. # noqa: E501
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this Error.
The error category # noqa: E501
:param category: The category of this Error. # noqa: E501
:type category: str
"""
if self.local_vars_configuration.client_side_validation and category is None: # noqa: E501
raise ValueError("Invalid value for `category`, must not be `None`") # noqa: E501
self._category = category
@property
def sub_category(self):
"""Gets the sub_category of this Error. # noqa: E501
A specific category that contains more specific detail about the error # noqa: E501
:return: The sub_category of this Error. # noqa: E501
:rtype: str
"""
return self._sub_category
@sub_category.setter
def sub_category(self, sub_category):
"""Sets the sub_category of this Error.
A specific category that contains more specific detail about the error # noqa: E501
:param sub_category: The sub_category of this Error. # noqa: E501
:type sub_category: str
"""
self._sub_category = sub_category
@property
def errors(self):
"""Gets the errors of this Error. # noqa: E501
further information about the error # noqa: E501
:return: The errors of this Error. # noqa: E501
:rtype: list[ErrorDetail]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this Error.
further information about the error # noqa: E501
:param errors: The errors of this Error. # noqa: E501
:type errors: list[ErrorDetail]
"""
self._errors = errors
@property
def context(self):
"""Gets the context of this Error. # noqa: E501
Context about the error condition # noqa: E501
:return: The context of this Error. # noqa: E501
:rtype: dict[str, list[str]]
"""
return self._context
@context.setter
def context(self, context):
"""Sets the context of this Error.
Context about the error condition # noqa: E501
:param context: The context of this Error. # noqa: E501
:type context: dict[str, list[str]]
"""
self._context = context
@property
def links(self):
"""Gets the links of this Error. # noqa: E501
A map of link names to associated URIs containing documentation about the error or recommended remediation steps # noqa: E501
:return: The links of this Error. # noqa: E501
:rtype: dict[str, str]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Error.
A map of link names to associated URIs containing documentation about the error or recommended remediation steps # noqa: E501
:param links: The links of this Error. # noqa: E501
:type links: dict[str, str]
"""
self._links = links
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(lambda x: convert(x), value))
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], convert(item[1])), value.items()))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Error):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Error):
return True
return self.to_dict() != other.to_dict()
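# Illustrative usage sketch (added; assumes this generated client module and its
# Configuration dependency are importable — the field values are made-up examples):
# err = Error(message="Invalid input", correlation_id="abc-123", category="VALIDATION_ERROR")
# print(err.to_str())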
|
[
"azheltkovskiy@hubspot.com"
] |
azheltkovskiy@hubspot.com
|
eb91d4a5ee4783d3343c61260473eedbd4c4dd18
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/Geron17Hands/C_PartII/E_Chapter13/E_Exercises/index.py
|
31dbc036a2281c1cf4c61b4da842159f9b8249e8
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651
| 2019-10-26T04:40:49
| 2019-10-26T04:40:49
| 213,980,247
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,826
|
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
#
# TensorFlow Convolution Operations
# TensorFlow also offers a few other kinds of convolutional layers (a short commented sketch of two of them follows this list):
#
# • conv1d() creates a convolutional layer for 1D inputs. This is useful, for example,
# in natural language processing, where a sentence may be represented as a 1D
# array of words, and the receptive field covers a few neighboring words.
# • conv3d() creates a convolutional layer for 3D inputs, such as 3D PET scan.
# • atrous_conv2d() creates an atrous convolutional layer (“à trous” is French for
# “with holes”). This is equivalent to using a regular convolutional layer with a
# filter dilated by inserting rows and columns of zeros (i.e., holes). For example, a 1 ×
# 3 filter equal to [[1,2,3]] may be dilated with a dilation rate of 4, resulting in a
# dilated filter [[1, 0, 0, 0, 2, 0, 0, 0, 3]]. This allows the convolutional
# layer to have a larger receptive field at no computational price and using no extra
# parameters.
# • conv2d_transpose() creates a transpose convolutional layer, sometimes called a
# deconvolutional layer (footnote 15), which upsamples an image. It does so by inserting zeros
# between the inputs, so you can think of this as a regular convolutional layer using
# a fractional stride. Upsampling is useful, for example, in image segmentation: in a
# typical CNN, feature maps get smaller and smaller as you progress through the
# network, so if you want to output an image of the same size as the input, you
# need an upsampling layer.
# • depthwise_conv2d() creates a depthwise convolutional layer that applies every
# filter to every individual input channel independently. Thus, if there are f_n filters
# and f_n′ input channels, then this will output f_n × f_n′ feature maps.
# • separable_conv2d() creates a separable convolutional layer that first acts like a
# depthwise convolutional layer, then applies a 1 × 1 convolutional layer to the
# resulting feature maps. This makes it possible to apply filters to arbitrary sets of
# input channels.
#
#
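# A minimal commented sketch (added, not from the book) of how two of these ops
# can be invoked with the low-level TF 1.x API; the input and kernel shapes
# below are illustrative assumptions:
#
# import tensorflow as tf
# X = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])
# kernel = tf.Variable(tf.random_normal([3, 3, 3, 16]))  # 3×3 kernel, 3 in, 16 out
# conv = tf.nn.conv2d(X, kernel, strides=[1, 2, 2, 1], padding="SAME")
# dilated = tf.nn.atrous_conv2d(X, kernel, rate=4, padding="SAME")  # dilated filter
#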
#
# Exercises
# 1. What are the advantages of a CNN over a fully connected DNN for image
# classification?
# 2. Consider a CNN composed of three convolutional layers, each with 3 × 3 kernels,
# a stride of 2, and SAME padding. The lowest layer outputs 100 feature maps, the
# middle one outputs 200, and the top one outputs 400. The input images are RGB
# images of 200 × 300 pixels. What is the total number of parameters in the CNN?
# If we are using 32-bit floats, at least how much RAM will this network require
# when making a prediction for a single instance? What about when training on a
# mini-batch of 50 images?
#
# (Footnote 15, on conv2d_transpose above: this name is quite misleading since this
# layer does not perform a deconvolution, which is a well-defined mathematical
# operation, namely the inverse of a convolution.)
# 3. If your GPU runs out of memory while training a CNN, what are five things you
# could try to solve the problem?
# 4. Why would you want to add a max pooling layer rather than a convolutional
# layer with the same stride?
# 5. When would you want to add a local response normalization layer?
# 6. Can you name the main innovations in AlexNet, compared to LeNet-5? What
# about the main innovations in GoogLeNet and ResNet?
# 7. Build your own CNN and try to achieve the highest possible accuracy on MNIST.
# 8. Classifying large images using Inception v3.
# a. Download some images of various animals. Load them in Python, for example
# using the matplotlib.image.mpimg.imread() function. Resize and/or crop
# them to 299 × 299 pixels, and ensure that they have just three channels (RGB),
# with no transparency channel.
# b. Download the latest pretrained Inception v3 model: the checkpoint is
# available at https://goo.gl/nxSQvl.
# c. Create the Inception v3 model by calling the inception_v3() function, as
# shown below. This must be done within an argument scope created by the
# inception_v3_arg_scope() function. Also, you must set is_training=False
# and num_classes=1001 like so:
# from tensorflow.contrib.slim.nets import inception
# import tensorflow.contrib.slim as slim
#
# X = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
# with slim.arg_scope(inception.inception_v3_arg_scope()):
# logits, end_points = inception.inception_v3(
# X, num_classes=1001, is_training=False)
# predictions = end_points["Predictions"]
# saver = tf.train.Saver()
# d. Open a session and use the Saver to restore the pretrained model checkpoint
# you downloaded earlier.
# e. Run the model to classify the images you prepared. Display the top five
# predictions for each image, along with the estimated probability (the list of class
# names is available at https://goo.gl/brXRtZ). How accurate is the model?
# 9. Transfer learning for large image classification.
#
#
# a. Create a training set containing at least 100 images per class. For example, you
# could classify your own pictures based on the location (beach, mountain, city,
# etc.), or alternatively you can just use an existing dataset, such as the flowers
# dataset or MIT’s places dataset (requires registration, and it is huge).
# b. Write a preprocessing step that will resize and crop the image to 299 × 299,
# with some randomness for data augmentation.
# c. Using the pretrained Inception v3 model from the previous exercise, freeze all
# layers up to the bottleneck layer (i.e., the last layer before the output layer),
# and replace the output layer with the appropriate number of outputs for your
# new classification task (e.g., the flowers dataset has five mutually exclusive
# classes so the output layer must have five neurons and use the softmax
# activation function).
# d. Split your dataset into a training set and a test set. Train the model on the
# training set and evaluate it on the test set.
# 10. Go through TensorFlow’s DeepDream tutorial. It is a fun way to familiarize
# yourself with various ways of visualizing the patterns learned by a CNN, and to
# generate art using Deep Learning.
#
# Solutions to these exercises are available in Appendix A.
#
#
#
#
#
#
# CHAPTER 14
# Recurrent Neural Networks
#
#
#
#
# The batter hits the ball. You immediately start running, anticipating the ball’s
# trajectory. You track it and adapt your movements, and finally catch it (under a thunder of
# applause). Predicting the future is what you do all the time, whether you are finishing
# a friend’s sentence or anticipating the smell of coffee at breakfast. In this chapter, we
# are going to discuss recurrent neural networks (RNN), a class of nets that can predict
# the future (well, up to a point, of course). They can analyze time series data such as
# stock prices, and tell you when to buy or sell. In autonomous driving systems, they
# can anticipate car trajectories and help avoid accidents. More generally, they can work
# on sequences of arbitrary lengths, rather than on fixed-sized inputs like all the nets we
# have discussed so far. For example, they can take sentences, documents, or audio
# samples as input, making them extremely useful for natural language processing
# (NLP) systems such as automatic translation, speech-to-text, or sentiment analysis
# (e.g., reading movie reviews and extracting the rater’s feeling about the movie).
# Moreover, RNNs’ ability to anticipate also makes them capable of surprising
# creativity. You can ask them to predict which are the most likely next notes in a melody, then
# randomly pick one of these notes and play it. Then ask the net for the next most likely
# notes, play it, and repeat the process again and again. Before you know it, your net
# will compose a melody such as the one produced by Google’s Magenta project.
# Similarly, RNNs can generate sentences, image captions, and much more. The result is not
# exactly Shakespeare or Mozart yet, but who knows what they will produce a few years
# from now?
# In this chapter, we will look at the fundamental concepts underlying RNNs, the main
# problem they face (namely, vanishing/exploding gradients, discussed in Chapter 11),
# and the solutions widely used to fight it: LSTM and GRU cells. Along the way, as
# always, we will show how to implement RNNs using TensorFlow. Finally, we will take
# a look at the architecture of a machine translation system.
#
#
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Exercises",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Exercises(HierNode):
def __init__(self):
super().__init__("Exercises")
self.add(Content(), "content")
# eof
|
[
"lawrence.mcafee@gmail.com"
] |
lawrence.mcafee@gmail.com
|
c60c6d00ce9043829cb99f108e1c608e2884c871
|
12a21e1ae1ad776f55ce439c6397b9bcbbd57b4a
|
/scrapeoutage.py
|
b003993083c0d4cbce4c135def8c1e737a893cd7
|
[
"MIT"
] |
permissive
|
mboehn/scrapeoutage
|
03eb7ddae5d3bdbd5256aa9cdd7d67336aa1ee36
|
3b9bebb3a8e942b4293c917aad72728d54bff3f5
|
refs/heads/master
| 2016-09-15T08:34:05.395478
| 2016-05-05T11:55:26
| 2016-05-05T11:55:26
| 14,965,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## Set to True if testing and there only is planned outages:
DO_PLANNED=False
## This is a list of the places you want to know about
myplaces = [ u'Tjøme', u'Tønsberg', u'Svelvik', u'Sande', u'Hof', u'Holmestrand', u'Horten', u'Re', u'Andebu', u'Lardal', u'Sandefjord', u'Nøtterøy', u'Larvik', u'Stokke']
## URL to outagemap
url = 'http://powercompany.example/outagemap/geoserver-api/content/outageTableData.json'
#####################################
import requests
import json
import sys
r = requests.get(url)
r.encoding = 'utf_8'
data = json.loads(r.text)
faultanswer = None
faulttotal = None
plananswer = None
plantotal = None
for mainarea in data['mainAreas']:
area = mainarea['area']
if not area in myplaces:
continue
faults = mainarea['faultrunning']['nr']
faultsaffect = mainarea['faultrunning']['customers']
plans = mainarea['planrunning']['nr']
plansaffect = mainarea['planrunning']['customers']
if int(faults):
if faultanswer:
faultanswer += ", {} [{}/{}]".format(area, faults, faultsaffect)
faulttotal += int(faultsaffect)
else:
faultanswer = "{} [{}/{}]".format(area, faults, faultsaffect)
faulttotal = int(faultsaffect)
if int(plans) and DO_PLANNED:
if plananswer:
plananswer += ", {} [{}/{}]".format(area, plans, plansaffect)
plantotal += int(plansaffect)
else:
plananswer = "Planned: {} [{}/{}]".format(area, plans, plansaffect)
plantotal = int(plansaffect)
if (not faultanswer and not plananswer):
sys.exit(0)
elif faultanswer:
print(faultanswer)
sys.exit(100)
elif plananswer:
print(plananswer)
sys.exit(101)
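# Example (illustrative): with one running fault in Tønsberg affecting 245
# customers, the script prints "Tønsberg [1/245]" and exits with status 100;
# planned outages (exit status 101) are only reported when DO_PLANNED is True.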
|
[
"mathias@grytemark.no"
] |
mathias@grytemark.no
|
ac2042e35130d75a34e3ee03fce8e6df4be09e81
|
bad9237c2cf3261beba118c25e0b5e18cb4e9361
|
/Day4/dsw2/dsw2/wsgi.py
|
e924571cf4f487c2a1a2019af61e0a402b314890
|
[] |
no_license
|
limjh0513/dgsw_html-css
|
9b0fc6eef742b2c53f7c02ef1cab523b8910c0f7
|
8ce8e5125c3851897a887fda23c78fe3cb09bb46
|
refs/heads/master
| 2022-12-06T19:59:06.002575
| 2020-09-04T11:50:55
| 2020-09-04T11:50:55
| 282,757,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dsw2.settings')
application = get_wsgi_application()
|
[
"ljh040513@naver.com"
] |
ljh040513@naver.com
|
5214004ac55a355b07c8bc226a3c1b333ab947fb
|
d1a149731a15823e870b464662eabd6d601c3a38
|
/CalcClassAndScientificFunction.py
|
ca38b187b79a12da756063838c951ef65f874d4e
|
[] |
no_license
|
rahmausama/Calculator
|
44522af2d53990131d234776693ce1450857f6e7
|
777976ea7a2b3b2f656c0834120f9c6efa940ebc
|
refs/heads/main
| 2023-02-09T10:23:07.394380
| 2021-01-02T18:41:58
| 2021-01-02T18:41:58
| 326,257,110
| 0
| 0
| null | 2021-01-02T19:35:20
| 2021-01-02T19:35:20
| null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
# NOTE (added): this snippet assumes a Tkinter UI defined elsewhere, where
# `textDisplay` is the calculator's Entry widget. The imports below are added so
# the class is otherwise self-contained; `math` is needed by pi/tau/e.
import math
from tkinter import END
class Calc():
def __init__(self):
self.total=0
self.current=''
self.input_value=True
self.check_sum=False
self.op=''
self.result=False
def numberEnter(self, num):
self.result=False
firstnum=textDisplay.get()
secondnum=str(num)
if self.input_value:
self.current = secondnum
self.input_value=False
else:
if secondnum == '.':
if secondnum in firstnum:
return
self.current = firstnum+secondnum
self.display(self.current)
def sum_of_total(self):
self.result=True
self.current=float(self.current)
if self.check_sum==True:
self.valid_function()
else:
self.total=float(textDisplay.get())
def display(self, value):
textDisplay.delete(0, END)
textDisplay.insert(0, value)
def valid_function(self):
if self.op == "add":
self.total += self.current
if self.op == "sub":
self.total -= self.current
if self.op == "multi":
self.total *= self.current
if self.op == "divide":
self.total /= self.current
if self.op == "mod":
self.total %= self.current
self.input_value=True
self.check_sum=False
self.display(self.total)
def pi(self):
self.result = False
self.current = math.pi
self.display(self.current)
def tau(self):
self.result = False
self.current = math.tau
self.display(self.current)
def e(self):
self.result = False
self.current = math.e
self.display(self.current)
def mathPM(self):
self.result = False
self.current = -(float(textDisplay.get()))
self.display(self.current)
|
[
"noreply@github.com"
] |
rahmausama.noreply@github.com
|
7f2852b0f4be4f781576595cef92b728b46f471b
|
c2ce7155a393e1056b5fdc4d3f9b9a89046e9285
|
/scripts/pipeline.py
|
3d981873f2ffb78575182feb447d1effe251678a
|
[
"MIT"
] |
permissive
|
blyucs/aw_nas
|
9c068dab1bd84a35e58a4c426f7c852a67b93882
|
8a32196ce342b8ad9e3885895735d1286e25beba
|
refs/heads/master
| 2023-08-19T11:00:00.526229
| 2021-08-21T05:16:13
| 2021-08-21T05:16:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,413
|
py
|
from __future__ import print_function
import os
import re
import random
import shutil
import logging
import argparse
import subprocess
import yaml
import numpy as np
DERIVE_N = 10
def _get_genotype_substr(genotypes):
return re.search(r".+?Genotype\((.+)\)", genotypes).group(1)
def _get_perf(log, type_="cnn"):
if type_ == "cnn":
out = subprocess.check_output("grep -Eo 'valid_acc [0-9.]+' {}".format(log) + \
" | tail -n 1 | awk '{print $NF}'", shell=True)
logging.info(out)
acc = float(out)
return acc
raise NotImplementedError("unknown type: {}".format(type_))
def call_search(cfg, gpu, seed, train_dir, vis_dir, save_every):
if seed is None:
seed = random.randint(1, 999999)
logging.info("train seed: %s", str(seed))
logging.info(("awnas search {cfg} --gpu {gpu} --seed {seed} --save-every {save_every} "
"--train-dir {train_dir} --vis-dir {vis_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, vis_dir=vis_dir, save_every=save_every))
subprocess.check_call(("awnas search {cfg} --gpu {gpu} --seed {seed} --save-every {save_every} "
"--train-dir {train_dir} --vis-dir {vis_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, vis_dir=vis_dir, save_every=save_every),
shell=True)
# derive
def call_derive(cfg, gpu, seed, load, out_file, n):
if seed is None:
seed = random.randint(1, 999999)
logging.info("train seed: %s", str(seed))
logging.info(("awnas derive {cfg} --load {load} --gpu {gpu} --seed {seed}"
" --test -n {n} -o {out_file}")\
.format(cfg=cfg, load=load, gpu=gpu, seed=seed,
out_file=out_file, n=n))
subprocess.check_call(("awnas derive {cfg} --load {load} --gpu {gpu} --seed {seed}"
" --test -n {n} -o {out_file}")\
.format(cfg=cfg, load=load, gpu=gpu, seed=seed,
out_file=out_file, n=n),
shell=True)
# train
def call_train(cfg, gpu, seed, train_dir, save_every):
if seed is None:
seed = random.randint(1, 999999)
logging.info("train seed: %s", str(seed))
save_str = "" if save_every is None else "--save-every {}".format(save_every)
logging.info(("awnas train {cfg} --gpus {gpu} --seed {seed} {save_str} "
"--train-dir {train_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, save_str=save_str))
subprocess.check_call(("awnas train {cfg} --gpus {gpu} --seed {seed} {save_str} "
"--train-dir {train_dir}")\
.format(cfg=cfg, gpu=gpu, seed=seed,
train_dir=train_dir, save_str=save_str),
shell=True)
def make_surrogate_cfgs(derive_out_file, template_file, sur_dir):
with open(template_file, "r") as f:
cfg_template = yaml.load(f)
with open(derive_out_file, "r") as f:
genotypes_list = yaml.load(f)
for ind, genotypes in enumerate(genotypes_list):
sur_fname = os.path.join(sur_dir, "{}.yaml".format(ind))
genotypes = _get_genotype_substr(genotypes)
cfg_template["final_model_cfg"]["genotypes"] = genotypes
with open(sur_fname, "w") as of:
yaml.safe_dump(cfg_template, of)
def get_sur_perfs(sur_dir):
final_perfs = []
for ind in range(DERIVE_N):
surrogate_dir = os.path.join(sur_dir, str(ind))
log = os.path.join(surrogate_dir, "train.log")
final_perfs.append(_get_perf(log, type_=args.type))
return final_perfs
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", required=True)
parser.add_argument("--exp-name", required=True, type=str)
parser.add_argument("--type", default="cnn", choices=["cnn", "rnn"], type=str, help="(default: %(default)s)")
parser.add_argument("--base-dir", default=os.path.abspath(os.path.expanduser("~/awnas/results")),
type=str, help="results will be saved to `base_dir`/`exp_name` (default: %(default)s)")
parser.add_argument("--seed", type=int, help="the default seeds of all tasks, "
"if not specified explicitly.")
parser.add_argument("--search-cfg", required=True, type=str)
parser.add_argument("--search-memory", default=6000, type=int)
parser.add_argument("--search-util", default=30, type=int)
parser.add_argument("--search-seed", default=None, type=int)
parser.add_argument("--search-save-every", default=20, type=int)
parser.add_argument("--derive-memory", default=3000, type=int)
parser.add_argument("--derive-util", default=0, type=int)
parser.add_argument("--derive-seed", default=123, type=int)
parser.add_argument("--train-surrogate-cfg", required=True, type=str, help="train surrogate config file")
parser.add_argument("--train-surrogate-memory", default=6000, type=int)
parser.add_argument("--train-surrogate-util", default=0, type=int)
parser.add_argument("--train-surrogate-seed", default=None, type=int)
parser.add_argument("--train-final-cfg", required=True, type=str, help="train final config file")
parser.add_argument("--train-final-memory", default=10000, type=int)
parser.add_argument("--train-final-util", default=70, type=int)
parser.add_argument("--train-final-seed", default=None, type=int)
args = parser.parse_args()
args.search_cfg = os.path.abspath(args.search_cfg)
args.train_surrogate_cfg = os.path.abspath(args.train_surrogate_cfg)
args.train_final_cfg = os.path.abspath(args.train_final_cfg)
gpu = args.gpu
exp_name = args.exp_name
# result dirs
result_dir = os.path.join(args.base_dir, exp_name)
search_dir = os.path.join(result_dir, "search")
sur_dir = os.path.join(result_dir, "train_surrogate")
final_dir = os.path.join(result_dir, "train_final")
if not os.path.exists(result_dir):
os.makedirs(os.path.join(result_dir))
os.makedirs(search_dir)
os.makedirs(sur_dir)
os.makedirs(final_dir)
search_cfg = os.path.join(result_dir, "search.yaml")
train_surrogate_template = os.path.join(result_dir, "train_surrogate.template")
train_final_template = os.path.join(result_dir, "train_final.template")
shutil.copy(args.search_cfg, search_cfg)
shutil.copy(args.train_surrogate_cfg, train_surrogate_template)
shutil.copy(args.train_final_cfg, train_final_template)
# # search
vis_dir = os.path.join(result_dir, "vis")
call_search(search_cfg, gpu, args.search_seed, search_dir, vis_dir, args.search_save_every)
# derive
max_epoch = max([int(n) for n in os.listdir(search_dir) if n.isdigit()])
final_checkpoint = os.path.join(search_dir, str(max_epoch))
derive_out_file = os.path.join(search_dir, "derive.yaml")
call_derive(search_cfg, gpu, args.derive_seed, final_checkpoint, derive_out_file, DERIVE_N)
# make surrogate cfgs
make_surrogate_cfgs(derive_out_file, train_surrogate_template, sur_dir)
# train surrogate
for index in range(DERIVE_N):
sur_fname = os.path.join(sur_dir, "{}.yaml".format(index))
train_sur_dir = os.path.join(sur_dir, str(index))
call_train(sur_fname, gpu, args.train_surrogate_seed, train_sur_dir, save_every=None)
# choose best
sur_perfs = get_sur_perfs(sur_dir)
best_ind = np.argmax(sur_perfs)
with open(derive_out_file, "r") as f:
genotypes_list = yaml.load(f)
best_geno = _get_genotype_substr(genotypes_list[best_ind])
with open(os.path.join(sur_dir, "sur_res.txt"), "w") as of:
of.write("\n".join(["{} {}".format(ind, perf)
for ind, perf in
sorted(list(enumerate(sur_perfs)), key=lambda item: -item[1])]))
# dump configuration of final train
with open(train_final_template, "r") as f:
base_cfg = yaml.load(f)
base_cfg["final_model_cfg"]["genotypes"] = best_geno
train_final_cfg = os.path.join(final_dir, "train.yaml")
with open(train_final_cfg, "w") as of:
yaml.safe_dump(base_cfg, of)
# train final
total_epochs = base_cfg["final_trainer_cfg"]["epochs"]
train_final_dir = os.path.join(final_dir, "train")
call_train(train_final_cfg, gpu, args.train_final_seed, train_final_dir, save_every=total_epochs // 4)
log = os.path.join(train_final_dir, "train.log")
final_valid_perf = _get_perf(log, type_=args.type)
|
[
"foxdoraame@gmail.com"
] |
foxdoraame@gmail.com
|
1d59a9449e1faa81ca0cc15a68fdd61b3aed9c02
|
29ad60d0f4e4207aaf0374f811c9728b16942da2
|
/Report/files/switchoff.py
|
9ebd115adc375c7740070dff463253315afc67ba
|
[] |
no_license
|
LCAV/AcousticRobot
|
c97e03bc06c59650556832794aca38cfe2d873a5
|
9f33434f64cb882897b1e0e3b8ad01642e91148a
|
refs/heads/master
| 2021-01-10T16:46:51.989150
| 2017-10-11T08:26:58
| 2017-10-11T08:26:58
| 43,871,589
| 2
| 5
| null | 2017-03-06T16:15:09
| 2015-10-08T07:58:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
import RPi.GPIO as GPIO
import os
import time
#set up GPIO using BCM numbering
GPIO.setmode(GPIO.BCM)
GPIO.setup(10,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
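# NOTE (added): the callback below samples pin 10 roughly once per second for up
# to 7 seconds; if the button reads high on at least 3 samples the Pi halts,
# otherwise it reboots.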
#function called on pin interrupt
def button_triggered(channel):
counter = 0
counter_on = 0
while (counter <= 6):
time.sleep(1)
counter+=1
if (GPIO.input(10)):
counter_on+=1
if (counter_on >= 3):
break
if (counter_on >= 3):
print("switchoff.py: Raspberry shutting down now")
os.system("sudo halt")
elif (counter_on < 3):
print("switchoff.py: Rapsberry is going to reboot now")
os.system("sudo reboot")
#setup pin interrupt
GPIO.add_event_detect(10,GPIO.RISING,callback=button_triggered,bouncetime=300)
#wait forever
while True:
time.sleep(0.001)
GPIO.cleanup()
|
[
"frederike.duembgen@epfl.ch"
] |
frederike.duembgen@epfl.ch
|
e0680f343ee85bb87951a871d65120840818f049
|
33c497917be26cfddd5427dbb7cbb36ada1d713e
|
/Packs/Cybersixgill-ActionableAlerts/Integrations/CybersixgillActionableAlerts/CybersixgillActionableAlerts.py
|
8bafe36ad2a821d171cc6bb11db009b66daa106e
|
[
"MIT"
] |
permissive
|
ChuckWoodraska/content
|
8caf6c7b2f9f94745c9ca5941c4a86232bfea5e1
|
cd4b2e396ab229c1298443018073628e11832335
|
refs/heads/master
| 2021-12-21T11:24:07.360944
| 2021-12-20T15:04:37
| 2021-12-20T15:04:37
| 440,306,120
| 0
| 0
|
MIT
| 2021-12-20T21:03:46
| 2021-12-20T21:03:46
| null |
UTF-8
|
Python
| false
| false
| 9,584
|
py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import copy
import requests
from sixgill.sixgill_request_classes.sixgill_auth_request import SixgillAuthRequest
from sixgill.sixgill_actionable_alert_client import SixgillActionableAlertClient
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
CHANNEL_CODE = '7698e8287dfde53dcd13082be750a85a'
MAX_INCIDENTS = 100
DEFAULT_INCIDENTS = '50'
MAX_DAYS_BACK = 30
DEFAULT_DAYS_BACK = '1'
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
DEMISTO_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
THREAT_LEVEL_TO_SEVERITY = {
'imminent': 3,
'emerging': 2,
'unknown': 0
}
TO_DEMISTO_STATUS = {
'in_treatment': 1,
'resolved': 2,
'treatment_required': 0
}
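# NOTE (added): the two maps above translate Cybersixgill threat levels and alert
# statuses into the integer severity/status codes the Demisto platform expects.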
VERIFY = not demisto.params().get("insecure", True)
SESSION = requests.Session()
''' HELPER FUNCTIONS '''
def get_incident_init_params():
params_dict = {
'threat_level': demisto.params().get('threat_level', None),
'threat_type': demisto.params().get('threat_type', None)
}
return {param_k: param_v for param_k, param_v in params_dict.items() if param_v is not None}
def item_to_incident(item_info, sixgill_alerts_client):
incident: Dict[str, Any] = dict()
incidents = []
items = []
# get fields that are shared in case of sub alerts
add_sub_alerts_shared_fields(incident, item_info)
sub_alerts = item_info.pop('sub_alerts', None)
if sub_alerts:
# add any sub alert as incident
for sub_alert in sub_alerts:
sub_item = copy.deepcopy(item_info)
sub_item.update(sub_alert)
items.append(sub_item)
else:
items.append(item_info)
for item in items:
sub_incident = copy.deepcopy(incident)
# add all other fields
add_sub_alerts_fields(sub_incident, item, sixgill_alerts_client)
sub_incident['rawJSON'] = json.dumps(item)
incidents.append(sub_incident)
return incidents
def add_sub_alerts_shared_fields(incident, item_info):
incident['name'] = item_info.get('title', 'Cybersixgill Alert')
incident_date = datetime.strptime(item_info.get('date'), DATETIME_FORMAT)
incident['occurred'] = incident_date.strftime(DEMISTO_DATETIME_FORMAT)
incident['severity'] = THREAT_LEVEL_TO_SEVERITY[item_info.get('threat_level', 'unknown')]
incident['CustomFields'] = {
'cybersixgillthreatlevel': item_info.get('threat_level', 'unknown'),
'cybersixgillthreattype': item_info.get('threats', []),
'cybersixgillassessment': item_info.get('assessment', None),
'cybersixgillrecommendations': '\n\n-----------\n\n'.join(item_info.get('recommendations', [])),
'incidentlink': f"https://portal.cybersixgill.com/#/?actionable_alert={item_info.get('id', '')}"
}
def add_sub_alerts_fields(incident, item_info, sixgill_alerts_client):
status = item_info.get('status', {}).get('name', 'treatment_required')
incident['status'] = TO_DEMISTO_STATUS[status]
content_item = {'creator': None, 'title': '', 'content': '', 'description': item_info.get('description', '')}
# cve alert
if item_info.get('content_type', '') == 'cve_item':
content_item['content'] = f'https://portal.cybersixgill.com/#/cve/{item_info.get("additional_info",{}).get("cve_id", "")}'
else:
content = sixgill_alerts_client.get_actionable_alert_content(actionable_alert_id=item_info.get('id'),
aggregate_alert_id=item_info.get('aggregate_alert_id', None))
# get item full content
content = content.get('items', None)
if content:
if content[0].get('_id'):
es_items = [item['_source'] for item in content if item['_id'] == item_info['es_id']]
if es_items:
content_item['title'] = es_items[0].get('title')
content_item['content'] = es_items[0].get('content')
content_item['creator'] = es_items[0].get('creator')
else:
# github alert
content_item['content'] = '\n\n-----------\n\n'.join(
[f'Repository name: {github_item.get("Repository name", "")}\nCustomer Keywords:'
f' {github_item.get("Customer Keywords", "")}\n URL: {github_item.get("URL", "")}'
for github_item in content])
incident['details'] = f"{content_item.get('description')}\n\n{content_item.get('title', '')}\n" \
f"\n{content_item.get('content', '')}"
triggered_assets = []
for key, value in item_info.get('additional_info', {}).items():
if 'matched_' in key:
triggered_assets.extend(value)
incident['CustomFields'].update({
'cybersixgillstatus': status.replace('_', ' ').title(),
'cybersixgillsite': item_info.get('site', None),
'cybersixgillactor': content_item.get('creator', None),
'cybersixgilltriggeredassets': triggered_assets
})
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module():
"""
Performs basic Auth request
"""
response = SESSION.send(request=SixgillAuthRequest(demisto.params()['client_id'],
demisto.params()['client_secret'],
CHANNEL_CODE).prepare(), verify=VERIFY)
if not response.ok:
raise Exception("Auth request failed - please verify client_id, and client_secret.")
def fetch_incidents():
last_run = demisto.getLastRun()
if 'last_fetch_time' in last_run:
last_fetch_time = datetime.strptime(last_run['last_fetch_time'], DATETIME_FORMAT)
demisto.info(f'Found last run, fetching new alerts from {last_fetch_time}')
else:
days_back = int(demisto.params().get('first_fetch_days', DEFAULT_DAYS_BACK))
if days_back > MAX_DAYS_BACK:
demisto.info(f'Days back({days_back}) is larger than the maximum, setting to {MAX_DAYS_BACK}')
days_back = MAX_DAYS_BACK
last_fetch_time = datetime.now() - timedelta(days=days_back)
demisto.info(f'First run, fetching alerts from {last_fetch_time}')
max_incidents_to_return = int(demisto.params().get('max_fetch', DEFAULT_INCIDENTS))
if max_incidents_to_return > MAX_INCIDENTS:
demisto.info(f'Max incidents({max_incidents_to_return}) is larger than the maximum, setting to {MAX_INCIDENTS}')
max_incidents_to_return = MAX_INCIDENTS
sixgill_alerts_client = SixgillActionableAlertClient(client_id=demisto.params()['client_id'],
client_secret=demisto.params()['client_secret'],
channel_id=CHANNEL_CODE,
logger=demisto,
session=SESSION,
verify=VERIFY)
filter_alerts_kwargs = get_incident_init_params()
incidents = []
items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, **filter_alerts_kwargs)
newest_incident_date = datetime.strptime(items[0].get('date'), DATETIME_FORMAT)
offset = 0
items_to_add = []
if newest_incident_date > last_fetch_time:
# finding all new alerts since last fetch time
while items:
for item in items:
if datetime.strptime(item.get('date'), DATETIME_FORMAT) > last_fetch_time:
items_to_add.append(item)
if len(items_to_add) - offset == len(items):
offset += len(items)
items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, offset=offset,
**filter_alerts_kwargs)
else:
items = []
demisto.info(f'Found {len(items_to_add)} new alerts since {last_fetch_time}')
# getting more info about oldest ~max_incidents_to_return(can be more because of sub alerts)
if len(items_to_add):
items_to_add.reverse()
newest_incident_date = items_to_add[-1].get('date')
for item in items_to_add:
item_info = sixgill_alerts_client.get_actionable_alert(actionable_alert_id=item.get('id'))
item_info['date'] = item.get('date')
new_incidents = item_to_incident(item_info, sixgill_alerts_client)
incidents.extend(new_incidents)
if len(incidents) >= max_incidents_to_return:
newest_incident_date = item.get('date')
break
demisto.info(f'Adding {len(incidents)} to demisto')
demisto.incidents(incidents)
if len(incidents):
demisto.info(f'Update last fetch time to: {newest_incident_date}')
demisto.setLastRun({
'last_fetch_time': newest_incident_date
})
''' COMMANDS MANAGER / SWITCH PANEL '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
try:
SESSION.proxies = handle_proxy()
command = demisto.command()
if command == 'test-module':
test_module()
demisto.results('ok')
elif command == "fetch-incidents":
fetch_incidents()
except Exception as e:
return_error("Failed to execute {} command. Error: {}".format(demisto.command(), str(e)))
|
[
"noreply@github.com"
] |
ChuckWoodraska.noreply@github.com
|
105a53ca087440aa93635c0acd7192b5693a8611
|
44c17c1e394fc1b62fdb5020ea64300ea5859cac
|
/qihuan_web/blog/migrations/0002_auto_20180709_1611.py
|
6b4187ffb7539b8464d07906d5eaf11ac15d3d8d
|
[] |
no_license
|
wangdengkai/qihuan_website
|
08d2140593fa9c8c9e62487e85af7ac44c055ede
|
0d0c11faf857466025eb5f1ab1a0731b5b733926
|
refs/heads/master
| 2022-12-10T01:16:30.831353
| 2018-07-27T01:26:19
| 2018-07-27T01:26:19
| 139,438,549
| 0
| 0
| null | 2022-12-08T02:19:03
| 2018-07-02T12:09:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
# Generated by Django 2.0.7 on 2018-07-09 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='like_number',
field=models.IntegerField(default=0, verbose_name='点赞数量'),
),
migrations.AddField(
model_name='post',
name='read_number',
field=models.IntegerField(default=0, verbose_name='阅读数量'),
),
]
|
[
"wangdengkaiwy@163.com"
] |
wangdengkaiwy@163.com
|
bf58c5db04488963f4c42364ac29e2c62a15cba7
|
5992c932bf01602a0f33710113659e928cb15f93
|
/hello.py
|
527dd3bf88b2b415a66ab35ff5b57ac777c88ec7
|
[] |
no_license
|
ArystanK/stepik
|
9efe280f3503067123c12455f00020dc994702cc
|
52cc2d6fbe956887f02cbceb6254501e9c55e4c4
|
refs/heads/master
| 2023-03-31T14:57:53.305721
| 2021-03-13T12:40:44
| 2021-03-13T12:40:44
| 347,365,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
#!/usr/bin/env python3
#from urllib.parse import parse_qs
# python 2
#from cgi import parse_qs, escape
from re import sub
def app(environ, start_response):
"""wsgi minimal app.
$ gunicorn hello:app
"""
#parameters = parse_qs(environ.get('QUERY_STRING', ''))
#output = ''
#for p in parameters:
# for r in parameters[p]:
# output += p + '=' + r + '\n'
# no parsing needed
output = sub('&', '\n', environ.get('QUERY_STRING', ''))
start_response('200 OK', [('Content-Type', 'text/plain')])
return iter([str.encode(output)])
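# Example (assuming gunicorn serving on its default 127.0.0.1:8000):
#   $ curl 'http://127.0.0.1:8000/?a=1&b=2'   ->   "a=1\nb=2"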
|
[
"aarystan@outlook.com"
] |
aarystan@outlook.com
|
2a6074103d161af980d28a05fcd375eb1031fc3c
|
72ef62e9a63ebbf6199d2577bc054b35a60d2ec9
|
/sympy/stats/joint_rv_types.py
|
a0efe5989a5e2a9b7b4d081701816d2a43ebf874
|
[
"BSD-3-Clause"
] |
permissive
|
aGzDelusion/sympy
|
7d2f1cfb954e5a098bc1c1b865ade108c94560f7
|
f384c734dfbeb9ac5479af626152bfcb4486e10f
|
refs/heads/master
| 2020-03-22T14:00:57.954529
| 2018-07-08T06:24:04
| 2018-07-08T06:24:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
from sympy import sympify, S, pi, sqrt, exp, Lambda, Indexed, Symbol
from sympy.stats.rv import _value_check
from sympy.stats.joint_rv import JointDistribution, JointPSpace
from sympy.matrices.dense import Matrix
from sympy.matrices.expressions.determinant import det
# __all__ = ['MultivariateNormal',
# 'MultivariateLaplace',
# 'MultivariateT',
# 'NormalGamma'
# ]
def multivariate_rv(cls, sym, *args):
sym = sympify(sym)
args = list(map(sympify, args))
dist = cls(*args)
dist.check(*args)
return JointPSpace(sym, dist).value
#-------------------------------------------------------------------------------
# Multivariate Normal distribution ---------------------------------------------------------
class MultivariateNormalDistribution(JointDistribution):
_argnames = ['mu', 'sigma']
is_Continuous=True
@property
def set(self):
k = len(self.mu)
return S.Reals**k
def check(self, mu, sigma):
mu, sigma = Matrix([mu]), Matrix(sigma)
_value_check(len(mu) == len(sigma.col(0)),
"Size of the mean vector and covariance matrix are incorrect.")
#check if covariance matrix is positive definite or not.
_value_check(all([i > 0 for i in sigma.eigenvals().keys()]),
"The covariance matrix must be positive definite. ")
def pdf(self, *args):
mu, sigma = Matrix(self.mu), Matrix(self.sigma)
k = len(mu)
args = Matrix(args)
return S(1)/sqrt((2*pi)**(k)*det(sigma))*exp(
-S(1)/2*(mu - args).transpose()*(sigma**(-1)*\
(mu - args)))[0]
def marginal_distribution(self, indices, sym):
sym = Matrix([Symbol(str(Indexed(sym, i))) for i in indices])
_mu, _sigma = Matrix(self.mu), Matrix(self.sigma)
k = len(self.mu)
for i in range(k):
if i not in indices:
_mu.row_del(i)
_sigma.col_del(i)
_sigma.row_del(i)
return Lambda(sym, S(1)/sqrt((2*pi)**(len(_mu))*det(_sigma))*exp(
-S(1)/2*(_mu - sym).transpose()*(_sigma**(-1)*\
(_mu - sym)))[0])
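# Illustrative sketch (added, not part of the original module): constructing a
# standard bivariate normal random symbol through the helper defined above:
#   N = multivariate_rv(MultivariateNormalDistribution, 'N', [0, 0], [[1, 0], [0, 1]])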
|
[
"akash.9712@gmail.com"
] |
akash.9712@gmail.com
|
835034ca52476b61b60282faf5eee452c6593c69
|
260b028aaab18b06286b0cf6eae94eafa3202c19
|
/bootstrapFigs100DiffTestSizes.py
|
89e47fdd4c95d79764f906060a36fd78d35f3769
|
[] |
no_license
|
YaleMRRC/CPMBaggingAnalysis
|
f28306bab0ee6aa3c9dfe359906dec1f0a3255b1
|
37fa1ca21667cf8708684cec2fc5b27eef2120f2
|
refs/heads/master
| 2023-03-19T04:59:54.500948
| 2021-03-10T17:08:40
| 2021-03-10T17:08:40
| 275,262,013
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78,130
|
py
|
import h5py
import time
import os, sys
import glob
from functools import reduce
import pickle
import argparse
import pdb
import warnings
import numpy as np
import pandas as pd
import random
from scipy import stats,io
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.ticker import FormatStrFormatter
import seaborn as sns
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Pool
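# NOTE (added): this script aggregates saved CPM model results (10-fold, 5-fold,
# split-half, LOO, train-only, bootstrap, and subsample variants) across the 20
# resampled datasets (iter01-iter20) and renders the comparison figures.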
def to_percent(y, position):
# Ignore the passed in position. This has the effect of scaling the default
# tick locations.
s = str(100 * round(y,2))
# The percent symbol needs escaping in latex
if matplotlib.rcParams['text.usetex'] is True:
return s + r'$\%$'
else:
return s + '%'
iterResGather=[]
bsEdgesGather=[]
threshPerformGather=[]
cvPerformGather = []
histDenGatherBoot=[]
histAbsGatherBoot=[]
histDenGatherS300=[]
histAbsGatherS300=[]
histDenGatherS200=[]
histAbsGatherS200=[]
edgeDfGather = []
rvalDict={}
for itr in range(1,21):
rvalDict[itr] = {}
globalIpdir = '/path/to/iter'+str(itr).zfill(2)
globalOpdir = '/path/to/figs/'
edgeCountDf = pd.DataFrame()
edgemask = np.triu(np.ones([268,268]),k=1).flatten().astype(bool)
## Ten fold
print('Ten Fold')
tenFoldPath = os.path.join(globalIpdir,'10kCV.npy')
tenFoldRes = np.load(tenFoldPath, allow_pickle = True).item()
edgeCountDf['tenFold'] = np.stack(tenFoldRes['res'][0]).reshape([1000,71824])[:,edgemask].sum(axis=1)
## Train Only
print('Train Only')
trainPath = os.path.join(globalIpdir,'trainOnly.npy')
trainRes = np.load(trainPath, allow_pickle = True)
edgeCountDf['trainOnly'] = np.pad(np.expand_dims(trainRes[2][edgemask].sum(),axis=0).astype('object'),[0,999],constant_values=(np.nan,))
## Splithalf CV
print('Splithalf')
splitHalfPath = os.path.join(globalIpdir,'splithalfCV.npy')
splitHalfRes = np.load(splitHalfPath, allow_pickle = True).item()
edgeCountDf['splitHalf'] = np.pad(np.stack(splitHalfRes['res'][0]).reshape([200,71824])[:,edgemask].sum(axis=1).astype('object'),[0,800],constant_values=(np.nan,))
## Five fold
print('Five fold')
fiveFoldPath = os.path.join(globalIpdir,'5kCV.npy')
fiveFoldRes = np.load(fiveFoldPath, allow_pickle = True).item()
edgeCountDf['fiveFold'] = np.pad(np.stack(fiveFoldRes['res'][0]).reshape([500,71824])[:,edgemask].sum(axis=1).astype('object'),[0,500],constant_values=(np.nan,))
## LOO
print('LOO')
looPath = os.path.join(globalIpdir,'looCV.npy')
looRes = np.load(looPath, allow_pickle = True).item()
edgeCountDf['LOO'] = np.pad(np.stack(looRes['res'][0]).reshape([400,71824])[:,edgemask].sum(axis=1).astype('object'),[0,600],constant_values=(np.nan,))
## Bootstrap
print('Bootstrap')
bootPath = os.path.join(globalIpdir,'bootstrap.npy')
bootRes = np.load(bootPath, allow_pickle = True).item()
bootRes = bootRes['res']
bootedges = np.array(np.array(bootRes[1]).mean(axis=0) > 0)[edgemask]
bootmodel = np.mean(bootRes[3],axis=0)
## Subsample
print('Subsample 300')
sub300Path = os.path.join(globalIpdir,'subsample300.npy')
sub300Res = np.load(sub300Path, allow_pickle = True).item()
sub300Res = sub300Res['res']
sub300edges = np.array(np.array(sub300Res[1]).mean(axis=0) > 0)[edgemask]
sub300model = np.mean(sub300Res[3],axis=0)
## Subsample
print('Subsample 200')
sub200Path = os.path.join(globalIpdir,'subsample200.npy')
sub200Res = np.load(sub200Path, allow_pickle = True).item()
sub200Res = sub200Res['res']
sub200edges = np.array(np.array(sub200Res[1]).mean(axis=0) > 0)[edgemask]
sub200model = np.mean(sub200Res[3],axis=0)
# SplitHalf
posEdgeMasktwoK = np.concatenate(splitHalfRes['res'][0])
posFitstwoK = np.concatenate(splitHalfRes['res'][5])
# 5k
posEdgeMaskfiveK = np.concatenate(fiveFoldRes['res'][0])
posFitsfiveK = np.concatenate(fiveFoldRes['res'][5])
#10K
posEdgeMasktenK = np.concatenate(tenFoldRes['res'][0])
posFitstenK = np.concatenate(tenFoldRes['res'][5])
#loo
posEdgeMaskloo = np.concatenate(looRes['res'][0])
posFitsloo = np.concatenate(looRes['res'][5])
#train only
trainMod = trainRes[0]
trainEdges = trainRes[2]
## CV Performance evaluated across all folds
cvPerfDf = pd.DataFrame()
# 2K
posBehavRes = np.reshape(splitHalfRes['res'][2],[100,400])
actBehavRes = np.reshape(splitHalfRes['res'][4],[100,400])
cvPerf2K = np.array([np.corrcoef(posBehavRes[i,:],actBehavRes[i,:])[0,1] for i in range(0,100)])
# 5K
posBehavRes = np.reshape(fiveFoldRes['res'][2],[100,400])
actBehavRes = np.reshape(fiveFoldRes['res'][4],[100,400])
cvPerf5K = np.array([np.corrcoef(posBehavRes[i,:],actBehavRes[i,:])[0,1] for i in range(0,100)])
# 10K
posBehavRes = np.reshape(tenFoldRes['res'][2],[100,400])
actBehavRes = np.reshape(tenFoldRes['res'][4],[100,400])
cvPerf10K = np.array([np.corrcoef(posBehavRes[i,:],actBehavRes[i,:])[0,1] for i in range(0,100)])
# LOO
posBehavRes = np.reshape(looRes['res'][2],[400])
actBehavRes = np.reshape(looRes['res'][4],[400])
cvPerfloo = np.corrcoef(posBehavRes,actBehavRes)[0,1]
cvPerfDf['splitHalf'] = cvPerf2K
cvPerfDf['fiveFold'] = cvPerf5K
cvPerfDf['tenFold'] = cvPerf10K
cvPerfDf['LOO'] = np.pad(np.expand_dims(cvPerfloo,axis=0).astype('object'),[0,99],constant_values=(np.nan,))
cvPerformGather.append(cvPerfDf)
bootedgesAv = np.array(bootRes[1])[:,edgemask].mean(axis=0)
sub300edgesAv = np.array(sub300Res[1])[:,edgemask].mean(axis=0)
sub200edgesAv = np.array(sub200Res[1])[:,edgemask].mean(axis=0)
posEdgeMasktwoKAv = np.concatenate(splitHalfRes['res'][0])[:,edgemask].mean(axis=0)
posEdgeMaskfiveKAv = np.concatenate(fiveFoldRes['res'][0])[:,edgemask].mean(axis=0)
posEdgeMasktenKAv = np.concatenate(tenFoldRes['res'][0])[:,edgemask].mean(axis=0)
posEdgeMasklooAv = np.concatenate(looRes['res'][0])[:,edgemask].mean(axis=0)
for thresh in np.arange(0,1,0.1):
if thresh == 0:
bootNum = np.array(bootedgesAv > thresh).sum()
s300Num = np.array(sub300edgesAv > thresh).sum()
s200Num = np.array(sub200edgesAv > thresh).sum()
edgeCountDf['boot>'+str(round(thresh,1))] = np.pad(np.expand_dims(bootNum,axis=0).astype('object'),[0,999],constant_values=(np.nan,))
edgeCountDf['sub300>'+str(round(thresh,1))] = np.pad(np.expand_dims(s300Num,axis=0).astype('object'),[0,999],constant_values=(np.nan,))
edgeCountDf['sub200>'+str(round(thresh,1))] = np.pad(np.expand_dims(s200Num,axis=0).astype('object'),[0,999],constant_values=(np.nan,))
else:
bootNum = np.array(bootedgesAv >= thresh).sum()
s300Num = np.array(sub300edgesAv >= thresh).sum()
s200Num = np.array(sub200edgesAv >= thresh).sum()
edgeCountDf['boot>='+str(round(thresh,1))] = np.pad(np.expand_dims(bootNum,axis=0).astype('object'),[0,999],constant_values=(np.nan,))
edgeCountDf['sub300>='+str(round(thresh,1))] = np.pad(np.expand_dims(s300Num,axis=0).astype('object'),[0,999],constant_values=(np.nan,))
edgeCountDf['sub200>='+str(round(thresh,1))] = np.pad(np.expand_dims(s200Num,axis=0).astype('object'),[0,999],constant_values=(np.nan,))
#edgeCountDf=edgeCountDf.unstack().reset_index()
#edgeCountDf=edgeCountDf.rename({0:'NumberOfEdges'},axis=1)
#edgeCountDf.dropna(inplace=True)
#edgeCountDf.reset_index(inplace=True)
dfLen = edgeCountDf.shape[0]
edgeCountDf['sampleNum'] = np.repeat([itr],dfLen)
edgeDfGather.append(edgeCountDf)
#### Creating DataFrames
threshResPath = os.path.join(globalIpdir,'threshRes.npy')
threshRes = np.load(threshResPath, allow_pickle = True).item()
threshDfGather = []
for k1 in threshRes.keys():
for k2 in threshRes[k1].keys():
for k3 in threshRes[k1][k2].keys():
for k4 in threshRes[k1][k2][k3].keys():
val = threshRes[k1][k2][k3][k4]
row = np.vstack(np.array([k1,k2,k3,k4,val,0])).T
miniDf = pd.DataFrame(row,columns = ['testSize','iter','thresh','modelTestSample','pearsonsR','modelNum'])
threshDfGather.append(miniDf)
threshDf = pd.concat(threshDfGather)
threshDf = threshDf.reset_index(drop=True)
threshDf['modelType'] = threshDf.modelTestSample.str.replace('PNC','').str.replace('HCP','')
threshDf['testSample'] = threshDf.testSize.str[:3]
threshDf.testSize = threshDf.testSize.str[3:]
threshDf.pearsonsR = threshDf.pearsonsR.astype('float')
dfLen = threshDf.shape[0]
threshDf['sampleNum'] = np.repeat([itr],dfLen)
iterResPath = os.path.join(globalIpdir,'iterRes.npy')
iterRes = np.load(iterResPath, allow_pickle = True).item()
iterDfGather = []
for k1 in iterRes.keys():
for k2 in iterRes[k1].keys():
for k3 in iterRes[k1][k2].keys():
val = iterRes[k1][k2][k3]
if type(val) == np.float64:
rows = np.vstack(np.array([k1,k2,k3,val,0])).T
else:
nrows = val.shape[0]
modelNum = np.arange(0,nrows)
rows = np.append(np.tile([k1,k2,k3],nrows).reshape(nrows,3),np.vstack(val),axis=1)
rows = np.append(rows,np.vstack(modelNum),axis=1)
miniDf = pd.DataFrame(rows,columns = ['testSize','iter','modelTestSample','pearsonsR','modelNum'])
iterDfGather.append(miniDf)
iterDf = pd.concat(iterDfGather)
iterDf = iterDf.reset_index(drop=True)
iterDf['modelType'] = iterDf.modelTestSample.str.replace('PNC','').str.replace('HCP','')
iterDf['testSample'] = iterDf.testSize.str[:3]
iterDf.testSize = iterDf.testSize.str[3:]
iterDf.pearsonsR = iterDf.pearsonsR.astype('float')
dfLen = iterDf.shape[0]
iterDf['sampleNum'] = np.repeat([itr],dfLen)
iterResGather.append(iterDf)
threshPerformGather.append(threshDf)
# Aggregate performance of all models within and out of sample
fig, ax = plt.subplots(figsize=[12,8],nrows=1,ncols=2)
sns.set(style="whitegrid", palette="pastel", color_codes=True)
sns.violinplot(data=iterDf[iterDf.testSample == 'hcp'],y="pearsonsR",x='modelType',inner='quartile',hue='testSize',ax = ax[0])
sns.despine(left=True, bottom=False)
ax[0].set_title('Performance of single and bootstrapped models within sample', fontsize=14)
ax[0].yaxis.grid(True)
ax[0].xaxis.grid(False)
#plt.ylim([-0.2,0.65])
#plt.xlim([-0.4,1.4])
sns.violinplot(data=iterDf[iterDf.testSample == 'pnc'],y="pearsonsR",x='modelType',inner='quartile',hue='testSize',ax = ax[1])
sns.despine(left=True, bottom=False)
ax[1].set_title('Performance of single and bootstrapped models out of sample', fontsize=14)
ax[1].yaxis.grid(True)
ax[1].xaxis.grid(False)
#plt.ylim([-0.2,0.65])
#plt.xlim([-0.4,1.4])
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'BootstrapComparisonViolin'+str(itr)+'.png'))
plt.close()
#plt.clf()
plt.close('all')
def histPlot(ipArr,opname):
### Distribution of feature inclusion
# from https://matplotlib.org/2.0.2/examples/pylab_examples/broken_axis.html
plt.figure(figsize=[8,6])
f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
sns.set(style="whitegrid", palette="bright", color_codes=True)
# plot the same data on both axes
x,y,rects=ax.hist(ipArr[ipArr > 0],range=(0,1))
ax2.hist(ipArr[ipArr > 0],range=(0,1))
ax.set_title('Distribution of feature occurence across bootstraps', fontsize=14)
# zoom-in / limit the view to different portions of the data
ax.set_ylim(10000, 15000) # outliers only
ax2.set_ylim(0, 1600) # most of the data
# hide the spines between ax and ax2
#ax.spines['bottom'].set_visible(False)
#ax.spines['top'].set_visible(False)
#ax2.spines['top'].set_visible(False)
#ax.xaxis.tick_top()
ax.tick_params(bottom=False) # don't put tick labels at the top
#ax2.xaxis.tick_bottom()
# This looks pretty good, and was fairly painless, but you can get that
# cut-out diagonal lines look with just a bit more work. The important
# thing to know here is that in axes coordinates, which are always
# between 0-1, spine endpoints are at these locations (0,0), (0,1),
# (1,0), and (1,1). Thus, we just need to put the diagonals in the
# appropriate corners of each of our axes, and so long as we use the
# right transform and disable clipping.
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
#ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
#ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
# What's cool about this is that now if we vary the distance between
# ax and ax2 via f.subplots_adjust(hspace=...) or plt.subplot_tool(),
# the diagonal lines will move accordingly, and stay right at the tips
# of the spines they are 'breaking'
plt.xlabel('Frequency of occurence', fontsize=12)
plt.ylabel('Number of edges', fontsize=12)
sns.despine(ax=ax,left=False,bottom=True)
sns.despine(ax=ax2,left=False, bottom=False)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
plt.tight_layout()
plt.savefig(opname)
plt.close()
plt.clf()
return x
x = histPlot(bootedgesAv,os.path.join(globalOpdir,'BootstrapEdgeDistribution'+str(itr)+'.png'))
histAbsGatherBoot.append(x)
x = histPlot(sub300edgesAv,os.path.join(globalOpdir,'Sub300EdgeDistribution'+str(itr)+'.png'))
histAbsGatherS300.append(x)
x = histPlot(sub200edgesAv,os.path.join(globalOpdir,'Sub200EdgeDistribution'+str(itr)+'.png'))
histAbsGatherS200.append(x)
#################################################
############ BS model thresholding ##############
#################################################
for testSize in threshDf[threshDf.testSample == 'hcp'].testSize.unique():
tempThreshDf = threshDf[threshDf.testSample == 'hcp']
tempThreshDf['tempInd'] = list(map(lambda x: '_'.join(list(x)),tempThreshDf[['iter','modelTestSample']].values))
bootvals = tempThreshDf[(tempThreshDf.modelTestSample == 'bootHCP') & (tempThreshDf.testSize == testSize)][['thresh','pearsonsR','tempInd']].pivot(columns = 'thresh',index='tempInd').values
sub300vals = tempThreshDf[(tempThreshDf.modelTestSample == 'sub300HCP') & (tempThreshDf.testSize == testSize)][['thresh','pearsonsR','tempInd']].pivot(columns = 'thresh',index='tempInd').values
sub200vals = tempThreshDf[(tempThreshDf.modelTestSample == 'sub200HCP') & (tempThreshDf.testSize == testSize)][['thresh','pearsonsR','tempInd']].pivot(columns = 'thresh',index='tempInd').values
f = plt.figure(figsize=[10,12])
sns.set_style('white')
ax = f.add_subplot(311)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels1=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax.boxplot(bootvals,positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels1,widths=0.06,medianprops=medianprops)
ax.set_ylim(0,0.6)
ax.set_ylabel('Performance (R)')
#ax.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
ax2 = ax.twinx()
x,y,rects=ax2.hist(bootedgesAv[bootedgesAv > 0],10,range=(0,1),cumulative=-1,density=True,alpha=0.3)
histDenGatherBoot.append(x)
for i,rect in enumerate(rects):
txt="{0:.1%}".format(rect.get_height())
ax2.text(rect.get_x()+0.05, rect.get_height(),txt, ha='center', va='bottom',alpha=0.5)
ax2.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax2.yaxis.set_major_formatter(formatter)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax.set_xlim(-0.05,1.05)
ax.set_xlabel('Percentage of boostraps features occured in')
ax.set_title('Bootstrap model performance with feature thresholding HCP->HCP')
### Bootstrap model threshold model in HCP
ax3 = f.add_subplot(312)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels2=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax3.boxplot(sub300vals,positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels2,widths=0.06,medianprops=medianprops)
ax3.set_ylim(0,0.6)
ax3.set_ylabel('Performance (R)')
ax4 = ax3.twinx()
x,y,rects=ax4.hist(sub300edgesAv[sub300edgesAv > 0],10,range=(0,1),cumulative=-1,density=True,alpha=0.3)
histDenGatherS300.append(x)
for i,rect in enumerate(rects):
txt="{0:.1%}".format(rect.get_height())
ax4.text(rect.get_x()+0.05, rect.get_height(),txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax3.set_xlim(-0.05,1.05)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
        ax3.set_xlabel('Percentage of bootstraps features occurred in')
ax3.set_title('Resample 300 model performance with feature thresholding HCP->HCP')
ax5 = f.add_subplot(313)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels3=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
        ax5.boxplot(sub200vals,positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels3,widths=0.06,medianprops=medianprops)
        ax5.set_ylim(0,0.6)
        ax5.set_ylabel('Performance (R)')
        ax6 = ax5.twinx()
        x,y,rects=ax6.hist(sub200edgesAv[sub200edgesAv > 0],10,range=(0,1),cumulative=-1,density=True,alpha=0.3)
histDenGatherS200.append(x)
for i,rect in enumerate(rects):
txt="{0:.1%}".format(rect.get_height())
ax6.text(rect.get_x()+0.05, rect.get_height(),txt, ha='center', va='bottom',alpha=0.5)
ax6.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax6.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
ax5.xaxis.grid(False)
ax5.yaxis.grid(True)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax5.set_xlim(-0.05,1.05)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
        ax5.set_xlabel('Percentage of bootstraps features occurred in')
ax5.set_title('Resample 200 model performance with feature thresholding HCP->HCP')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'HCPThreshPerform_testSize'+testSize+'_'+str(itr)+'.png'))
plt.close()
for testSize in threshDf[threshDf.testSample == 'pnc'].testSize.unique():
        tempThreshDf = threshDf[threshDf.testSample == 'pnc'].copy()
        tempThreshDf['tempInd'] = list(map(lambda x: '_'.join(list(x)),tempThreshDf[['iter','modelTestSample']].values))
bootvals = tempThreshDf[(tempThreshDf.modelTestSample == 'bootPNC') & (tempThreshDf.testSize == testSize)][['thresh','pearsonsR','tempInd']].pivot(columns = 'thresh',index='tempInd').values
sub300vals = tempThreshDf[(tempThreshDf.modelTestSample == 'sub300PNC') & (tempThreshDf.testSize == testSize)][['thresh','pearsonsR','tempInd']].pivot(columns = 'thresh',index='tempInd').values
sub200vals = tempThreshDf[(tempThreshDf.modelTestSample == 'sub200PNC') & (tempThreshDf.testSize == testSize)][['thresh','pearsonsR','tempInd']].pivot(columns = 'thresh',index='tempInd').values
f = plt.figure(figsize=[10,12])
sns.set_style('white')
ax = f.add_subplot(311)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels1=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax.boxplot(bootvals,positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels1,widths=0.06,medianprops=medianprops)
ax.set_ylim(0,0.6)
ax.set_ylabel('Performance (R)')
#ax.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
ax2 = ax.twinx()
x,y,rects=ax2.hist(bootedgesAv[bootedgesAv > 0],10,range=(0,1),cumulative=-1,density=True,alpha=0.3)
for i,rect in enumerate(rects):
txt="{0:.1%}".format(rect.get_height())
ax2.text(rect.get_x()+0.05, rect.get_height(),txt, ha='center', va='bottom',alpha=0.5)
ax2.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax2.yaxis.set_major_formatter(formatter)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax.set_xlim(-0.05,1.05)
        ax.set_xlabel('Percentage of bootstraps features occurred in')
ax.set_title('Bootstrap model performance with feature thresholding HCP->PNC')
### Bootstrap model threshold model in HCP
ax3 = f.add_subplot(312)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels2=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax3.boxplot(sub300vals,positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels2,widths=0.06,medianprops=medianprops)
ax3.set_ylim(0,0.6)
ax3.set_ylabel('Performance (R)')
ax4 = ax3.twinx()
x,y,rects=ax4.hist(sub300edgesAv[sub300edgesAv > 0],10,range=(0,1),cumulative=-1,density=True,alpha=0.3)
for i,rect in enumerate(rects):
txt="{0:.1%}".format(rect.get_height())
ax4.text(rect.get_x()+0.05, rect.get_height(),txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax3.set_xlim(-0.05,1.05)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
        ax3.set_xlabel('Percentage of bootstraps features occurred in')
ax3.set_title('Resample 300 model performance with feature thresholding HCP->PNC')
ax5 = f.add_subplot(313)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels3=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
        ax5.boxplot(sub200vals,positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels3,widths=0.06,medianprops=medianprops)
        ax5.set_ylim(0,0.6)
        ax5.set_ylabel('Performance (R)')
        ax6 = ax5.twinx()
        x,y,rects=ax6.hist(sub200edgesAv[sub200edgesAv > 0],10,range=(0,1),cumulative=-1,density=True,alpha=0.3)
for i,rect in enumerate(rects):
txt="{0:.1%}".format(rect.get_height())
ax6.text(rect.get_x()+0.05, rect.get_height(),txt, ha='center', va='bottom',alpha=0.5)
ax6.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax6.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
ax5.xaxis.grid(False)
ax5.yaxis.grid(True)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax5.set_xlim(-0.05,1.05)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
        ax5.set_xlabel('Percentage of bootstraps features occurred in')
ax5.set_title('Resample 200 model performance with feature thresholding HCP->PNC')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'PNCThreshPerform_testSize'+testSize+'_'+str(itr)+'.png'))
plt.close()
## Feature inclusion plot
fig, ax = plt.subplots(figsize=[8,6])
sns.set(style="white", palette="bright", color_codes=True)
featRvalMean=np.mean(bootRes[5],axis=0)
posedges=np.stack(bootRes[1]).mean(axis=0)
negedges=np.stack(bootRes[2]).mean(axis=0)
sns.set_style("whitegrid")
plt.scatter(featRvalMean[(featRvalMean > 0) & (posedges> 0)],posedges[(featRvalMean > 0) & (posedges > 0)])
plt.scatter(featRvalMean[(featRvalMean < 0) & (negedges > 0)],negedges[(featRvalMean < 0) & (negedges > 0)])
plt.title('Bagged Model Rvals')
plt.xlabel('Rval for feature vs behavior')
    plt.ylabel('Percentage of bootstraps feature occurred in')
sns.despine(right=True)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
plt.ylim([0,1.1])
plt.xlim([-0.4,0.4])
plt.savefig(os.path.join(globalOpdir,'featureRvalBootstrap'+str(itr)+'.png'))
plt.close('all')
fig, ax = plt.subplots(figsize=[8,6])
sns.set(style="white", palette="bright", color_codes=True)
featRvalMean=np.mean(sub300Res[5],axis=0)
posedges=np.stack(sub300Res[1]).mean(axis=0)
negedges=np.stack(sub300Res[2]).mean(axis=0)
sns.set_style("whitegrid")
plt.scatter(featRvalMean[(featRvalMean > 0) & (posedges> 0)],posedges[(featRvalMean > 0) & (posedges > 0)])
plt.scatter(featRvalMean[(featRvalMean < 0) & (negedges > 0)],negedges[(featRvalMean < 0) & (negedges > 0)])
plt.title('S300 Model Rvals')
plt.xlabel('Rval for feature vs behavior')
    plt.ylabel('Percentage of resamples feature occurred in')
sns.despine(right=True)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
plt.ylim([0,1.1])
plt.xlim([-0.4,0.4])
plt.savefig(os.path.join(globalOpdir,'featureRvalS300'+str(itr)+'.png'))
plt.close('all')
fig, ax = plt.subplots(figsize=[8,6])
sns.set(style="white", palette="bright", color_codes=True)
featRvalMean=np.mean(sub200Res[5],axis=0)
posedges=np.stack(sub200Res[1]).mean(axis=0)
negedges=np.stack(sub200Res[2]).mean(axis=0)
sns.set_style("whitegrid")
plt.scatter(featRvalMean[(featRvalMean > 0) & (posedges> 0)],posedges[(featRvalMean > 0) & (posedges > 0)])
plt.scatter(featRvalMean[(featRvalMean < 0) & (negedges > 0)],negedges[(featRvalMean < 0) & (negedges > 0)])
plt.title('S200 Model Rvals')
plt.xlabel('Rval for feature vs behavior')
    plt.ylabel('Percentage of resamples feature occurred in')
sns.despine(right=True)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
plt.ylim([0,1.1])
plt.xlim([-0.4,0.4])
plt.savefig(os.path.join(globalOpdir,'featureRvalS200'+str(itr)+'.png'))
plt.close('all')
fig, ax = plt.subplots(figsize=[8,6])
sns.set(style="white", palette="bright", color_codes=True)
sns.set_style("whitegrid")
plt.scatter(np.mean(bootRes[5],axis=0),np.stack(bootRes[1]).mean(axis=0)+np.stack(bootRes[2]).mean(axis=0))
plt.scatter(np.mean(sub300Res[5],axis=0),np.stack(sub300Res[1]).mean(axis=0)+np.stack(sub300Res[2]).mean(axis=0))
plt.scatter(np.mean(sub200Res[5],axis=0),np.stack(sub200Res[1]).mean(axis=0)+np.stack(sub200Res[2]).mean(axis=0))
plt.title('Bagged Model Rvals')
plt.xlabel('Rval for feature vs behavior')
    plt.ylabel('Percentage of bootstraps feature occurred in')
sns.despine(right=True)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
plt.ylim([0,1.1])
plt.xlim([-0.4,0.4])
plt.savefig(os.path.join(globalOpdir,'featureRval3Model'+str(itr)+'.png'))
plt.close('all')
# Save feature selection stuff
rvalDict[itr]['bootRvals'] = np.mean(bootRes[5],axis=0)
rvalDict[itr]['sub300Rvals'] = np.mean(sub300Res[5],axis=0)
rvalDict[itr]['sub200Rvals'] = np.mean(sub200Res[5],axis=0)
rvalDict[itr]['bootEdgeCount'] = np.stack(bootRes[1]).mean(axis=0)+np.stack(bootRes[2]).mean(axis=0)
rvalDict[itr]['sub300EdgeCount'] = np.stack(sub300Res[1]).mean(axis=0)+np.stack(sub300Res[2]).mean(axis=0)
rvalDict[itr]['sub200EdgeCount'] = np.stack(sub200Res[1]).mean(axis=0)+np.stack(sub200Res[2]).mean(axis=0)
################################# End of big loop
allIterResDf = pd.concat(iterResGather)
allIterResDf['R Squared']=allIterResDf['pearsonsR']**2
#### Pairwise performance differences
newind = list(allIterResDf.modelType.unique())
mdlList = np.array([[n1,n2] for n1 in newind for n2 in newind])
mdlListRes = mdlList.reshape([8,8,2])
mdlListKeep = mdlListRes[~np.triu(np.ones([8,8])).astype(bool),:]
mdlMask = np.array(list(map(lambda x : x.split('_')[0], mdlListKeep[:,0]))) != np.array(list(map(lambda x : x.split('_')[0], mdlListKeep[:,1])))
mdlListUnq = mdlListKeep[mdlMask,:]
del mdlMask,mdlListKeep,mdlListRes,mdlList,newind
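# NB: the exhaustive cross-model pair list built above is immediately overridden
# below; only the subsample-vs-LOO comparisons are analysed further.
# The triangle-mask pairing idea, illustrated on two hypothetical models:
#   names = np.array([[n1, n2] for n1 in ['A', 'B'] for n2 in ['A', 'B']])
#   keep = names.reshape(2, 2, 2)[np.triu(np.ones((2, 2)), k=1).astype(bool)]
#   # keep -> array([['A', 'B']]): each unordered cross pair appears exactly once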
mdlListUnq = [['sub300','LOO'],['sub200','LOO']]
#timeStart = datetime.datetime.now()
#bigValsGather = []
arrDfGather = []
iterResDf2 = allIterResDf[~((allIterResDf.testSample == 'pnc') & (allIterResDf.testSize == '400'))].copy()
iterResDf2['testSize'] = iterResDf2.testSize.replace({'400':'All','787':'All'})
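# PNC rows tested on only 400 subjects are dropped, and the remaining
# full-sample sizes (HCP 400, PNC 787) are pooled under a single 'All' label.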
for testSize in ['200']:
for testSample in ['hcp','pnc']:
allIterResDfSizeSample = iterResDf2[(iterResDf2.testSize == testSize) & (iterResDf2.testSample == testSample)]
for mdlCombo in mdlListUnq:
print(testSize,testSample,mdlCombo)
allIterResDfModel = allIterResDfSizeSample[(allIterResDfSizeSample.modelType == mdlCombo[0]) | (allIterResDfSizeSample.modelType == mdlCombo[1])]
allIterPivot = allIterResDfModel.pivot(columns = ['iter','testSample','sampleNum'],index=['modelType','modelNum'],values='R Squared')
allIterPivot = allIterPivot.astype(np.float32)
nrows=allIterPivot.shape[0]
newind = list(map(lambda x : '_'.join(x),allIterPivot.index.values))
mdlList = np.array([[n1,n2] for n1 in newind for n2 in newind])
mdlListRes = mdlList.reshape([nrows,nrows,2])
mdlListKeep = mdlListRes[np.triu(np.ones([nrows,nrows]),k=1).astype(bool),:]
mdlMask = np.array(list(map(lambda x : x.split('_')[0], mdlListKeep[:,0]))) != np.array(list(map(lambda x : x.split('_')[0], mdlListKeep[:,1])))
mdlListKeep = mdlListKeep[mdlMask,:]
arrAgg = []
comboDf = pd.DataFrame(allIterPivot.index.values + allIterPivot.index.values[:,None])
mask = np.triu(np.ones(comboDf.shape),k=1).astype(bool)
m1 = comboDf.mask(~mask).values[0,-1][0]
m2 = comboDf.mask(~mask).values[0,-1][2]
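            # Each cell of comboDf is the concatenation of two (modelType,
            # modelNum) index tuples, so elements 0 and 2 recover the two
            # model-type names for labelling the difference distribution.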
for col in allIterPivot.columns:
#print(col)
arr = allIterPivot[col].values - allIterPivot[col].values[:,None]
arr = arr[np.triu(np.ones(arr.shape),k=1).astype(bool)]
arr = arr[mdlMask]
arrAgg.append(arr)
arrCat = np.concatenate(arrAgg)
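            # arrCat pools, over all (iter, sample) columns, the pairwise R^2
            # differences between the two model types; with the sorted pivot
            # index this works out to subsample minus LOO, matching the
            # "Subsample > LOO" axis titles used below.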
narrrows = arrCat.shape[0]
arrDf = pd.DataFrame(arrCat,columns=['modelPerfDiff'])
arrDf['models'] = np.repeat(m1+'_'+m2,narrrows)
arrDf['sample'] = np.repeat(testSample,narrrows)
arrDfGather.append(arrDf)
#bigValsGather.append([testSize,testSample,m1,m2,np.sum(arrCat > 0),np.sum(arrCat < 0),np.sum(arrCat == 0)])
#exactTestDf = pd.DataFrame(bigValsGather,columns =['testSize','testSample','model1','model2','model1Better','model2Better','tie'])
#exactTestDf.to_csv('path/to/exacTestStuff.csv')
#timeEnd = datetime.datetime.now()
arrDfBig = pd.concat(arrDfGather)
plt.close()
sns.set(style="whitegrid", palette="bright", color_codes=True,font_scale=1.2)
g = sns.FacetGrid(data=arrDfBig,col="models",row="sample",height=6)
g.map(sns.kdeplot,"modelPerfDiff",fill=True,linewidth=0,common_norm=False)
#g.map(sns.displot,"modelPerfDiff",kind='kde',fill=True,linewidth=0,common_norm=False)
axtitles = ['Subsample 300 > LOO (Within Sample, HCP)',
'Subsample 200 > LOO (Within Sample, HCP)',
'Subsample 300 > LOO (Out of Sample, PNC)',
'Subsample 200 > LOO (Out of Sample, PNC)']
for i,a in enumerate(g.axes.flatten()):
#ax.set_ylim([-0.05,0.4])
#ax.set_xlim([-1,16])
a.xaxis.grid(False)
a.yaxis.grid(False)
a.set_xticks([0], minor=False)
a.set_title(axtitles[i])
    a.xaxis.grid(True,which = 'major')
if i > 1:
a.set_xticks([-0.10,-0.05,0.05,0.10], minor=True)
a.tick_params(axis='x', which='minor', bottom=True,labelsize='small',labelbottom=True)
a.xaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
#plt.setp(a.get_xticklabels(minor=True), visible=True)
#plt.ylabel('Density')
sns.despine(left=False, bottom = False)
g.set_ylabels('Density')
g.set_xlabels('Difference in Model Performance')
g.fig.tight_layout()
plt.savefig(os.path.join(globalOpdir,'999modelPerfCompareDiff.png'))
plt.close()
#exactTestDf = pd.read_csv('path/to/exacTestStuff.csv')
#stats.fisher_exact([[431802,368196],[633125,166875]]) # sub300 loo comp
#stats.fisher_exact([[436709,363291],[579617,220383]]) # sub200 loo comp
# Mean Perf
# allIterResDf.drop(['index','pearsonsR','modelTestSample','iter','sampleNum'],axis=1).groupby(['modelType','testSample']).mean()
# reshapedDf = allIterResDf.drop(['index','pearsonsR','modelTestSample','iter','sampleNum'],axis=1).reset_index().pivot(columns = ['modelType','testSample'],values='R Squared')
# allIterResDf.drop(['index','level_2','Pearsons R','combo'],axis=1).reset_index().pivot(columns = ['ModelType','TestSample'],values='R Squared')
# allIterResDf.drop(['index','level_2','Pearsons R','combo'],axis=1).reset_index().pivot(columns = ['ModelType','TestSample'],values='R Squared')['LOO'].mean()
allThreshResDf = pd.concat(threshPerformGather)
################################ Summary Tables
allIterResDf.drop(['pearsonsR','sampleNum','modelTestSample'],axis=1).groupby(['modelType','testSample','testSize']).mean()
allIterResDf.replace({'sub300Resample':'Subsample 300','sub200Resample': 'Subsample 200'},inplace=True)
reshapePerfDf = allIterResDf.drop(['pearsonsR','sampleNum','modelTestSample'],axis=1).reset_index().pivot(columns = ['modelType','testSample','testSize'],values='R Squared')
meanPerfTable = reshapePerfDf.mean().sort_values(ascending=False)
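# meanPerfTable ranks each (modelType, testSample, testSize) combination by its
# mean R^2 across iterations, in descending order.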
iterResPlotDf = allIterResDf.replace({'boot':'Bagged','sub300': 'Subsample 300','sub200':'Subsample 200','2K':'Split Half','5K':'Five Fold','10K':'Ten Fold','Train':'Train Only','pnc':'PNC','hcp':'HCP'})
iterResPlotDf2 = iterResPlotDf[~((iterResPlotDf.testSample == 'PNC') & (iterResPlotDf.testSize == '400'))].copy()
iterResPlotDf2['testSize'] = iterResPlotDf2.testSize.replace({'400':'All','787':'All'})
################################ Figures of All
for testSampleSize in ['200','300','All']:
#f = plt.figure(figsize=[24,10])
#gs = matplotlib.gridspec.GridSpec(1, 1, right=0.77)
#ax=plt.subplot(gs[0])
### Bootstrap model performance all edges, Single model vs BS model, test sample size 200
fig, ax = plt.subplots(figsize=[8,6])
sns.set(style="whitegrid", palette="pastel", color_codes=True)
d = sns.boxplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSampleSize],y="R Squared",x='testSample',hue='modelType',order = ['HCP','PNC'])
cols=np.zeros([1,20])+1
cols[:,10:] = 2
d.pcolorfast((-1,2), (-0.3,0.7),cols,cmap='brg', alpha=0.1)
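    # pcolorfast paints a translucent two-color band behind the boxes to
    # visually split the within-sample (HCP, left) and out-of-sample (PNC,
    # right) halves of the axis.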
fracOff=1/3.72
#plt.plot([-fracOff, -fracOff, 0, 0], [0.6, 0.62, 0.62, 0.6], lw=1.5, c='k')
#plt.text(-fracOff/2, 0.62, "*", ha='center', va='bottom', color='k')
#plt.plot([fracOff, fracOff, 0, 0], [0.57, 0.59, 0.59, 0.57], lw=1.5, c='k')
#plt.text(0, 0.61, "*", ha='center', va='bottom', color='k',weight="bold")
#plt.plot([1-fracOff, 1-fracOff, 1+fracOff, 1+fracOff], [0.45, 0.47, 0.47, 0.45], lw=1.5, c='k')
#plt.text(1+fracOff, 0.42, "*", ha='center', va='bottom', color='k',weight="bold")
#plt.plot([(1/3.8), (1/3.8), 0, 0], [0.57, 0.59, 0.59, 0.57], lw=1.5, c='k')
#plt.text((1/7.6), 0.59, "*", ha='center', va='bottom', color='k')
sns.despine(left=True, bottom=True)
#plt.set_title('Performance of all models within and out of sample', fontsize=14)
#ax.set_title('Within Sample \t\t\t\t\t\t\t\t Out of Sample \t\t'.expandtabs(), fontsize=40)
ax.yaxis.grid(True)
ax.xaxis.grid(False)
plt.ylim([-0.05,0.4])
plt.xlim([-0.5,1.5])
plt.ylabel('Performance (R Squared)')
plt.xticks([0,1],['Within Sample (HCP)','Out of Sample (PNC)'])
plt.legend(loc='upper right')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'3BootstrapComparisonBoxplotAllTestSample'+testSampleSize+'.png'))
plt.close()
fig, ax = plt.subplots(figsize=[20,10])
sns.set(style="whitegrid", palette="pastel", color_codes=True)
d = sns.catplot(x="modelType", y="R Squared",hue="testSize", col="testSample",data=iterResPlotDf2, kind="box",col_order = ['HCP','PNC'],legend = False)
d.set_xticklabels(rotation=30)
sns.despine(left=True, bottom=True)
ax.yaxis.grid(True)
ax.xaxis.grid(False)
plt.ylim([-0.05,0.4])
#plt.xlim([-0.5,1.5])
#plt.ylabel('Performance (R Squared)')
plt.legend(loc='upper right',title = 'Test Sample Size')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'3BootstrapComparisonBoxplotAllTestSample3TestSize.png'))
plt.close()
### Three tier plot
cvPerformAll = pd.concat(cvPerformGather)
cvPerformAllStack = cvPerformAll.stack().reset_index()
cvPerformAllStack.columns = ['index','cvType','Pearsons R']
cvPerformAllStack['R Squared'] = cvPerformAllStack['Pearsons R']**2
## R squared violinplot
plt.figure(figsize=[12,10])
ax = plt.subplot(3,1,1)
plt.title('CV Performance')
sns.boxplot(data = cvPerformAllStack, x = 'cvType', y = 'R Squared',color='white',order = ['splitHalf','fiveFold', 'tenFold', 'LOO'])
plt.xticks(range(0,8),['Split Half','Five Fold','Ten Fold','Leave One Out','','','',''])
plt.ylim([-0.1,0.3])
plt.grid(b=True,axis='y',alpha=0.7,linestyle='--')
plt.setp(ax.artists, edgecolor = 'k', facecolor='w')
plt.setp(ax.lines, color='k')
ax = plt.subplot(3,1,2)
plt.title('Performance on left out HCP')
sns.boxplot(data = iterResPlotDf[iterResPlotDf.testSample == 'HCP'], x = 'modelType', y = 'R Squared',hue = 'testSize',color='white',order = ['Split Half','Five Fold', 'Ten Fold', 'LOO', 'Train Only','Subsample 200','Subsample 300','Bagged'])
#plt.xticks(range(1,9),['Split Half','Five Fold','Ten Fold','Leave One Out','subsample 200','subsample 300','Bootstrap','Train Only'])
plt.ylim([-0.1,0.3])
plt.grid(b=True,axis='y',alpha=0.7,linestyle='--')
plt.setp(ax.artists, edgecolor = 'k', facecolor='w')
plt.setp(ax.lines, color='k')
ax = plt.subplot(3,1,3)
plt.title('Performance on PNC')
sns.boxplot(data = iterResPlotDf[iterResPlotDf.testSample == 'PNC'], x = 'modelType', y = 'R Squared',color='white',hue = 'testSize',order = ['Split Half','Five Fold', 'Ten Fold', 'LOO', 'Train Only','Subsample 200','Subsample 300','Bagged'])
#plt.xticks(range(1,9),['Split Half','Five Fold','Ten Fold','Leave One Out','subsample 200','subsample 300','Bagged','Train Only'])
plt.ylim([-0.1,0.3])
plt.grid(b=True,axis='y',alpha=0.7,linestyle='--')
plt.setp(ax.artists, edgecolor = 'k', facecolor='w')
plt.setp(ax.lines, color='k')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'allPerfIterSqViolin.png'))
plt.close()
plt.clf()
###### All Histogram
def makeHistAll(allAbshist,allAbshistMean,allAbshistStd,opname,cutnums,titl):
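    """Plot a bar histogram of feature-occurrence thresholds on a broken y-axis.

    cutnums is assumed to give [upper-axis top, upper-axis bottom,
    lower-axis top, lower-axis bottom] y-limits, so the tall '>0%' bar can be
    shown without flattening the rest of the distribution.
    """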
#f = plt.figure(figsize=[12,12])
f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
sns.set(style="whitegrid", palette="bright", color_codes=True)
#ax.set_title(titl, fontsize=40)
labels1=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax.bar(np.arange(0.05,1.05,0.1), allAbshistMean, yerr=allAbshistStd,alpha=1,width=0.1)
ax2.bar(np.arange(0.05,1.05,0.1), allAbshistMean, yerr=allAbshistStd,alpha=1,width=0.1)
ax.set_ylim(cutnums[1], cutnums[0]) # outliers only
ax2.set_ylim(cutnums[3], cutnums[2]) # most of the data
    ax.tick_params(bottom=False) # hide the bottom ticks of the upper (broken) axis
ax.set_xlim(-0.05,1.05)
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
#ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
#ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
sns.despine(ax=ax,left=False,bottom=True)
sns.despine(ax=ax2,left=False, bottom=False)
ax2.set_xlabel('Threshold',fontsize=16)
ax2.set_ylabel('Number of features', fontsize=16)
ax.set_title(titl, fontsize=20)
plt.tight_layout()
plt.savefig(opname)
plt.close()
#ax2.xaxis.tick_bottom()
allAbshist=np.stack(histAbsGatherBoot)
allAbshistMean=np.mean(allAbshist,axis=0)
allAbshistStd=np.std(allAbshist,axis=0)
opname1=os.path.join(globalOpdir,'histogramAllBoot.png')
makeHistAll(allAbshist,allAbshistMean,allAbshistStd,opname1,[12500,10000,3000,0],'Bagged Model')
allAbshist=np.stack(histAbsGatherS300)
allAbshistMean=np.mean(allAbshist,axis=0)
allAbshistStd=np.std(allAbshist,axis=0)
opname2=os.path.join(globalOpdir,'histogramAllS300.png')
makeHistAll(allAbshist,allAbshistMean,allAbshistStd,opname2,[3000,2000,800,0],'Subsample 300 Model')
allAbshist=np.stack(histAbsGatherS200)
allAbshistMean=np.mean(allAbshist,axis=0)
allAbshistStd=np.std(allAbshist,axis=0)
opname3=os.path.join(globalOpdir,'histogramAllS200.png')
makeHistAll(allAbshist,allAbshistMean,allAbshistStd,opname3,[7000,900,750,0],'Subsample 200 Model')
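# ImageMagick's montage CLI (assumed to be installed on the host) stitches the
# three histogram panels side by side into a single figure.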
os.system('montage '+opname1+' '+opname2+' '+opname3+' -geometry +3+1 '+os.path.join(globalOpdir,'4histogramAllMontage.png'))
###### Performance by split grid form
#f = plt.figure()
d = sns.catplot(x="modelType", y="R Squared",hue="sampleNum", col="testSample",row = 'testSize',data=iterResPlotDf2, kind="box",col_order = ['HCP','PNC'],legend = False,height=5,aspect=2,sharex=False)
#sns.despine(offset=15)
d.set_xticklabels(rotation=15)
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'2PerformanceComparisonSplitGrid.png'))
plt.close()
###### Figure performance by split, each test size separate
for testSize in ['200','300','All']:
f = plt.figure(figsize=[40,18])
gs = matplotlib.gridspec.GridSpec(1, 1, right=0.85)
#ax = f.add_subplot(211)
ax=plt.subplot(gs[0])
order = ['bootHCP','sub300HCP','sub200HCP','HCP2K','HCP5K','HCP10K','HCPLOO','HCPTrain','bootPNC','sub300PNC','sub200PNC','PNC2K','PNC5K','PNC10K','PNCLOO','PNCTrain']
d=sns.boxplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSize],y="R Squared",x='modelTestSample',hue='sampleNum',ax=ax,order=order)
cols=np.zeros([1,160])+1
cols[:,80:] = 2
d.pcolorfast((-1,16), (-0.05,0.4),cols,cmap='brg', alpha=0.1)
sns.despine(offset=15)
#ax3.legend(bbox_to_anchor=(1.8, 1.5), loc='upper right')
#ax3.legend(bbox_to_anchor=(1.3, 1.5), loc='upper right')
handles, labels = ax.get_legend_handles_labels()
f.legend(handles, labels, loc='center right',bbox_to_anchor=(0.9, 0.55),fontsize=25,title = 'HCP Split',title_fontsize=25)
ax.get_legend().remove()
ax.set_ylim([-0.05,0.4])
ax.set_xlim([-1,16])
ax.xaxis.grid(False)
ax.yaxis.grid(True)
plt.title('Within Sample \t\t\t\t\t\t\t\t\t\t\t Out of Sample \t\t'.expandtabs(), fontsize=40)
    plt.xticks(ticks = range(0,16), labels = ['Bagged', 'Subsample 300', 'Subsample 200', 'SplitHalf','FiveFold', 'TenFold', 'LOO', 'Train Only','Bagged', 'Subsample 300', 'Subsample 200', 'SplitHalf','FiveFold', 'TenFold', 'LOO', 'Train Only'],rotation=25,fontsize=25)
plt.yticks(fontsize=25)
plt.ylabel('Performance (R Squared)', fontsize=30)
plt.xlabel('')
plt.gcf().subplots_adjust(bottom=0.25)
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'2PerformanceComparisonSplit'+testSize+'.png'))
plt.close()
######################### Overlay figures 2 and 3 test
###### Figure performance by split, each test size separate
for testSize in ['200','300','All']:
fig, ax = plt.subplots(figsize = [20,10])
order = ['bootHCP','sub300HCP','sub200HCP','HCP2K','HCP5K','HCP10K','HCPLOO','HCPTrain','bootPNC','sub300PNC','sub200PNC','PNC2K','PNC5K','PNC10K','PNCLOO','PNCTrain']
#d = sns.boxplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSampleSize],y="R Squared",x='modelTestSample',order = order,ax=ax,color='black',alpha = 0.4,inner=None,)
#for vio in d.collections:
# vio.set_facecolor('black')
# vio.set_alpha(0.4)
    d = sns.boxplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSize],y="R Squared",x='modelTestSample',order = order,ax=ax,color='black',boxprops=dict(alpha=.5))
d=sns.boxplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSize],y="R Squared",x='modelTestSample',hue='sampleNum',ax=ax,order=order, boxprops=dict(alpha=.7))
sns.despine(offset=15)
handles, labels = ax.get_legend_handles_labels()
    fig.legend(handles, labels, loc='center right',bbox_to_anchor=(0.9, 0.55),fontsize=25,title = 'HCP Split',title_fontsize=25)
ax.get_legend().remove()
ax.set_ylim([-0.05,0.4])
ax.set_xlim([-1,16])
ax.xaxis.grid(False)
ax.yaxis.grid(True)
#plt.title('Within Sample \t\t\t\t\t\t\t\t\t\t\t Out of Sample \t\t'.expandtabs(), fontsize=40)
#plt.xticks(ticks = range(0,16), labels = ['Bagged', 'Subsample 300', 'Subsample 200', 'SplitHalf','FiveFold', 'TenFold', 'LOO', 'Train Only','Bagged', 'Subample 300', 'Subsample 200', 'SplitHalf','FiveFold', 'TenFold', 'LOO', 'Train Only'],rotation=25,fontsize=25)
plt.yticks(fontsize=25)
plt.ylabel('Performance (R Squared)', fontsize=30)
plt.xlabel('')
plt.gcf().subplots_adjust(bottom=0.25)
plt.tight_layout()
#plt.show()
plt.savefig(os.path.join(globalOpdir,'92PerformanceComparisonSplit'+testSize+'.png'))
plt.close()
allXhistBoot=np.stack(histDenGatherBoot)
allXhistMeanBoot=np.mean(allXhistBoot,axis=0)
allXhistStdBoot=np.std(allXhistBoot,axis=0)
allXhistS300=np.stack(histDenGatherS300)
allXhistMeanS300=np.mean(allXhistS300,axis=0)
allXhistStdS300=np.std(allXhistS300,axis=0)
allXhistS200=np.stack(histDenGatherS200)
allXhistMeanS200=np.mean(allXhistS200,axis=0)
allXhistStdS200=np.std(allXhistS200,axis=0)
#################################################
############ BS model thresholding ##############
#################################################
allThreshResDf = pd.concat(threshPerformGather)
allThreshResDf['R Squared']=allThreshResDf['pearsonsR']**2
allThreshResDf = allThreshResDf[~((allThreshResDf.testSample == 'pnc') & (allThreshResDf.testSize == '400'))].copy()
allThreshResDf['testSize'] = allThreshResDf.testSize.replace({'400':'All','787':'All'})
allThreshResDfPivot = allThreshResDf.pivot(index=['sampleNum','iter'],columns=['modelTestSample','testSize','thresh'],values='R Squared')
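# Rows of allThreshResDfPivot are (sampleNum, iter) pairs; columns are
# (modelTestSample, testSize, thresh), so e.g. allThreshResDfPivot['bootHCP']['200']
# yields one column of R^2 values per threshold.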
for testSampleSize in ['200','300','All']:
for testSample in ['HCP','PNC']:
f = plt.figure(figsize=[12,12])
sns.set_style('white')
baseXPositions = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95]
#################### Top Subplot ################################
ax = f.add_subplot(311)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels1=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['boot'+testSample][testSampleSize].values.T],positions=baseXPositions,labels=labels1,widths=0.06,medianprops=medianprops)
ax.set_ylim(-0.05,0.4)
ax.set_ylabel('Performance (R Squared)')
#ax.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
ax2 = ax.twinx()
ax2.bar(np.arange(0.05,1.05,0.1), allXhistMeanBoot, yerr=allXhistStdBoot,alpha=0.3,width=0.1)
for i,rect in enumerate(allXhistMeanBoot):
txt="{0:.1%}".format(rect)
ax2.text((i/10)+0.05,rect+allXhistStdBoot[i],txt, ha='center', va='bottom',alpha=0.5)
ax2.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax2.yaxis.set_major_formatter(formatter)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax.set_xlim(-0.05,1.05)
        ax.set_xlabel('Percentage of bootstraps features occurred in')
        ax.set_title('Bagged model performance with feature thresholding ('+testSample+' test sample)')
#################### Middle Subplot ################################
### S300 model threshold model in HCP
#f = plt.figure(figsize=[10,6])
ax3 = f.add_subplot(312)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels2=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax3.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['sub300'+testSample][testSampleSize].values.T],positions=baseXPositions,labels=labels2,widths=0.06,medianprops=medianprops)
ax3.set_ylim(-0.05,0.4)
ax3.set_ylabel('Performance (R Squared)')
ax4 = ax3.twinx()
x=ax4.bar(np.arange(0.05,1.05,0.1), allXhistMeanS300, yerr=allXhistStdS300,alpha=0.3,width=0.1)
for i,rect in enumerate(allXhistMeanS300):
txt="{0:.1%}".format(rect)
ax4.text((i/10)+0.05,rect+allXhistStdS300[i],txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax3.set_xlim(-0.05,1.05)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
        ax3.set_xlabel('Percentage of subsamples features occurred in')
        ax3.set_title('Subsample 300 model performance with feature thresholding ('+testSample+' test sample)')
#################### Bottom Subplot ################################
### S200 model threshold model in HCP
#f = plt.figure(figsize=[10,6])
ax3 = f.add_subplot(313)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels2=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax3.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['sub200'+testSample][testSampleSize].values.T],positions=baseXPositions,labels=labels2,widths=0.06,medianprops=medianprops)
ax3.set_ylim(-0.05,0.4)
ax3.set_ylabel('Performance (R Squared)')
ax4 = ax3.twinx()
x=ax4.bar(np.arange(0.05,1.05,0.1), allXhistMeanS200, yerr=allXhistStdS200,alpha=0.3,width=0.1)
for i,rect in enumerate(allXhistMeanS200):
txt="{0:.1%}".format(rect)
ax4.text((i/10)+0.05,rect+allXhistStdS200[i],txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax3.set_xlim(-0.05,1.05)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
        ax3.set_xlabel('Percentage of subsamples features occurred in')
        ax3.set_title('Subsample 200 model performance with feature thresholding ('+testSample+' test sample)')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'6'+testSample+'ThreshPerform'+testSampleSize+'.png'))
plt.close()
###### Rval versus inclusion
bootRvalsAll = np.concatenate([rvalDict[r]['bootRvals'] for r in rvalDict])
bootEdgesAll = np.concatenate([rvalDict[r]['bootEdgeCount'] for r in rvalDict])
sub300RvalsAll = np.concatenate([rvalDict[r]['sub300Rvals'] for r in rvalDict])
sub200RvalsAll = np.concatenate([rvalDict[r]['sub200Rvals'] for r in rvalDict])
sub300EdgesAll = np.concatenate([rvalDict[r]['sub300EdgeCount'] for r in rvalDict])
sub200EdgesAll = np.concatenate([rvalDict[r]['sub200EdgeCount'] for r in rvalDict])
mlInd = pd.MultiIndex.from_tuples(zip(*[['boot','sub300','sub200','boot','sub300','sub200'],['edgeCount','edgeCount','edgeCount','Rvals','Rvals','Rvals']]))
arr=np.stack([bootEdgesAll,sub300EdgesAll,sub200EdgesAll,bootRvalsAll,sub300RvalsAll,sub200RvalsAll])
tempDf=pd.DataFrame(arr.T,columns=mlInd)
tempDfStack = tempDf.stack(level=0).reset_index()
tempDfStack=tempDfStack.rename({'level_1':'ModelType'},axis=1)
replaceDict={'boot':'Bagged','sub200':'Subsample 200','sub300':'Subsample 300'}
tempDfStack.ModelType.replace(replaceDict,inplace=True)
tempDfStack = tempDfStack[~(tempDfStack.edgeCount == 0)]
tempDfStack.rename({'Rvals':'R Value at feature selection step','edgeCount':'Feature occurrence across resamples/bootstraps'},axis=1,inplace=True)
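# tempDfStack is now long-format: one row per (feature, model type) holding the
# feature's selection-step R value and its occurrence rate, ready for jointplot.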
plt.figure(figsize=[12,18])
sns.set(style="white", palette="bright", color_codes=True)
s=sns.jointplot(data=tempDfStack,x='R Value at feature selection step', y='Feature occurrence across resamples/bootstraps',hue='ModelType',alpha=0.05,linewidth=0,marker='.')
s.fig.gca().set_ylabel('Feature occurrence across resamples/bootstraps')
s.fig.gca().set_xlabel('R Value at feature selection step')
#s.yaxis.grid(True)
#plt.grid(axis='y')
plt.ylim([0,1])
s.ax_joint.yaxis.grid(True)
sns.despine()
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'5RvalVOccurenceAll.png'))
plt.close('all')
################## Thresh plots all in one fig
for testSampleSize in ['200','300','All']:
baseXPosition = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95]
f = plt.figure(figsize=[12,12])
sns.set_style('white')
ax = f.add_subplot(311)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels1=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
labels1Blank = ['','','','','','','','','','']
bp1 = ax.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['bootHCP'][testSampleSize].values.T],positions=baseXPosition,labels=labels1,widths=0.025,medianprops=medianprops,patch_artist=True,boxprops=dict(facecolor='white'))
bp2 = ax.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['bootPNC'][testSampleSize].values.T],positions=[0.08,0.18,0.28,0.38,0.48,0.58,0.68,0.78,0.88,0.98],labels=labels1Blank,widths=0.025,medianprops=medianprops,patch_artist=True,boxprops=dict(facecolor='grey'))
ax.legend([bp1["boxes"][0], bp2["boxes"][0]], ['Within Sample (HCP)', 'Out of Sample (PNC)'], loc='upper right')
ax.set_ylim(-0.05,0.4)
ax.set_ylabel('Performance (R Squared)')
#ax.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
ax2 = ax.twinx()
ax2.bar(np.arange(0.065,1.065,0.1), allXhistMeanBoot, yerr=allXhistStdBoot,alpha=0.3,width=0.1)
for i,rect in enumerate(allXhistMeanBoot):
txt="{0:.1%}".format(rect)
ax2.text((i/10)+0.065,rect+allXhistStdBoot[i],txt, ha='center', va='bottom',alpha=0.5)
ax2.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax2.yaxis.set_major_formatter(formatter)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax.set_xlim(-0.05,1.1)
    ax.set_xlabel('Percentage of bootstraps features occurred in')
    ax.set_title('Bagged model performance with feature thresholding')
### S300 model threshold model in HCP
#f = plt.figure(figsize=[10,6])
ax3 = f.add_subplot(312)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels2=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax3.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['sub300HCP'][testSampleSize].values.T],positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels2,widths=0.025,medianprops=medianprops)
ax3.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['sub300PNC'][testSampleSize].values.T],positions=[0.08,0.18,0.28,0.38,0.48,0.58,0.68,0.78,0.88,0.98],labels=labels1Blank,widths=0.025,medianprops=medianprops,patch_artist=True,boxprops=dict(facecolor='grey'))
ax3.set_ylim(-0.05,0.4)
ax3.set_ylabel('Performance (R Squared)')
ax4 = ax3.twinx()
x=ax4.bar(np.arange(0.065,1.065,0.1), allXhistMeanS300, yerr=allXhistStdS300,alpha=0.3,width=0.1)
for i,rect in enumerate(allXhistMeanS300):
txt="{0:.1%}".format(rect)
ax4.text((i/10)+0.065,rect+allXhistStdS300[i],txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax3.set_xlim(-0.05,1.1)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
    ax3.set_xlabel('Percentage of subsamples features occurred in')
    ax3.set_title('Subsample 300 model performance with feature thresholding')
### S200 model threshold model in HCP
#f = plt.figure(figsize=[10,6])
ax3 = f.add_subplot(313)
medianprops = dict(linestyle='-', linewidth=1, color='black')
labels2=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
ax3.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['sub200HCP'][testSampleSize].values.T],positions=[0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95],labels=labels2,widths=0.025,medianprops=medianprops)
ax3.boxplot([a[~np.isnan(a)] for a in allThreshResDfPivot['sub200PNC'][testSampleSize].values.T],positions=[0.08,0.18,0.28,0.38,0.48,0.58,0.68,0.78,0.88,0.98],labels=labels1Blank,widths=0.025,medianprops=medianprops,patch_artist=True,boxprops=dict(facecolor='grey'))
ax3.set_ylim(-0.05,0.4)
ax3.set_ylabel('Performance (R Squared)')
ax4 = ax3.twinx()
x=ax4.bar(np.arange(0.065,1.065,0.1), allXhistMeanS200, yerr=allXhistStdS200,alpha=0.3,width=0.1)
for i,rect in enumerate(allXhistMeanS200):
txt="{0:.1%}".format(rect)
ax4.text((i/10)+0.065,rect+allXhistStdS200[i],txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
sns.despine(trim=True,left=False, bottom=False, right=False)
ax3.set_xlim(-0.05,1.1)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
    ax3.set_xlabel('Percentage of subsamples features occurred in')
    ax3.set_title('Subsample 200 model performance with feature thresholding')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'6HCPandPNCThreshPerform'+testSampleSize+'.png'))
plt.close()
#########################################################################
######################## Strip plots ####################################
#########################################################################
################################ Figures of All
for testSampleSize in ['200','300','All']:
if testSampleSize == 'All':
swarmSize = 2
else:
swarmSize = 0.5
### Bootstrap model performance all edges, Single model vs BS model, test sample size 200
fig, ax = plt.subplots(figsize=[8,6])
sns.set(style="whitegrid", palette="pastel", color_codes=True)
#d = sns.swarmplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSampleSize],y="R Squared",x='testSample',hue='modelType',order = ['HCP','PNC'],dodge=True,size=swarmSize,ax=ax,alpha = 1,color='black')
d = sns.violinplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSampleSize],y="R Squared",x='testSample',hue='modelType',order = ['HCP','PNC'],alpha = 1,inner=None,linewidth=0,ax=ax)
#d = sns.stripplot(data=iterResPlotDf2[iterResPlotDf2.testSize == '200'],y="R Squared",x='testSample',hue='modelType',order = ['HCP','PNC'],ax=ax,dodge=True,jitter = 1)
for vio in d.collections:
vio.set_facecolor('black')
vio.set_alpha(0.25)
d = sns.boxplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSampleSize],y="R Squared",x='testSample',hue='modelType',order = ['HCP','PNC'],ax=ax, boxprops=dict(alpha=.9),showfliers=False)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[8:], labels[8:])
cols=np.zeros([1,20])+1
cols[:,10:] = 2
d.pcolorfast((-1,2), (-0.3,0.7),cols,cmap='brg', alpha=0.1)
fracOff=1/3.72
#plt.plot([-fracOff, -fracOff, 0, 0], [0.6, 0.62, 0.62, 0.6], lw=1.5, c='k')
#plt.text(-fracOff/2, 0.62, "*", ha='center', va='bottom', color='k')
#plt.plot([fracOff, fracOff, 0, 0], [0.57, 0.59, 0.59, 0.57], lw=1.5, c='k')
#plt.text(0, 0.61, "*", ha='center', va='bottom', color='k',weight="bold")
#plt.plot([1-fracOff, 1-fracOff, 1+fracOff, 1+fracOff], [0.45, 0.47, 0.47, 0.45], lw=1.5, c='k')
#plt.text(1+fracOff, 0.42, "*", ha='center', va='bottom', color='k',weight="bold")
#plt.plot([(1/3.8), (1/3.8), 0, 0], [0.57, 0.59, 0.59, 0.57], lw=1.5, c='k')
#plt.text((1/7.6), 0.59, "*", ha='center', va='bottom', color='k')
sns.despine(left=True, bottom=True)
#plt.set_title('Performance of all models within and out of sample', fontsize=14)
#ax.set_title('Within Sample \t\t\t\t\t\t\t\t Out of Sample \t\t'.expandtabs(), fontsize=40)
ax.yaxis.grid(True)
ax.xaxis.grid(False)
plt.ylim([-0.05,0.4])
plt.xlim([-0.5,1.5])
plt.ylabel('Performance (R Squared)')
plt.xticks([0,1],['Within Sample (HCP)','Out of Sample (PNC)'])
#plt.legend(loc='upper right')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'93BootstrapComparisonStripplotAllTestSample'+testSampleSize+'.png'))
plt.close()
fig, ax = plt.subplots(figsize=[20,10])
sns.set(style="whitegrid", palette="pastel", color_codes=True)
d = sns.catplot(x="modelType", y="R Squared",hue="testSize", col="testSample",data=iterResPlotDf2, kind="strip",col_order = ['HCP','PNC'],legend = False)
d.set_xticklabels(rotation=30)
sns.despine(left=True, bottom=True)
ax.yaxis.grid(True)
ax.xaxis.grid(False)
plt.ylim([-0.05,0.4])
#plt.xlim([-0.5,1.5])
#plt.ylabel('Performance (R Squared)')
plt.legend(loc='upper right',title = 'Test Sample Size')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'93BootstrapComparisonStripplotAllTestSample3TestSize.png'))
plt.close()
###### Performance by split grid form
#f = plt.figure()
d = sns.catplot(x="modelType", y="R Squared",hue="sampleNum", col="testSample",row = 'testSize',data=iterResPlotDf2, kind="strip",col_order = ['HCP','PNC'],legend = False,height=5,aspect=2,sharex=False)
#sns.despine(offset=15)
d.set_xticklabels(rotation=15)
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'92PerformanceComparisonSplitGridStrip.png'))
plt.close()
###### Figure performance by split, each test size separate
for testSize in ['200','300','All']:
f = plt.figure(figsize=[40,18])
gs = matplotlib.gridspec.GridSpec(1, 1, right=0.85)
#ax = f.add_subplot(211)
ax=plt.subplot(gs[0])
order = ['bootHCP','sub300HCP','sub200HCP','HCP2K','HCP5K','HCP10K','HCPLOO','HCPTrain','bootPNC','sub300PNC','sub200PNC','PNC2K','PNC5K','PNC10K','PNCLOO','PNCTrain']
d=sns.stripplot(data=iterResPlotDf2[iterResPlotDf2.testSize == testSize],y="R Squared",x='modelTestSample',hue='sampleNum',ax=ax,order=order)
cols=np.zeros([1,160])+1
cols[:,80:] = 2
d.pcolorfast((-1,16), (-0.05,0.4),cols,cmap='brg', alpha=0.1)
sns.despine(offset=15)
#ax3.legend(bbox_to_anchor=(1.8, 1.5), loc='upper right')
#ax3.legend(bbox_to_anchor=(1.3, 1.5), loc='upper right')
handles, labels = ax.get_legend_handles_labels()
f.legend(handles, labels, loc='center right',bbox_to_anchor=(0.9, 0.55),fontsize=25,title = 'HCP Split',title_fontsize=25)
ax.get_legend().remove()
ax.set_ylim([-0.05,0.4])
ax.set_xlim([-1,16])
ax.xaxis.grid(False)
ax.yaxis.grid(True)
plt.title('Within Sample \t\t\t\t\t\t\t\t\t\t\t Out of Sample \t\t'.expandtabs(), fontsize=40)
    plt.xticks(ticks = range(0,16), labels = ['Bagged', 'Subsample 300', 'Subsample 200', 'SplitHalf','FiveFold', 'TenFold', 'LOO', 'Train Only','Bagged', 'Subsample 300', 'Subsample 200', 'SplitHalf','FiveFold', 'TenFold', 'LOO', 'Train Only'],rotation=25,fontsize=25)
plt.yticks(fontsize=25)
plt.ylabel('Performance (R Squared)', fontsize=30)
plt.xlabel('')
plt.gcf().subplots_adjust(bottom=0.25)
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'92PerformanceComparisonSplit'+testSize+'Strip.png'))
plt.close()
labels1=['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%','>=80%','>=90%']
allXhistBoot=np.stack(histDenGatherBoot)
bootHistDf = pd.DataFrame(allXhistBoot,columns = labels1)
allXhistMeanBoot=np.mean(allXhistBoot,axis=0)
allXhistStdBoot=np.std(allXhistBoot,axis=0)
allXhistS300=np.stack(histDenGatherS300)
s300HistDf = pd.DataFrame(allXhistS300,columns = labels1)
allXhistMeanS300=np.mean(allXhistS300,axis=0)
allXhistStdS300=np.std(allXhistS300,axis=0)
allXhistS200=np.stack(histDenGatherS200)
s200HistDf = pd.DataFrame(allXhistS200,columns = labels1)
allXhistMeanS200=np.mean(allXhistS200,axis=0)
allXhistStdS200=np.std(allXhistS200,axis=0)
#################################################
############ BS model thresholding ##############
#################################################
allThreshResDf = pd.concat(threshPerformGather)
allThreshResDf['R Squared']=allThreshResDf['pearsonsR']**2
allThreshResDf = allThreshResDf[~((allThreshResDf.testSample == 'pnc') & (allThreshResDf.testSize == '400'))].copy()
allThreshResDf['testSize'] = allThreshResDf.testSize.replace({'400':'All','787':'All'})
allThreshResDfPivot = allThreshResDf.pivot(index=['sampleNum','iter'],columns=['modelTestSample','testSize','thresh'],values='R Squared')
################## Thresh plots all in one fig
plt.close('all')
plt.clf()
for testSampleSize in ['200','300','All']:
if testSampleSize == 'All':
swarmSize = 3
else:
swarmSize = 0.8
baseXPosition = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95]
f = plt.figure(figsize=[16,16])
ax = f.add_subplot(311)
sns.set(font_scale=1.4)
sns.set_style('white')
sns.swarmplot(data = allThreshResDf[(allThreshResDf.testSize == testSampleSize) & (allThreshResDf.modelType == 'boot')],y='R Squared',x='thresh',hue='testSample',dodge=True,size=swarmSize,ax=ax,alpha = 0.6,color='black',hue_order=['hcp','pnc'])
d = sns.boxplot(data = allThreshResDf[(allThreshResDf.testSize == testSampleSize) & (allThreshResDf.modelType == 'boot')],y='R Squared',x='thresh',hue='testSample',ax=ax, boxprops=dict(alpha=.95),hue_order=['hcp','pnc'],palette = sns.color_palette(palette = ["white" , "grey"]),showfliers=False)
#for i,bx in enumerate(d.artists):
# if i % 2:
# bx.set_facecolor('grey')
# else:
# bx.set_facecolor('white')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[:2], ['Within Sample (HCP)', 'Out of Sample (PNC)'], loc='upper right')
ax.set_ylim(-0.05,0.4)
ax.set_ylabel('Performance (R Squared)')
ax2 = ax.twinx()
sns.barplot(data=bootHistDf,alpha = 0.3,color = 'blue',ax=ax2)
for i,rect in enumerate(allXhistMeanBoot):
txt="{0:.1%}".format(rect)
ax2.text((i)+0.065,rect+allXhistStdBoot[i],txt, ha='center', va='bottom',alpha=0.5)
ax2.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax2.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
ax.yaxis.grid(True)
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
sns.despine(trim=True,left=False, bottom=False, right=False)
#ax.set_xlim(-0.05,1.1)
    ax.set_xlabel('Percentage of bootstraps features occurred in')
    ax.set_title('Bagged model performance with feature thresholding')
### S300 model threshold model in HCP
#f = plt.figure(figsize=[10,6])
ax3 = f.add_subplot(312)
sns.set_style('white')
sns.swarmplot(data = allThreshResDf[(allThreshResDf.testSize == testSampleSize) & (allThreshResDf.modelType == 'sub300')],y='R Squared',x='thresh',hue='testSample',dodge=True,size=swarmSize,ax=ax3,alpha = 0.6,color='black',hue_order=['hcp','pnc'])
sns.boxplot(data = allThreshResDf[(allThreshResDf.testSize == testSampleSize) & (allThreshResDf.modelType == 'sub300')],y='R Squared',x='thresh',hue='testSample',ax=ax3, boxprops=dict(alpha=.95),hue_order=['hcp','pnc'],palette = sns.color_palette(palette = ["white" , "grey"]),showfliers=False)
handles, labels = ax3.get_legend_handles_labels()
#ax3.legend(handles[:2], labels[:2])
ax3.get_legend().remove()
ax3.set_ylim(-0.05,0.4)
ax3.set_ylabel('Performance (R Squared)')
ax4 = ax3.twinx()
sns.barplot(data=s300HistDf,alpha = 0.3,color = 'blue',ax=ax4)
for i,rect in enumerate(allXhistMeanS300):
txt="{0:.1%}".format(rect)
ax4.text((i)+0.065,rect+allXhistStdS300[i],txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
ax4.xaxis.grid(False)
ax4.yaxis.grid(False)
sns.despine(trim=True,left=False, bottom=False, right=False)
#ax3.set_xlim(-0.05,1.1)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
    ax3.set_xlabel('Percentage of subsamples features occurred in')
    ax3.set_title('Subsample 300 model performance with feature thresholding')
### S200 model threshold model in HCP
#f = plt.figure(figsize=[10,6])
ax3 = f.add_subplot(313)
sns.set_style('white')
sns.swarmplot(data = allThreshResDf[(allThreshResDf.testSize == testSampleSize) & (allThreshResDf.modelType == 'sub200')],y='R Squared',x='thresh',hue='testSample',dodge=True,size=swarmSize,ax=ax3,alpha = 0.6,color='black',hue_order=['hcp','pnc'])
sns.boxplot(data = allThreshResDf[(allThreshResDf.testSize == testSampleSize) & (allThreshResDf.modelType == 'sub200')],y='R Squared',x='thresh',hue='testSample',ax=ax3, boxprops=dict(alpha=.95),hue_order=['hcp','pnc'],palette = sns.color_palette(palette = ["white" , "grey"]),showfliers=False)
handles, labels = ax3.get_legend_handles_labels()
    #ax3.legend(handles[:2], labels[:2])  # superseded: the legend is removed below, as in the middle panel
ax3.get_legend().remove()
ax3.set_ylim(-0.05,0.4)
ax3.set_ylabel('Performance (R Squared)')
ax4 = ax3.twinx()
sns.barplot(data=s200HistDf,alpha = 0.3,color = 'blue',ax=ax4)
for i,rect in enumerate(allXhistMeanS200):
txt="{0:.1%}".format(rect)
ax4.text((i)+0.065,rect+allXhistStdS200[i],txt, ha='center', va='bottom',alpha=0.5)
ax4.set_ylabel('Percentage of total features included')
# https://matplotlib.org/examples/pylab_examples/histogram_percent_demo.html
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
ax4.yaxis.set_major_formatter(formatter)
ax3.xaxis.grid(False)
ax3.yaxis.grid(True)
ax4.xaxis.grid(False)
ax4.yaxis.grid(False)
sns.despine(trim=True,left=False, bottom=False, right=False)
#ax3.set_xlim(-0.05,1.1)
#ax3.set_xticks([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75],['>0%','>=10%','>=20%','>=30%','>=40%','>=50%','>=60%','>=70%'])
    ax3.set_xlabel('Percentage of subsamples features occurred in')
    ax3.set_title('Subsample 200 model performance with feature thresholding')
plt.tight_layout()
plt.savefig(os.path.join(globalOpdir,'96HCPandPNCThreshPerform'+testSampleSize+'Swarm.png'))
plt.close()
|
[
"6143678+DaveOC90@users.noreply.github.com"
] |
6143678+DaveOC90@users.noreply.github.com
|
81b5d2f903be4a2630043fb64531ce1e63856ff7
|
c6c35bbffa33d359ab535d87222cfd96cee4b5e7
|
/funny-bunny2.py
|
801a90e877af316b19b47f437c19921f5b659627
|
[] |
no_license
|
skandacode/Python
|
829d3a8c532f8f959fb978f46bfcb737d75dd24b
|
8c821cb7519b59c9afb65b3d9638b074ba183afc
|
refs/heads/master
| 2023-01-13T00:22:27.433291
| 2023-01-05T13:40:20
| 2023-01-05T13:40:20
| 187,404,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
from tkinter import *

tk = Tk()
canvas = Canvas(tk, height=1200, width=1500)
canvas.pack()
tk.resizable(0, 0)
tk.mainloop()  # enter the Tk event loop so the window actually appears and stays open
|
[
"arun.skanda@outlook.com"
] |
arun.skanda@outlook.com
|
aec2e97b8d8530f6d1e153386336ebb53211600a
|
33432aab8c7397021eeb803ec6ccf6199afc047d
|
/button.py
|
182d9161ff493542bf940aeaea8e36c4a16665c2
|
[] |
no_license
|
Chris683/alien_invasion
|
46d3ec46bd5299c37c32b135cf8c77a5af31fd9b
|
3c00c90b31e8b8e81a8f0e72f5e9d0afbb21c761
|
refs/heads/master
| 2023-03-19T21:50:08.295124
| 2021-03-19T14:31:33
| 2021-03-19T14:31:33
| 254,661,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
import pygame.font
class Button():
def __init__(self, ai_settings, screen, msg):
"""初始化按钮属性"""
self.screen = screen
self.screen_rect = screen.get_rect()
        # Set the button's dimensions and other properties
self.width, self.height = 200, 50
self.button_color = (255, 255, 0)
self.text_color = (255, 0, 255)
self.font = pygame.font.SysFont(None, 48)
        # Create the button's rect object and center it on the screen
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
        # The button's label only needs to be prepped once
self.prep_msg(msg)
def prep_msg(self, msg):
"""将msg渲染为图像, 并将其在按钮上居中"""
self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
        # Draw the color-filled button, then draw the text on top
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
|
[
"wukang683@163.com"
] |
wukang683@163.com
|
757cf8ad7c232ae92d6f00d0c9b417e70525c3f6
|
1d351c9df3beefce7b33affee9ce588403a22f42
|
/app.py
|
adfeb07f6d42674bd3dc4a7d2d89262d40e1d90f
|
[] |
no_license
|
learngvrk/stores-flask-restful-api
|
d0eb4b1faa8aa8a229f7dbd6aa4a55a63e732353
|
5ad4f6cf920e7d2687646123e6dd428185a99957
|
refs/heads/master
| 2021-01-05T01:51:12.061262
| 2020-02-17T02:53:47
| 2020-02-17T02:53:47
| 240,836,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
import os
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.user import UserRegistration
from resources.item import Item, ItemList
from resources.store import Store, StoreList
# Create the Flask App
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.secret_key = "jsn"
# Create the Api and pass the Flask Application
api = Api(app)
# Create a JSON Web Token
jwt = JWT(app, authenticate, identity) # /auth
api.add_resource(Item, '/item/<string:name>') # http://127.0.0.1:5000/item/chair
api.add_resource(ItemList, '/items') # http://127.0.0.1:5000/items
api.add_resource(UserRegistration, '/UserRegistration') # http://127.0.0.1:5000/UserRegistration
api.add_resource(Store, '/store/<string:name>') # http://127.0.0.1:5000/store
api.add_resource(StoreList, '/stores')
# Execute the App run function only if we run the app.py directly and not when app.py is imported from another
# module or class
if __name__ == "__main__":
from db import db
db.init_app(app)
app.run(port=5000, debug=True)
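
# Hedged client sketch (an addition, not part of the original file): with the
# app above running, flask_jwt exposes its default /auth endpoint, and the
# returned token goes back in a "JWT <token>" Authorization header. The
# username/password below are placeholders; a user must first be created via
# /UserRegistration, whose expected payload depends on that resource.
#
#   import requests
#   auth = requests.post("http://127.0.0.1:5000/auth",
#                        json={"username": "user1", "password": "abcd"})
#   headers = {"Authorization": "JWT " + auth.json()["access_token"]}
#   print(requests.get("http://127.0.0.1:5000/items", headers=headers).json())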
|
[
"= ranjithpals@gmail.com"
] |
= ranjithpals@gmail.com
|
b4c66f8a260da4fe83eb670aeb5e4b6544e3ef5b
|
00b6699ea1302149ab2b9fd57e115656f7a26e7d
|
/models/transformer_encoder.py
|
605c2d2f1ea94fdd68595454e41d279e6400e3ec
|
[] |
no_license
|
gauravaror/catastrophic_forgetting
|
97ac8e1c999db4f36d01ae19a0fb307f8109eb8b
|
60e53f61c45f6ce24a28bf8454c8078559bb9e6f
|
refs/heads/master
| 2021-06-30T21:07:27.448889
| 2020-10-05T09:37:36
| 2020-10-05T09:37:36
| 174,500,380
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,573
|
py
|
import math
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from models.utils import Hardsigmoid, BernoulliST
from models.kv_memory import KeyValueMemory
# This is really a custom TransformerEncoder with positional encoding, but we
# use the name TransformerRepresentation to avoid confusion with
# torch.nn.TransformerEncoder.
class TransformerRepresentation(nn.Module):
def __init__(self, emb_dim, nhead, nhid, nlayers, args, dropout=0.5,
use_memory=False, mem_size=None, mem_context_size=None,
inv_temp=None, use_binary=False):
super(TransformerRepresentation, self).__init__()
self.model_type = 'Transformer'
self.emb_dim = emb_dim
self.inv_temp = inv_temp
self.args = args
self.no_positional = self.args.no_positional
self.memory = KeyValueMemory(use_memory=use_memory,
emb_dim=self.emb_dim,
mem_size=mem_size,
mem_context_size=mem_context_size,
inv_temp=self.inv_temp,
use_binary=use_binary)
self.src_mask = None
self.transposed = True
self.pos_encoder = PositionalEncoding(emb_dim, dropout, transposed=self.transposed)
encoder_layers = TransformerEncoderLayer(self.memory.get_input_size(),
nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.pooler = nn.Linear(self.emb_dim, self.emb_dim)
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def add_target_pad(self):
self.memory.add_target_pad()
def get_output_dim(self):
## Transformer input size is same as output
return self.memory.get_input_size()
def forward(self, src, mask):
src = src.transpose(0,1)
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
src = src * math.sqrt(self.emb_dim)
src = self.pos_encoder(src) if not self.no_positional else src
src_input = self.memory(src)
output = self.transformer_encoder(src_input, self.src_mask)
output = output.transpose(0,1)
return torch.mean(output, dim=1)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000, transposed=False):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.transposed = transposed
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
if self.transposed:
pe = pe.transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
added_pe = self.pe[:x.size(0), :, :] if self.transposed else self.pe[:, x.size(1), :]
x = x + added_pe
return self.dropout(x)
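
# Hedged smoke test (an addition, not part of the original file): it assumes
# the models.* imports at the top of this file resolve; PositionalEncoding
# itself only needs torch. The (seq_len, batch, emb_dim) shape matches
# transposed=True, as used by TransformerRepresentation above.
if __name__ == "__main__":
    pos = PositionalEncoding(d_model=16, dropout=0.0, transposed=True)
    dummy = torch.zeros(10, 2, 16)
    print(pos(dummy).shape)  # expected: torch.Size([10, 2, 16])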
|
[
"gauravarora.daiict@gmail.com"
] |
gauravarora.daiict@gmail.com
|
ce8b7392f9103712fc34c8d66177a778883fae0d
|
3fb9c5b58ef0c596379ec5ed73e5471e6d34a5f4
|
/leetcode-challenge-01-2020/week1/kthMissingPositiveNumber.py
|
0ebd9ab4db4f7bc8c79f5d9bea311cdf70e99233
|
[] |
no_license
|
kramal/myLeetcode
|
10d6091a94b7b313294c14682e96868da82cb655
|
fb22384a38909ab9c12001eda132d75945f4e3bd
|
refs/heads/main
| 2023-02-28T21:28:22.838756
| 2021-02-09T18:11:03
| 2021-02-09T18:11:03
| 313,072,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
from typing import List


class Solution:
    def findKthPositive(self, arr: List[int], k: int) -> int:
        # collect every positive integer up to arr[-1] that is missing from arr
        list_missing = []
        result = False
        for i in range(1, arr[-1] + 1):
            if i not in arr:
                list_missing.append(i)
        if (k - 1) < len(list_missing):
            result = list_missing[k - 1]
        else:
            if len(list_missing) > 0:
                if list_missing[-1] < arr[-1]:
                    max_val_missing = arr[-1] + 1
                    result = max_val_missing
                    if (k - 1) > len(list_missing):
                        for i in range(k - 1 - len(list_missing)):
                            max_val_missing += 1
                        result = max_val_missing
            else:
                # no gaps inside arr: the k-th missing number lies past arr[-1]
                max_val_missing = arr[-1]
                for i in range(1, k + 1):
                    max_val_missing += 1
                result = max_val_missing
        return result
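
# Hedged usage sketch (an addition, not part of the original file):
# [2, 3, 4, 7, 11] is missing 1, 5, 6, 8, 9, 10, ..., so the 5th missing
# positive is 9.
if __name__ == "__main__":
    print(Solution().findKthPositive([2, 3, 4, 7, 11], 5))  # expected: 9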
|
[
"kramal888@gmail.com"
] |
kramal888@gmail.com
|
fa98b06d2d06034db777a9d9a0a9a471e55ce41d
|
fa95a770c4b0612dbcb545c0ce780d75eddb89e9
|
/downloader/map/osm_loader.py
|
3d7f3da61faf42986a0df74976a4a1cf4722d26a
|
[] |
no_license
|
fpeterek/car-map-downloader
|
095830fd44cab602093f3263aac38089ab32e7d7
|
827d99735051e523c3a458bbf4c12fca1cde4553
|
refs/heads/master
| 2023-03-27T16:55:26.520359
| 2021-04-02T10:30:49
| 2021-04-02T10:30:49
| 353,784,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
import osmium
from map.map import Map
from map.node import Node
from map.geo_position import GeoPosition
from map.path import Path
class MapLoaderHandler(osmium.SimpleHandler):
excluded_ways = ('footway', 'corridor', 'sidewalks', 'steps', 'crossing')
def __init__(self):
super(MapLoaderHandler, self).__init__()
self.map = Map()
def node(self, n):
position = GeoPosition(lon=n.location.lon, lat=n.location.lat)
node = Node(position=position)
self.map.add(node)
def way(self, way):
if not way.nodes or 'highway' not in way.tags or way.is_closed():
return
if way.tags['highway'] in MapLoaderHandler.excluded_ways:
return
previous = way.nodes[0]
# Ensure nodes are added to map in case they haven't been added before
# We want nodes to be added before ways, not the other way around
# Adding all necessary nodes here helps ensure that
self.map.add(previous)
for i in range(1, len(way.nodes)):
current = way.nodes[i]
self.map.add(current)
begin = GeoPosition(lon=previous.lon, lat=previous.lat)
end = GeoPosition(lon=current.lon, lat=current.lat)
path = Path(begin=begin, end=end)
self.map.add(path)
previous = current
def relation(self, way):
"""noop -> There's no need to handle relations, at least not now"""
class MapLoader:
def __init__(self):
pass
@staticmethod
def load(path: str) -> Map:
handler = MapLoaderHandler()
handler.apply_file(path, locations=True)
handler.map.reduce()
return handler.map
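
# Hedged usage sketch (an addition, not part of the original file): the file
# name is a placeholder; any OSM extract pyosmium can read (.osm, .osm.pbf)
# should work, and the map.* imports above must resolve.
if __name__ == "__main__":
    city_map = MapLoader.load("extract.osm.pbf")
    print(city_map)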
|
[
"fpeterek@seznam.cz"
] |
fpeterek@seznam.cz
|
ffad7c10a723699296c356e89c4dc8bc3a43652c
|
333e8404417cdf40cc84132a7abd24009dabb86e
|
/clienteharichand.py
|
47ae3e9e163b549ea8d258e742d7cc0694117edc
|
[] |
no_license
|
RabindraHarichand/Practica-2
|
355f1ac1bd5c0d09bb5deafe91c08e3cb04248ac
|
29c0bd4bb1433373930b71a558950ca75c323c49
|
refs/heads/master
| 2023-05-02T12:07:19.286519
| 2021-05-29T23:37:50
| 2021-05-29T23:37:50
| 372,077,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
import socket
import sys
import base64
import hashlib
# Read the input data
ip= (input('Escriba su ip '))
user= (input('Escriba un usuario '))
try:
    # Establish a TCP connection to the server on port 19876
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 19876))
s.settimeout(20)
    # Send the helloiam command along with the username
    s.send(bytes("helloiam "+user, "utf-8"))
    # Receive the server's response
    data = s.recv(1024)
    print('Received', repr(data))
    # Request the message length
    s.send(bytes("msglen", "utf-8"))
    # Receive the server's response
    data = s.recv(1024)
    print('Received', repr(data))
    # Request the message from the server
s.send(bytes("givememsg 15601","utf-8"))
data = s.recv(1024)
    # UDP client
UDPSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
UDPSocket.bind(('', 15601))
UDPSocket.settimeout(10)
i =0
while i < 10:
try:
dataU = UDPSocket.recvfrom(5000)
except socket.timeout as e:
print ("Ocurrio un error por tiempo espera superado del UDP")
i+=1
try:
if not(dataU == ()):
message = dataU[0]
address = dataU[1]
except NameError as e:
print("Error de conexión, el mensaje no pudo ser retirado, intentelo mas tarde")
sys.exit()
msg = base64.b64decode(message)
mensage=msg.decode('utf-8')
print(repr(mensage))
    # Compute the MD5 digest and convert it to hexadecimal
Dec=hashlib.md5(msg).hexdigest()
    # Validate the message with the server
s.send(bytes("chkmsg "+ Dec,"utf-8"))
data = s.recv(1024)
print('Received', repr(data))
    # Exit
s.send(bytes("bye","utf-8"))
data = s.recv(1024)
print('Received', repr(data))
except Exception as e:
print (e)
|
[
"rharichand.17@est.ucab.edu.ve"
] |
rharichand.17@est.ucab.edu.ve
|
243a84a5f36c62cf4ae3192da755b3332e3e9249
|
408d9f3fe18df16663aa714416dec7584825a5a0
|
/test/trainer/test_trainer.py
|
09d45d17231e1c15c3524e736a3775a8cc84b119
|
[
"Apache-2.0"
] |
permissive
|
geoalgo/gluon-ts-1
|
43a036910e1a18ed56e4732570c2156571408a5a
|
2de36e034e1829c9f8eda573d95f6a5cd4773a90
|
refs/heads/master
| 2022-09-01T12:52:34.611659
| 2019-06-06T06:09:24
| 2019-06-06T06:09:24
| 190,351,064
| 0
| 1
|
Apache-2.0
| 2019-06-14T13:10:20
| 2019-06-05T07:55:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
# Standard library imports
from typing import Any, List
# Third-party imports
import pytest
# First-party imports
from gluonts.trainer import Trainer
def test_epochs() -> None:
assert_valid_param(
param_name='epochs', param_values=[0, 1, 42, 1000, 1000]
)
assert_invalid_param(
param_name='epochs',
param_values=[-2, -1],
exp_msg='The value of `epochs` should be > 0 (type=value_error)',
)
def test_patience() -> None:
assert_valid_param(param_name='patience', param_values=[0, 1, 10, 100])
assert_invalid_param(
param_name='patience',
param_values=[-2, -1],
exp_msg='The value of `patience` should be >= 0 (type=value_error)',
)
def test_learning_rate() -> None:
assert_valid_param(
param_name='learning_rate', param_values=[0.42, 17.8, 10.0]
)
assert_invalid_param(
param_name='learning_rate',
param_values=[-2, -1e-10, 0, float('inf'), float('nan')],
exp_msg='The value of `learning_rate` should be > 0 (type=value_error)',
)
def test_learning_rate_decay_factor() -> None:
assert_valid_param(
param_name='learning_rate_decay_factor',
param_values=[0, 1e-10, 0.5, 1 - 1e-10],
)
assert_invalid_param(
param_name='learning_rate_decay_factor',
param_values=[-2, -1e-10, +1, +5, float('inf'), float('nan')],
exp_msg='The value of `learning_rate_decay_factor` should be in the [0, 1) range (type=value_error)',
)
def assert_valid_param(param_name: str, param_values: List[Any]) -> None:
try:
for x in param_values:
Trainer(**{param_name: x})
except Exception as e:
pytest.fail(f'Unexpected exception when initializing Trainer: "{e}"')
def assert_invalid_param(
param_name: str, param_values: List[Any], exp_msg: str
) -> None:
for x in param_values:
with pytest.raises(AssertionError) as excinfo:
Trainer(**{param_name: x})
assert exp_msg in str(excinfo.value)
|
[
"alxale@amazon.com"
] |
alxale@amazon.com
|
f4d9dcb1f41cc6a7b45a966ade0d934aaaa2b814
|
81d9929745ea14ccf6c482d270ce786e4d5e31a7
|
/bin/django-admin.py
|
d0e1a06152fdfecd75e141843098ad01663063e3
|
[] |
no_license
|
riadush/Django-Scrapy
|
9b6866f941c33c83b6802afee6e6f17d4b39451e
|
5b9c4a4df3fcb5d36a8294a04b80378f5e0accbb
|
refs/heads/master
| 2021-01-20T18:58:03.503661
| 2016-08-14T19:22:18
| 2016-08-14T19:22:18
| 65,682,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
#!/Users/riadush/Desktop/scrapydjango/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"riadush@Riadushs-Mac-mini.local"
] |
riadush@Riadushs-Mac-mini.local
|
080327cbd21766ac54d21ecf1f08d7336c162d80
|
cb57a9ea4622b94207d12ea90eab9dd5b13e9e29
|
/lc/python/289_game_of_life.py
|
7db24705a3c6af0951103e9edb5e780f16317398
|
[] |
no_license
|
boknowswiki/mytraning
|
b59585e1e255a7a47c2b28bf2e591aef4af2f09a
|
5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a
|
refs/heads/master
| 2023-08-16T03:28:51.881848
| 2023-08-10T04:28:54
| 2023-08-10T04:28:54
| 124,834,433
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
#!/usr/bin/python -t
#time O(m*n) space O(1)
#0,2 are "dead", and "dead->live"
#1,3 are "live", and "live->dead"
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: None Do not return anything, modify board in-place instead.
"""
m = len(board)
n = len(board[0])
for i in range(m):
for j in range(n):
if board[i][j] == 0 or board[i][j] == 2:
if self.nnb(board, i, j) == 3:
board[i][j] = 2
else:
if self.nnb(board, i, j) < 2 or self.nnb(board, i, j) > 3:
board[i][j] = 3
for i in range(m):
for j in range(n):
if board[i][j] == 2:
board[i][j] = 1
if board[i][j] == 3:
board[i][j] = 0
def nnb(self, board, i, j):
m,n = len(board), len(board[0])
count = 0
if i-1 >= 0 and j-1 >= 0: count += board[i-1][j-1]%2
if i-1 >= 0: count += board[i-1][j]%2
if i-1 >= 0 and j+1 < n: count += board[i-1][j+1]%2
if j-1 >= 0: count += board[i][j-1]%2
if j+1 < n: count += board[i][j+1]%2
if i+1 < m and j-1 >= 0: count += board[i+1][j-1]%2
if i+1 < m: count += board[i+1][j]%2
if i+1 < m and j+1 < n: count += board[i+1][j+1]%2
return count
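
# Hedged usage sketch (an addition, not part of the original file): the
# classic "blinker" flips from a horizontal bar to a vertical bar in place.
if __name__ == "__main__":
    board = [[0, 0, 0],
             [1, 1, 1],
             [0, 0, 0]]
    Solution().gameOfLife(board)
    print(board)  # expected: [[0, 1, 0], [0, 1, 0], [0, 1, 0]]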
|
[
"taobo0626@gmail.com"
] |
taobo0626@gmail.com
|
ce3f3e77b734c979ff3d49bc82b04f891d0df5bd
|
4d1cca31a3aae847bd6ee2dc12eca3971b263fc4
|
/src/flua/Compiler/Output/python/PythonClass.py
|
a2ffea6ea236df51bc99c98170c3846e3c94f63c
|
[] |
no_license
|
akyoto/flua
|
4cc27202c326a6eedd088c5bb88c644905e7be64
|
e09d50e0d50fc4f4faa1b0ee482756eaef4e60ec
|
refs/heads/master
| 2021-06-06T10:55:32.795005
| 2016-12-04T00:17:20
| 2016-12-04T00:17:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
####################################################################
# Header
####################################################################
# Target: Python Code
# Author: Eduard Urbach
####################################################################
# License
####################################################################
# (C) 2012 Eduard Urbach
#
# This file is part of Blitzprog.
#
# Blitzprog is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blitzprog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blitzprog. If not, see <http://www.gnu.org/licenses/>.
####################################################################
# Imports
####################################################################
from flua.Compiler.Output import *
from flua.Compiler.Output.BaseClass import *
from flua.Compiler.Output.python.PythonClassImplementation import *
####################################################################
# Classes
####################################################################
class PythonClass(BaseClass):
def __init__(self, name, node, cppFile):
super().__init__(name, node, cppFile)
def createClassImplementation(self, templateValues):
return PythonClassImplementation(self, templateValues)
|
[
"e.urbach@gmail.com"
] |
e.urbach@gmail.com
|
26cb4e82cc7893cf815f692b30f08b297b8f3e55
|
c6b5bbfebcf95f79fbdefcd85d0a692247051cd3
|
/multi_proxy_server.py
|
651a0c392f8555a5657dddbd2a306ad4963378d5
|
[] |
no_license
|
rmacgill/CMPUT404_Lab02
|
237e197ab314cb5e07699410813860ad6428db6b
|
6756f29b102b44d0e996a861de0cbb4b17dc0bd0
|
refs/heads/master
| 2023-02-16T17:17:27.066102
| 2021-01-19T02:12:26
| 2021-01-19T02:12:26
| 330,816,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,566
|
py
|
#!/usr/bin/env python3
import socket, sys
from multiprocessing import Process
# some constants to make this easier
LOCAL_HOST = ""
LOCAL_PORT = 8013
REMOTE_HOST = "www.google.com"
REMOTE_PORT = 80
BUFFER_SIZE = 4096
# Creates a socket object with the default arguments
def create_tcp_socket():
try:
# first arg specifies types of addresses (IPv4 for AF_INET)
# second arg specifies socket type (TCP for SOCK_STREAM)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket.error as msg:
        print("Failed to create socket: %s" % msg)
sys.exit()
return s
# Get the IP address of the given host address
def get_remote_ip(host):
try:
remote_ip = socket.gethostbyname(host)
except socket.gaierror:
print("Hostname could ont be resolved. Exiting")
sys.exit()
return remote_ip
# Sends a payload using the given socket (assumes connection is established)
def send_data(serversocket, payload):
try:
serversocket.sendall(payload)
except socket.error:
print("Send failed. Exiting.")
sys.exit()
def handle_conn(conn):
# collect sent data up to our BUFFER_SIZE
payload = conn.recv(BUFFER_SIZE)
    remote_s = create_tcp_socket()
remote_ip = get_remote_ip(REMOTE_HOST)
remote_s.connect((remote_ip, REMOTE_PORT))
send_data(remote_s, payload)
remote_s.shutdown(socket.SHUT_WR)
# full_data is forced to be the bytes type (empty)
full_data = b""
while True:
# gather up to BUFFER_SIZE data
data = remote_s.recv(BUFFER_SIZE)
# when we have no more data to collect, break out of the loop
if not data:
break
# append up to buffer_size data
full_data += data
# respond with whatever we were sent from google
conn.sendall(full_data)
# close the connection
conn.close()
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# for q3
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to our specified address
s.bind((LOCAL_HOST, LOCAL_PORT))
# set socket to listen mode so that it can accept connections
# (argument is how many pending connections to sit on before refusing new ones)
s.listen(3)
# loop forever until program is shut down
while True:
# accept connection
conn, addr = s.accept()
print("Connected by", addr)
p = Process(target=handle_conn, args=(conn,))
p.daemon = True
p.start()
p.join()
if __name__ == "__main__":
main()
|
[
"macgillivray.robert@gmail.com"
] |
macgillivray.robert@gmail.com
|
9c4118ff849f01c6333a7c40154ecf6bfa18ce52
|
cd3d67ac5db68db5197e924949539fe96fac2901
|
/anzee_be/bin/rst2html4.py
|
a13a7311f9fb1711b4d8e2091d594a29778a9173
|
[
"BSD-3-Clause"
] |
permissive
|
PanthraxDigital/PD_Anzee_BE
|
49dc6efdb6e88c45bd01124a0222d2542c68c247
|
070998427db470044fdccb1cf75a5b0015677d73
|
refs/heads/master
| 2020-04-22T12:02:26.837243
| 2019-02-14T06:12:04
| 2019-02-14T06:12:04
| 170,360,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
#!/Volumes/Disk_2/PanthraxClients/05. Anzee/01_Anzee_WS/Anzee_BE_WS/anzee_be/PD_Anzee_BE/anzee_be/bin/python3
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html4', description=description)
|
[
"maha.yash@gmail.com"
] |
maha.yash@gmail.com
|
0967d73f73541ca521c643230b9642e958f3ed95
|
8a31861c8b8df74a9e6e2a62f88b06937a154596
|
/XStreamity/usr/lib/enigma2/python/Plugins/Extensions/XStreamity/record.py
|
075b0d7f7ae1fb954fc1efcaaee2db6c36ff2d51
|
[] |
no_license
|
kiddac/XStreamity
|
79d7c2f06450874f03a12ba19e9f9032e1f8c564
|
0e4f79eedf3fb08e1b3900ef6d82bccfbc15139e
|
refs/heads/master
| 2023-08-28T06:28:01.958223
| 2023-08-12T17:38:53
| 2023-08-12T17:38:53
| 250,813,457
| 43
| 29
| null | 2023-08-07T18:46:49
| 2020-03-28T14:19:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,490
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from . import _
from .plugin import skin_directory, cfg
from .xStaticText import StaticText
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, ConfigText
from Components.Pixmap import Pixmap
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
import datetime
import time
import os
class RecordDateInput(ConfigListScreen, Screen):
def __init__(self, session, config_name=None, config_date=None, config_starttime=None, config_endtime=None, config_instant=False):
Screen.__init__(self, session)
self.session = session
skin_path = os.path.join(skin_directory, cfg.skin.getValue())
skin = os.path.join(skin_path, "settings.xml")
if os.path.exists("/var/lib/dpkg/status"):
skin = os.path.join(skin_path, "DreamOS/settings.xml")
with open(skin, "r") as f:
self.skin = f.read()
self.list = []
ConfigListScreen.__init__(self, self.list, session=self.session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Save"))
self["VKeyIcon"] = Pixmap()
self["VKeyIcon"].hide()
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self.conf_name = config_name
self.conf_date = config_date
self.conf_starttime = config_starttime
self.conf_endtime = config_endtime
self.conf_instant = config_instant
self.setup_title = (_("Please enter recording time"))
if self.conf_instant:
self.setup_title = (_("Please enter recording end time"))
self["actions"] = ActionMap(["XStreamityActions"], {
"cancel": self.cancel,
"red": self.cancel,
"green": self.keyGo,
}, -2)
self.onFirstExecBegin.append(self.initConfig)
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
self.setTitle(self.setup_title)
def cancel(self, answer=None):
if answer is None:
if self["config"].isChanged():
self.session.openWithCallback(self.cancel, MessageBox, _("Really close without saving settings?"))
else:
self.close()
elif answer:
for x in self["config"].list:
x[1].cancel()
self.close()
return
def initConfig(self):
self.timeinput_name = self.conf_name
self.timeinput_date = self.conf_date
self.timeinput_starttime = self.conf_starttime
self.timeinput_endtime = self.conf_endtime
self.createSetup()
def createSetup(self):
self.list = []
self.list.append(getConfigListEntry(_("Name"), self.timeinput_name))
if self.conf_instant is False:
self.list.append(getConfigListEntry(_("Start Time"), self.timeinput_starttime))
self.list.append(getConfigListEntry(_("End Time"), self.timeinput_endtime))
self["config"].list = self.list
self["config"].l.setList(self.list)
self.handleInputHelpers()
def handleInputHelpers(self):
from enigma import ePoint
currConfig = self["config"].getCurrent()
if currConfig is not None:
if isinstance(currConfig[1], ConfigText):
if "VKeyIcon" in self:
self["VKeyIcon"].show()
if "HelpWindow" in self and currConfig[1].help_window and currConfig[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
currConfig[1].help_window.instance.move(ePoint(helpwindowpos[0], helpwindowpos[1]))
else:
if "VKeyIcon" in self:
self["VKeyIcon"].hide()
def getTimestamp(self, date, mytime):
d = time.localtime(date)
dt = datetime.datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(time.mktime(dt.timetuple()))
def keyGo(self):
starttime = self.getTimestamp(self.timeinput_date, self.timeinput_starttime.value)
if self.timeinput_endtime.value < self.timeinput_starttime.value:
self.timeinput_date += 86400
endtime = self.getTimestamp(self.timeinput_date, self.timeinput_endtime.value)
self.close((True, starttime, endtime, self.timeinput_name.value))
|
[
"kiddac2015@gmail.com"
] |
kiddac2015@gmail.com
|
a4a7ca95f01a689b37eaec5be609be0cb566474e
|
772802bd932c76ce51e7db1436877144b12c3ad6
|
/tnsr.py
|
aee68d061036c3724afaa7fc3b8c77589e6f4363
|
[] |
no_license
|
gokseltokur/imageclassifier
|
e16ec957185839d18e35bec93ff484a40bf1ce72
|
070aef7389e099d7fcca264c1d101e17f6ca0ddc
|
refs/heads/master
| 2020-04-29T18:04:45.183994
| 2019-03-18T15:36:42
| 2019-03-18T15:36:42
| 176,313,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,265
|
py
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plot
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
from keras.preprocessing.image import ImageDataGenerator
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(class_names)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plot.grid(False)
plot.xticks([])
plot.yticks([])
plot.imshow(img, cmap=plot.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plot.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plot.grid(False)
plot.xticks([])
plot.yticks([])
thisplot = plot.bar(range(10), predictions_array, color="#777777")
plot.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
print(tf.__version__)
dataset = keras.datasets.cifar10
#(x_train, y_train), (x_test, y_test)
(train_images, train_labels), (test_images, test_labels) = dataset.load_data()
"""
print(train_images.shape)
print(len(train_labels))
test_images.shape
len(test_labels)
"""
"""
plot.figure()
plot.imshow(train_images[1])
plot.colorbar()
plot.grid(False)
plot.show()
"""
## HERE: preview of the first 25 training images (kept commented out)
"""
plot.figure(figsize=(10,10))
for i in range(25):
plot.subplot(5, 5, i+1)
plot.xticks([])
plot.yticks([])
plot.grid(False)
plot.imshow(train_images[i], cmap=plot.cm.binary)
    plot.xlabel(class_names[int(train_labels[i])]) # only integer scalar arrays can be converted to a scalar index, so int() is necessary
plot.show()
"""
"""SETUP THE LAYERS"""
#Flatten transforms the format of the images from a 2d array to a 1d array.
#Parameter 128 of the first Dense layer is the number of nodes (neurons).
#Parameter 10 of the second Dense layer is the number of output probabilities; note that it equals the number of classes.
#model = keras.Sequential([keras.layers.Flatten(input_shape = (32,32)), keras.layers.Dense(128, activation = tf.nn.relu), keras.layers.Dense(10, activation = tf.nn.softmax)])
y_train = keras.utils.to_categorical(train_labels, num_classes)
y_test = keras.utils.to_categorical(test_labels, num_classes)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=train_images.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
"""COMPILE THE MODEL"""
model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
# Normalize pixel values to the [0, 1] range
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
train_images /= 255
test_images /= 255
"""TRAIN THE MODEL"""
#The number of epochs sets how many full passes over the training data are used to update the weights.
model.fit(train_images, train_labels, epochs = 5)
loss, accuracy = model.evaluate(test_images, test_labels)
print('Test accuracy: ', accuracy)
"""MAKE PREDICTIONS"""
predictions = model.predict(test_images)
#print(class_names[np.argmax(predictions[0])]) ## returns a class name
i = 0
plot.figure(figsize=(6,3))
plot.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plot.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plot.show()
|
[
"gokseltokur@gmail.com"
] |
gokseltokur@gmail.com
|
d14a5beaf03130a87a9f70428dc8e07b1a9daa02
|
bff84a3c71570e4b789fdd8b6d54de2fccf8f1f6
|
/openpyxl/get_column2.py
|
81e68c625e329a59e72deb3abec9b6a5ec85cc02
|
[] |
no_license
|
dominico120/python_excel_book
|
babcd08939f81b008138e8eee628506e03d98cb8
|
962fe312c2a7e4ef6c262c3592d9f687b6649aa1
|
refs/heads/master
| 2023-07-04T04:11:00.826713
| 2021-04-26T03:36:40
| 2021-04-26T03:36:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import openpyxl
wb = openpyxl.load_workbook('shopping.xlsx')
ws = wb['Sheet1']
# iterate over the second column (index 1, i.e. column 'B')
for cell in list(ws.columns)[1]:
print(cell.value)
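
# Hedged equivalent (an addition, not part of the original file): openpyxl
# also accepts a column letter directly; index 1 above corresponds to 'B'.
for cell in ws['B']:
    print(cell.value)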
|
[
"91katsuhisa@gmail.com"
] |
91katsuhisa@gmail.com
|
f14e333aa0d166df0cdeacebd35555a2782063b0
|
0415adbd14f5c1cfd137df727b9aab3d9a3a1358
|
/GA_path_improve/venv/bin/easy_install-3.6
|
ecf63d60da623c39eddc36d20d54bd0cdd6f47ae
|
[] |
no_license
|
dyz330542613/GA_maze
|
8302362cd05628f807701815d9dccc62ff9123b8
|
f50e5b28c91bbe6e5a15d1da1d06a896edd13c20
|
refs/heads/master
| 2020-03-12T02:19:21.630924
| 2019-05-07T10:06:39
| 2019-05-07T10:06:39
| 130,399,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
6
|
#!/Users/dyz/PycharmProjects/GeneticAlgorithm/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"dyz@dufeifeizhudeMacBook-Pro.local"
] |
dyz@dufeifeizhudeMacBook-Pro.local
|
12f908935fc03abd5cdb76620bb8347f4a719490
|
6ef047ce19953492f03c31b97a0211e18480f008
|
/src/VIAME/EVAL/evaluate_optical_detection_results.py
|
67bcc0cead7fbce3be25d5316ea7fcddfe7c9031
|
[] |
no_license
|
muskanmahajan37/Bears-n-Seals
|
a089fde9b52c2b26530d9d038563f1ec249fc493
|
0d8593b40d17ec4c03b0e633c2fd5a29ed969808
|
refs/heads/master
| 2022-01-21T00:22:45.657797
| 2019-05-10T05:50:12
| 2019-05-10T05:50:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,411
|
py
|
import matplotlib.patches as mpatches
import os
import numpy as np
from PIL import Image
from src.VIAME.EVAL.model import DetectionResults
from src.arcticapi import ArcticApi
from src.arcticapi import HotSpot
from src.arcticapi.config import load_config
import matplotlib.pyplot as plt
from src.arcticapi.model.HotSpot import SpeciesList
def get_mean_confidences(tp, fp, fn):
pass
IOU_THRESH = 0.2 # only matters for optical
OPTICAL_CONFIDENCE_THRESH = .8
THERMAL_CONFIDENCE_THRESH = .5
threshes = [.2 ,.4,.6,.8,.9,.95]
cfg = load_config("new_data")
rgb_csv = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/results_optical_fullyolov3.csv"
rgb_csv1 = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/results_optical_17tiny.csv"
rgb_csv2 = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/results_optical_17tiny_50000.csv"
rgb_csv3 = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/rgb_18/results_optical_18tiny_20000.csv"
rgb_csv4 = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/rgb_18/results_optical_18tiny_last.csv"
rgb_csv5 = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/rgb_18/results_optical_18tiny_48000.csv"
rgb_csv6 = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results/rgb_18/results_optical_18tiny_60000.csv"
rgb_ims = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/input_optical_images.txt"
ir_csv = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/results_thermal.csv"
ir_ims = "/home/yuval/Documents/XNOR/VIAME/build/install/examples/darknet/detectors/input_thermal_images.txt"
api = ArcticApi(cfg)
res = []
res.append(DetectionResults(rgb_ims, rgb_csv, "rgb", api))
res.append(DetectionResults(rgb_ims, rgb_csv1, "rgb", api))
res.append(DetectionResults(rgb_ims, rgb_csv2, "rgb", api))
res.append(DetectionResults(rgb_ims, rgb_csv3, "rgb", api))
res.append(DetectionResults(rgb_ims, rgb_csv4, "rgb", api))
res.append(DetectionResults(rgb_ims, rgb_csv5, "rgb", api))
res.append(DetectionResults(rgb_ims, rgb_csv6, "rgb", api))
# thermal_results = DetectionResults(ir_ims, ir_csv, "thermal", api)
plotmap = {}
for thresh in threshes:
for optical_results in res:
if not optical_results.output in plotmap:
plotmap[optical_results.output] = [[],[],[]]
print(optical_results.output)
tp, fp, fn = optical_results.confidence_filter(thresh,.1)
new_hotspots = []
for f in fp:
fp_dets = fp[f]
copy_hs = api.rgb_images[f].hotspots[0]
i = 0
for det in fp_dets:
type = 0 if "ringed" in det.label else 1
type = SpeciesList[type]
hs = HotSpot(copy_hs.id + "fp%d"%i, copy_hs.thermal_loc[0], copy_hs.thermal_loc[1],int(det.x1), int(det.y1), int(det.x2), int(det.y2), "Animal", type,
copy_hs.rgb, copy_hs.ir, copy_hs.timestamp, copy_hs.project_name, copy_hs.aircraft, int(det.y1), int(det.y2), int(det.x1), int(det.x2), False, confidence=str(det.confidence))
new_hotspots.append(hs)
# xpos, ypos, thumb_left, thumb_top, thumb_right, thumb_bottom, type, species_id, rgb
# , ir, timestamp, project_name, aircraft,
# updated_top = -1, updated_bot = -1, updated_left = -1, updated_right = -1,
# updated = False, status = "none", confidence = "NA"):
i+=1
header = "id,color_image,thermal_image,hotspot_id,hotspot_type,species_id,species_confidence,fog,thermal_x," \
"thermal_y,color_left,color_top,color_right,color_bottom, updated_left, updated_top, updated_right, updated_bottom, " \
"updated, status"
api.saveHotspots(new_hotspots, "/fast/fps/fps.csv", header)
# for f in fp:
# fp_dets = fp[f]
# image = Image.open(f)
# open_cv_image = np.array(image)
# for det in fp_dets:
# cv2.rectangle(open_cv_image, (int(det.x1), int(det.y1)), (int(det.x2), int(det.y2)), (255, 0, 0), 6)
# for hs in api.rgb_images[f].hotspots:
# cv2.rectangle(open_cv_image, (hs.rgb_bb.x1-2, hs.rgb_bb.y1-2), (hs.rgb_bb.x2-2, hs.rgb_bb.y2-2), (0, 255, 0), 6)
#
# image = Image.fromarray(open_cv_image)
# image.save("/fast/fps/" + os.path.basename(f))
precision = float(len(tp))/(len(tp) + len(fp))
recall = float(len(tp))/(len(tp) + len(fn))
f1 = 2* ((precision*recall)/(precision+recall))
print("IOU_THRESH %.2f" % IOU_THRESH)
# print("CONFIDENCE_THRESH %.2f" % CONFIDENCE_THRESH)
        print()
print("precision %.3f" % precision)
print("recall %.3f" % recall)
plotmap[optical_results.output][0].append(precision)
plotmap[optical_results.output][1].append(recall)
plotmap[optical_results.output][2].append(f1)
print("FP %d TP %d FN %d" % (len(fp), len(tp), len(fn)))
# avg_confidence_tp = np.average(tps_confidence_thresh)
# stddev_confidence_tp = np.std(tps_confidence_thresh)
# avg_iou_tp = np.average(tps_iou_thresh)
# stddev_iou_tp = np.std(tps_iou_thresh)
# print("True Positive confidence avg: %.3f std: %.3f" % (avg_confidence_tp, stddev_confidence_tp))
# print("True Positive iou avg: %.3f std: %.3f" % (avg_iou_tp, stddev_iou_tp))
        print()
# print("Missing %d images" % missing)
colors = ['olive', 'red', 'blue', 'green', 'orange', 'yellow', "black", "gray"]
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
legend_dict = {}
for idx, d in enumerate(plotmap):
color = colors[idx]
ys = plotmap[d]
legend_dict[os.path.basename(d)] = color
ax.plot(threshes, ys[0], linestyle=(0, (1, 1)), color=color)
ax.plot(threshes, ys[1], linestyle=(0, ()), color=color)
ax.plot(threshes, ys[2], linestyle=(0, (5, 5)), color=color)
patchList = []
for key in legend_dict:
data_key = mpatches.Patch(color=legend_dict[key], label=key)
patchList.append(data_key)
plt.xlabel("Confidence Thresh")
plt.ylabel("Percent")
plt.legend(handles=patchList)
plt.show()
|
[
"yuval@yuvalboss.com"
] |
yuval@yuvalboss.com
|
f6d3617804c700e4c95531ffe0787d46922e3a87
|
0d43d21491abbaa06e80d7c5a92dd9b1ec3a8050
|
/python/water_angles.py
|
f48f1c95a322a0e7266a2c6914d5eddf0d1b7506
|
[] |
no_license
|
lfelberg/confinement
|
067500edfb8c0eec92f9981eac854ec9ca12ea7e
|
e824ab61def3b94529e9c2f1944ad8ac88d08e69
|
refs/heads/master
| 2021-01-11T16:23:44.557687
| 2017-08-04T22:46:39
| 2017-08-04T22:46:39
| 80,072,886
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,171
|
py
|
import sys
import numpy as np
from water_angles_util import translate_pbc,cal_ang
from xyzfile import XYZFile
from volfile import VolFile
WOXY = 1; WHYD = 2; GRAPHENE = 3
def get_angles(xyz, disC, volC):
'''Method to get various angles between two waters, in confined space'''
# find water oxys, hyds
oo = xyz.get_type_i(WOXY); hh = xyz.get_type_i(WHYD); bnz = 5
oi,hi = xyz.get_inner_wat(); oou,hou = xyz.get_outer_wat() # outside walls
t1s, t2s, c1s, c2s, phs, rs, ws, wat_angles = [],[],[],[],[],[],[],[]
n_w_in = sum(oi.astype(int)); n_w_ou = sum(oou.astype(int))
in_wat = np.zeros((3, n_w_in, 3)); ou_wat = np.zeros((3, n_w_ou, 3))
hs_i=np.zeros((2,n_w_in), dtype=int); hs_o=np.zeros((2,n_w_ou), dtype=int)
ho_ct, hoidx, hi_ct, hiidx = 0, 0, 0, 0
for i in range(len(oi)): #looping over all atoms, make a list of H1 and H2
if hi[i] == True:
if hi_ct == 0:
hs_i[0][hiidx] = i; hi_ct = 1
elif hi_ct == 1:
hs_i[1][hiidx] = i; hi_ct = 0; hiidx +=1
if hou[i] == True:
if ho_ct == 0:
hs_o[0][hoidx] = i; ho_ct = 1
elif ho_ct == 1:
hs_o[1][hoidx] = i; ho_ct = 0; hoidx +=1
rg=7.0; x_bn = np.arange(-rg, rg, (2.*rg)/9);
for i in range(1,len(xyz.atom)): # for each time snapshot, except first
th1, th2, ch1, ch2, phi, rr, wd = [],[],[],[],[],[],[]
rng = np.array([volC.get_x_rng_i(i), volC.get_y_rng_i(i),
volC.get_z_rng_i(i)]) # pbc range
in_wat[0] = xyz.atom[i,oi,:]; ou_wat[0] = xyz.atom[i,oou,:];
for h in range(2):
in_wat[h+1]=xyz.atom[i,hs_i[h],:];ou_wat[h+1]=xyz.atom[i,hs_o[h],:]
#binning waters by distribution of x positions
#For the outer waters, need to wrap with PBCS before computing
# to make sure the mean is not in the middle of the box
ou_sft = np.zeros((ou_wat.shape[1:])); ou_sft[0] = ou_wat[0,0]
ou_sft[1:] = translate_pbc(ou_wat[0,0], ou_wat[0,1:], rng)
x_in = np.mean(in_wat[0,:,0]);x_ou = np.mean(ou_sft[:,0]);
b_in = np.digitize(in_wat[0,:,0]-x_in, x_bn)
b_ou = np.digitize(ou_sft[:,0]-x_ou, x_bn)
for j in range(len(x_bn)):
if sum((b_in == j).astype(int)) > 1: #if there is > 1 water in bin
b_arr = b_in == j
t1,t2,c1,c2,ph,r,w=cal_ang(in_wat[:,b_arr],rng)
w = [rng[1]*rng[2]] * len(r)
th1+=t1;th2+=t2;ch1+=c1;ch2+=c2;phi+=ph;rr+=r;wd+=w
if sum((b_ou == j).astype(int)) > 1:
b_arr = b_ou == j
t1,t2,c1,c2,ph,r,w=cal_ang(ou_wat[:,b_arr],rng)
w = [rng[1]*rng[2]] * len(r)
th1+=t1;th2+=t2;ch1+=c1;ch2+=c2;phi+=ph;rr+=r;wd+=w
t1s += [th1];t2s += [th2];c1s += [ch1];c2s += [ch2];phs += [phi];
rs += [rr];ws += [wd]
return list([t1s, t2s, c1s, c2s, phs, rs, ws])
def print_angles(angls, fname):
'''Print file of data in csv-like format, angls is a list of values:
angls = [ [thet1],[thet2], [chi1], [chi2], [phi], [rs] ]'''
f = open(fname, 'w');
nsnap, stn = len(angls[0]), ''
vals = ['atim','the1', 'the2','chi1', 'chi2','phi', 'dis', 'vol']
for j in range(len(vals)):
stn += "{0},".format(vals[j])
f.write(stn[:-1]+'\n')
for j in range(nsnap):
for k in range(len(angls[0][j])): # the number of pairs will change
st = '{0},'.format(j)
for i in range(len(vals)-1):
st += "{0:.5f},".format(angls[i][j][k])
f.write("{0}\n".format(st[:-1]))
f.close()
def main():
''' Given an xyz file, sort all waters, calculate 5 angles between each
pair on each side of the wall '''
xyzname=sys.argv[1]; sep=sys.argv[2]; ln=sys.argv[3]; itr=sys.argv[4]
nm = str(sep)+"_"+str(ln)+"_"+str(itr)
volC = VolFile("run"+nm+".vol")
disC = XYZFile("run"+nm+".dist", VolFile(""))
xyz_cl = XYZFile(xyzname, volC)
angs = get_angles(xyz_cl, disC, volC)
print_angles(angs, "run"+nm+"_angles.csv")
if __name__=="__main__":
main()
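
# Hedged usage note (an addition, not part of the original file): argument
# values are placeholders. An invocation such as
#   python water_angles.py traj.xyz 6 20 1
# expects run6_20_1.vol and run6_20_1.dist alongside traj.xyz and writes
# run6_20_1_angles.csv.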
|
[
"lfelberg@berkeley.edu"
] |
lfelberg@berkeley.edu
|
9061fd693fed8b422f6387b055e083a8789e24af
|
58d6391d4601e9b847cb603d63fe717af0bed05c
|
/facecover 1.0/cvfacerec2.py
|
b4fa0da8bcbd05729e198932e2184500e0c438e6
|
[] |
no_license
|
ErikEremenko/EEVidFaceBlur
|
b6559283860a8d64ed82f69fc81e679f863327c6
|
34ebcfb48dc9a2b955055e9bd8378cc39ba2b97b
|
refs/heads/main
| 2023-01-12T00:32:00.762422
| 2020-11-22T16:38:48
| 2020-11-22T16:38:48
| 315,079,681
| 0
| 0
| null | 2020-11-22T16:30:56
| 2020-11-22T16:19:33
| null |
UTF-8
|
Python
| false
| false
| 2,182
|
py
|
import cv2
import numpy as np
import os
import eeraser
import ctypes
## Styles:
## 0 : OK
## 1 : OK | Cancel
## 2 : Abort | Retry | Ignore
## 3 : Yes | No | Cancel
## 4 : Yes | No
## 5 : Retry | Cancel
## 6 : Cancel | Try Again | Continue
chooseimg = input('Image name (without .jpg extension): ')
image = cv2.imread(f'./{chooseimg}.jpg')
result_image = image.copy()
# Specify the trained cascade classifier
face_cascade_name = "./haarcascade_frontalface_alt.xml"
# Create a cascade classifier
face_cascade = cv2.CascadeClassifier()
# Load the specified classifier
face_cascade.load(face_cascade_name)
#Preprocess the image
grayimg = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
grayimg = cv2.equalizeHist(grayimg)
#Run the classifiers
faces = face_cascade.detectMultiScale(grayimg, 1.1, 2, 0|cv2.CASCADE_SCALE_IMAGE, (30, 30))
print("Faces detected")
if len(faces) != 0: # If there are faces in the images
for f in faces: # For each face in the image
# Get the origin co-ordinates and the length and width till where the face extends
x, y, w, h = [ v for v in f ]
# get the rectangle img around all the faces
cv2.rectangle(image, (x,y), (x+w,y+h), (255,255,0), 5)
sub_face = image[y:y+h, x:x+w]
        # apply a Gaussian blur on this new rectangle image
sub_face = cv2.GaussianBlur(sub_face,(23, 23), 30)
# merge this blurry rectangle to our final image
result_image[y:y+sub_face.shape[0], x:x+sub_face.shape[1]] = sub_face
face_file_name = "./face_" + str(y) + ".jpg"
cv2.imwrite(face_file_name, sub_face)
# delete extracted faces if needed
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Deletion', 'Delete faces: yyy Save faces: n', 0)
eeraser.erase_command()
# cv2.imshow("Detected face", result_image)
cv2.imwrite(f"./result.{chooseimg}(covered).png", result_image)
print('success')
def Mbox2(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox2('Operation SUCCEEDED', 'Faces covered successfully:', 0)
quit()
|
[
"noreply@github.com"
] |
ErikEremenko.noreply@github.com
|
e65d0b1b24da675b239b2d2e2c36bb245c7f9fcd
|
144590772aaa89e5ead8936512b0bd035c215c7b
|
/resilient-circuits/tests/selftest_tests/mocked_fail_script.py
|
379c2b9f793bb51a942da5de7616bd5c9153c50c
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-python-api
|
f65dad3f3c832581127026fa3e626eaf3d4749a7
|
84e8c6d9140ceac0bf47ce0b98e11c7953d95e61
|
refs/heads/main
| 2023-07-23T12:36:49.551506
| 2023-07-11T15:15:43
| 2023-07-11T15:15:43
| 101,414,862
| 37
| 31
|
MIT
| 2023-09-07T14:00:34
| 2017-08-25T14:59:45
|
Python
|
UTF-8
|
Python
| false
| false
| 43
|
py
|
def selftest(args):
raise Exception()
|
[
"Ryan.Gordon1@ibm.com"
] |
Ryan.Gordon1@ibm.com
|
b29fc417278388085744b38361297b4d7f0a9261
|
28cf4f53773e2f53fb9818574e38f79bb5c5c95e
|
/Blog/Blog_main/migrations/0011_auto_20210205_1347.py
|
bf6ec466ea9e7f14350c8a1bf25db10688448553
|
[] |
no_license
|
ialfai/Blog
|
19f8b0a0d3e6007d7a8c267f6dad8b208e15eee7
|
adb9c5993713e80e32cdead97526e9d13ac84349
|
refs/heads/main
| 2023-04-04T01:02:57.648675
| 2021-04-05T08:01:36
| 2021-04-05T08:01:36
| 332,748,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
# Generated by Django 3.1.5 on 2021-02-05 13:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Blog_main', '0010_board_picture'),
]
operations = [
migrations.AlterField(
model_name='board',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to='media'),
),
migrations.CreateModel(
name='Authors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_author', models.BooleanField(default=True)),
('is_active', models.BooleanField(default=True)),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"k.zgodaferchmin@gmail.com"
] |
k.zgodaferchmin@gmail.com
|
63e74ea4d28b30a0ff320d82f76133830d5bb42a
|
54640633f7da8eb32aabe9f3eb2086eda40f26f4
|
/gemaracard/flashcard/views.py
|
3d22c2bee020d0aec8ef483af19d8b6883f2df9c
|
[] |
no_license
|
BenBagBag/gemaracard
|
66ed295bc3f6c01df69aae5417050698c6574ee8
|
ac2d085791c856996c32030f3a4a9466078f17c3
|
refs/heads/master
| 2021-01-25T06:24:15.445820
| 2017-06-07T01:18:46
| 2017-06-07T01:18:46
| 93,564,229
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,933
|
py
|
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic.edit import UpdateView
from django.utils import timezone
from .forms import FlashcardForm, TextForm
from .models import Flashcard, User, Text
def set_optional_fields(card):
if card.language != 'LW':
card.loanword_language = ''
return card
@login_required
def delete(request, pk):
Flashcard.objects.get(pk=pk).delete()
return render(request, 'delete-alert.html')
@login_required
def edit(request, pk):
instance = Flashcard.objects.get(pk=pk)
if request.method == 'POST':
form = FlashcardForm(request.POST, instance=instance)
if form.is_valid():
form.save()
# go back and correct the optional fields
form = set_optional_fields(instance)
form.save()
return redirect('flashcard_detail', pk=pk)
else:
form = FlashcardForm(request.POST or None, instance=instance)
return render(request, 'flashcard-form.html', {'form': form})
@login_required
def flashcard_new(request, text_pk=None):
if request.method == 'POST':
form = FlashcardForm(request.POST)
if form.is_valid():
# commit=False b/c we want to add more data
card = form.save(commit=False)
# validate extra optional fields
card = set_optional_fields(card)
card.author = request.user
card.published_date = timezone.now()
card.save()
if text_pk:
try:
text = Text.objects.get(pk=text_pk, user=request.user)
except Text.DoesNotExist:
raise Http404
text.flashcards.add(card)
return redirect('flashcard_detail', pk=card.pk)
else:
form = FlashcardForm()
return render(request, 'flashcard-form.html', {'form': form})
@login_required
def flashcard_detail(request, pk):
try:
card = Flashcard.objects.get(pk=pk, author=request.user)
except Flashcard.DoesNotExist:
raise Http404
context = {'card': card}
return render(request, 'flashcard.html', context)
@login_required
def flashcard_modal(request, pk):
try:
card = Flashcard.objects.get(pk=pk, author=request.user)
except Flashcard.DoesNotExist:
raise Http404
context = {'card': card}
return render(request, 'flashcard-modal.html', context)
@login_required
def flashcard_list(request):
flashcards = Flashcard.objects.filter(author=request.user)
sorted_flashcards = flashcards.order_by('vocab_term')
context = {'cards': sorted_flashcards}
return render(request, 'flashcard-list.html', context)
@login_required
def text_new(request):
if request.method == 'POST':
form = TextForm(request.POST)
if form.is_valid():
text = form.save(commit=False)
text.user = request.user
text.save()
return redirect('text_detail', pk=text.pk)
else:
form = TextForm()
return render(request, 'text-form.html', {'form': form})
@login_required
def text_list(request):
texts = Text.objects.filter(user=request.user)
sorted_texts = texts.order_by('name')
context = {'texts': sorted_texts}
return render(request, 'text-list.html', context)
@login_required
def text_detail(request, pk):
try:
text = Text.objects.get(pk=pk)
except Text.DoesNotExist:
raise Http404
text_flashcards = text.flashcards.all()
user_flashcards = Flashcard.objects.filter(author=request.user)
context = {'text': text, 'text_flashcards': text_flashcards, 'user_flashcards': user_flashcards}
return render(request, 'text.html', context)
@login_required
def link_flashcard_list(request, text_pk):
try:
flashcards = Flashcard.objects.all()
user_flashcards = flashcards.filter(author=request.user)
text = Text.objects.get(pk=text_pk)
text_flashcards = text.flashcards.all()
unlinked_flashcards = list(set(user_flashcards) - set(text_flashcards))
except Flashcard.DoesNotExist:
raise Http404
context = {'user_flashcards': unlinked_flashcards, 'text_pk': text_pk}
return render(request, 'link-flashcard-list.html', context)
@login_required
def link_flashcard_and_text(request, text_pk, card_pk):
try:
card = Flashcard.objects.get(pk=card_pk, author=request.user)
except Flashcard.DoesNotExist:
raise Http404
try:
text = Text.objects.get(pk=text_pk, user=request.user)
except Text.DoesNotExist:
raise Http404
text.flashcards.add(card)
return redirect('text_detail', pk=text_pk)
def index(request):
return render(request, 'index.html')
|
[
"IsaacC@allenai.org"
] |
IsaacC@allenai.org
|
03fb4eeba75eacbe4144708377efb264a75ff3be
|
9696340459369558a89f1106118826cd2e2cdf73
|
/MapReduce/08_pipeline_delivery_point/05_debug/reducer_track_hid.py
|
20169b1b37c53593a945eb8ef0fbf5e4f59c89d4
|
[] |
no_license
|
jasonzhao3/Fingerprint
|
77bb5165762236a0ebdb72993dd5dcbeab4df812
|
c21fa8869922d38a6b029afefe467a88b29889fc
|
refs/heads/master
| 2021-01-02T22:44:55.540350
| 2014-06-14T02:09:10
| 2014-06-14T02:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
#!/usr/bin/env python
from itertools import groupby
from operator import itemgetter
import sys
def read_mapper_output(file, separator='\t'):
for line in file:
yield line.rstrip().split(separator, 1)
def main(separator='\t'):
# input comes from STDIN (standard input)
data = read_mapper_output(sys.stdin, separator=separator)
# groupby groups multiple word-count pairs by word,
# and creates an iterator that returns consecutive keys and their group:
# current_word - string containing a word (the key)
# group - iterator yielding all ["<current_word>", "<count>"] items
for current_word, group in groupby(data, itemgetter(0)):
try:
cnt = 0
for hid, device in group:
cnt += 1
if (cnt > 10000):
print "%s%s%d" % (current_word, separator, cnt)
except ValueError:
            # the input line was malformed, so silently discard this item
pass
if __name__ == "__main__":
main()
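
# Hedged usage note (an addition, not part of the original file): as a Hadoop
# streaming reducer, the script expects sorted, tab-separated mapper output on
# stdin, e.g. (the file name is a placeholder):
#   sort mapper_output.txt | ./reducer_track_hid.py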
|
[
"yzhao3@stanford.edu"
] |
yzhao3@stanford.edu
|
14a2867d020b5f3f389f8d7bfcdeb01028177479
|
0153b89a3f89bc45bd9cc2401ddf21a9b6a10654
|
/mlflow/tracking/__init__.py
|
b219fbfd6db315dbd2e943e16b27308c98b1eb74
|
[
"Apache-2.0"
] |
permissive
|
hamroune/mlflow
|
4b7d3d1612e1cfd1228c367590f5fd50a7ca37ba
|
ccdd70aee5e17f28b4260d045a262d7b3c4fd31d
|
refs/heads/master
| 2020-03-27T13:08:26.762956
| 2018-09-07T15:44:36
| 2018-09-07T15:44:36
| 146,592,346
| 0
| 0
|
Apache-2.0
| 2018-08-29T11:50:30
| 2018-08-29T11:50:30
| null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
"""
The MLflow Tracking package provides a Python CRUD interface to MLflow Experiments
and Runs. This is a lower level API that more directly translates to REST calls.
For a more fluent API of managing an 'active run', see :mod:`mlflow`.
"""
from mlflow.tracking.service import MLflowService, get_service
from mlflow.tracking.utils import set_tracking_uri, get_tracking_uri, _get_store, \
_TRACKING_URI_ENV_VAR
from mlflow.tracking.fluent import _EXPERIMENT_ID_ENV_VAR, _RUN_ID_ENV_VAR
__all__ = [
"MLflowService",
"get_service",
"get_tracking_uri",
"set_tracking_uri",
"_get_store",
"_EXPERIMENT_ID_ENV_VAR",
"_RUN_ID_ENV_VAR",
"_TRACKING_URI_ENV_VAR",
]
|
[
"noreply@github.com"
] |
hamroune.noreply@github.com
|
e5dbb60d97f2bfa8da19922f6f081286bc632270
|
1c340ba52370d9076df56a7e29df8df91bce97b4
|
/PrefixSum/BlellochScan.py
|
3bca0cb4e2f067b32844ae7a214ad904f47a238d
|
[
"MIT"
] |
permissive
|
dshpet/nure_TPDS
|
f5eabea9877e718e77038853e72cc3b6eaffd23d
|
deff5a4fd3b6a7ad2692d1128fb18ba1caa0af60
|
refs/heads/master
| 2021-08-30T18:14:31.334423
| 2017-12-19T00:03:30
| 2017-12-19T00:03:30
| 107,272,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
#!/usr/bin/env python
'''
from Udacity GPU parallel courses by NVIDIA
https://youtu.be/mmYv3Haj6uc
https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
fuck nvidia (c) Linus Torvalds
https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
'''
import threading
import math
import TestFunction
test_data = [1, 2, 3, 4, 5, 6, 7, 8]
'''
Generic sum function
'''
def accumulate(in_list, in_index1, in_index2, out_list, out_index):
sum = in_list[in_index1] + in_list[in_index2]
out_list[out_index] = sum
def downsweep_helper(in_list, in_index, out_list, out_index1, out_index2):
t = in_list[out_index1]
out_list[out_index1] = in_list[in_index]
out_list[out_index2] = t + in_list[in_index]
# upsweep
# 1: for d = 0 to log2(n) - 1 do
# 2:     for all k = 0 to n - 1 by 2^(d+1) in parallel do
# 3:         x[k + 2^(d+1) - 1] = x[k + 2^d - 1] + x[k + 2^(d+1) - 1]
# downsweep
# 1: x[n - 1] = 0
# 2: for d = log2(n) - 1 down to 0 do
# 3:     for all k = 0 to n - 1 by 2^(d+1) in parallel do
# 4:         t = x[k + 2^d - 1]
# 5:         x[k + 2^d - 1] = x[k + 2^(d+1) - 1]
# 6:         x[k + 2^(d+1) - 1] = t + x[k + 2^(d+1) - 1]
def scan(num_list):
# shared memory
out = list(num_list)
current_list = list(num_list)
# upsweep
n = len(num_list)
iterations = int(math.log(n, 2))
for d in range(0, iterations):
jobs = []
stride = 2 ** (d + 1)
for k in range(0, n - 1, stride):
out_index = k + stride - 1
in_index1 = k + 2 ** d - 1
in_index2 = k + stride - 1
            thread = threading.Thread(target=accumulate, args=(current_list, in_index1, in_index2, out, out_index))  # pass the callable and its args separately so the work runs inside the thread
jobs.append(thread)
for job in jobs:
job.start()
for job in jobs:
job.join()
current_list = list(out)
# print(out)
# upsweep finished
# print(out)
# downsweep
identity = 0
out[n-1] = identity
current_list = list(out)
for d in reversed(range(0, iterations)):
jobs = []
stride = 2 ** (d + 1)
for k in range(0, n - 1, stride):
out_index1 = k + 2 ** d - 1
out_index2 = k + 2 ** (d + 1) - 1
in_index = out_index2
            thread = threading.Thread(target=downsweep_helper, args=(current_list, in_index, out, out_index1, out_index2))  # same fix: don't call the helper eagerly
jobs.append(thread)
for job in jobs:
job.start()
for job in jobs:
job.join()
current_list = list(out)
# print(out)
# downsweep finished
return out
result = scan(test_data)
# print(result)
TestFunction.Test(scan, 64)
TestFunction.Test(scan, 128)
TestFunction.Test(scan, 256)
TestFunction.Test(scan, 512)
TestFunction.Test(scan, 1024)
TestFunction.Test(scan, 2048)
TestFunction.Test(scan, 4096)
TestFunction.Test(scan, 8192)
|
[
"dshpet@gmail.com"
] |
dshpet@gmail.com
|
6f30485fdc0055a546646267b89fe8ce2600d4e5
|
f4910b48ee7b026fa789aaa7b573cef00df86393
|
/app.py
|
0ba460202800cf4031b68435fde3b1ec6b960169
|
[] |
no_license
|
PrashilAlva/FlaskReference
|
781d9c09437dbf74c4104f7fafc5e711511d7982
|
be0a07186bb71c3f43cac2b7484831f67112c460
|
refs/heads/master
| 2020-12-27T18:29:50.415758
| 2020-02-03T16:09:00
| 2020-02-03T16:09:00
| 238,005,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
import ipldb as ipl
import pprint
if __name__ == "__main__":
t_names=ipl.get_team_names()
t_labels=ipl.get_team_labels()
for ele in range(0,8):
print(t_names[ele],":",t_labels[ele])
t_details=ipl.get_team_details()
print("*"*100)
pprint.pprint(t_details)
print("*"*100)
p_details=ipl.get_players()
pprint.pprint(p_details)
print("*"*100)
rcb_play=ipl.get_team_players("RCB")
print("Players of RCB are:")
print("*"*100)
pprint.pprint(rcb_play)
print("*"*100)
|
[
"prashilalva27@gmail.com"
] |
prashilalva27@gmail.com
|
5133a67b4edd0c59c4ea5de641e73dca50cb2c73
|
bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062
|
/ppdet/modeling/cls_utils.py
|
3ae8d116959a96bb2bf337dee7330c5909bc61ac
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleDetection
|
e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961
|
bd83b98342b0a6bc8d8dcd5936233aeda1e32167
|
refs/heads/release/2.6
| 2023-08-31T07:04:15.357051
| 2023-08-18T02:24:45
| 2023-08-18T02:24:45
| 217,475,193
| 12,523
| 3,096
|
Apache-2.0
| 2023-09-10T10:05:56
| 2019-10-25T07:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _get_class_default_kwargs(cls, *args, **kwargs):
"""
Get default arguments of a class in dict format, if args and
kwargs is specified, it will replace default arguments
"""
varnames = cls.__init__.__code__.co_varnames
argcount = cls.__init__.__code__.co_argcount
keys = varnames[:argcount]
assert keys[0] == 'self'
keys = keys[1:]
values = list(cls.__init__.__defaults__)
assert len(values) == len(keys)
if len(args) > 0:
for i, arg in enumerate(args):
values[i] = arg
default_kwargs = dict(zip(keys, values))
if len(kwargs) > 0:
for k, v in kwargs.items():
default_kwargs[k] = v
return default_kwargs
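# Illustrative usage sketch (the Conv class below is hypothetical, not part
# of PaddleDetection):
#   class Conv:
#       def __init__(self, ch=64, act='relu'): ...
#   _get_class_default_kwargs(Conv)              -> {'ch': 64, 'act': 'relu'}
#   _get_class_default_kwargs(Conv, 128)         -> {'ch': 128, 'act': 'relu'}
#   _get_class_default_kwargs(Conv, act='gelu')  -> {'ch': 64, 'act': 'gelu'}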
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
0da4fd579a871c99b4211c261ef6bd0bcc2abf7b
|
d92868f71ee20bd57258277d842c72a47eeeb1c2
|
/o8g/Scripts/constants.py
|
1fddff79b5938630193d110e8fd19d5ddbf81da4
|
[] |
no_license
|
TheWicked/Android-Netrunner-OCTGN
|
9f7d9e89b3ddf7efea51aa0f9bbdabb12acb4772
|
5835c627a752b6e0a157f31bd8b7cee9ed84f573
|
refs/heads/master
| 2021-01-18T05:44:07.099125
| 2013-05-26T15:53:14
| 2013-05-26T15:53:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,527
|
py
|
###==================================================File Contents==================================================###
# This file contains global variables in ANR. They should not be modified by the scripts at all.
###=================================================================================================================###
import re
#---------------------------------------------------------------------------
# These are constant global variables in ANR: They should not be modified by the scripts at all.
#---------------------------------------------------------------------------
mdict = dict( # A dictionary which holds all the hard coded markers (in the markers file)
BadPublicity = ("Bad Publicity", "7ae6b4f2-afee-423a-bc18-70a236b41292"),
Agenda = ("Agenda", "38c5b2a0-caa2-40e4-b5b2-0f1cc7202782"), # We use the blue counter as agendas
Power = ("Power", "815b944d-d7db-4846-8be2-20852a1c9530"),
Virus = ("Virus", "7cbe3738-5c50-4a32-97e7-8cb43bf51afa"),
Click = ("Click", "1c873bd4-007f-46f9-9b17-3d8780dabfc4"),
Credit5 = ("5 Credits","feb0e161-da94-4705-8d56-b48f17d74a99"),
Credits = ("Credit","bda3ae36-c312-4bf7-a288-7ee7760c26f7"),
Tag = ("Tag","1d1e7dd2-c60a-4770-82b7-d2d9232b3be8"),
Advancement = ("Advancement", "f8372e2c-c5df-42d9-9d54-f5d9890e9821"),
Scored = ("Scored", "4911f1ad-abf9-4b75-b4c5-86df3f9098ee"),
PlusOnePerm = ("Permanent +1", "1bd5cc9f-3528-45d2-a8fc-e7d7bd6865d5"),
PlusOne = ("Temporary +1", "e8d0b72e-0384-4762-b983-31137d4b4625"),
MinusOne = ("Temporary -1", "d5466468-e05c-4ad8-8bc0-02fbfe4a2ec6"),
protectionMeatDMG = ("Meat Damage protection","2bcb7e73-125d-4cea-8874-d67b7532cbd5"),
protectionNetDMG = ("Net Damage protection","6ac8bd15-ac1d-4d0c-81e3-990124333a19"),
protectionBrainDMG = ("Brain damage protection","99fa1d76-5361-4213-8300-e4c173bc0143"),
protectionNetBrainDMG = ("Net & Brain Damage protection","de733be8-8aaf-4580-91ce-5fcaa1183865"),
protectionAllDMG = ("Complete Damage protection","13890548-8f1e-4c02-a422-0d93332777b2"),
protectionVirus = ("Virus protection","590322bd-83f0-43fa-9239-a2b723b08460"),
BrainDMG = ("Brain Damage","59810a63-2a6b-4ae2-a71c-348c8965d612"),
DaemonMU = ("Daemon MU", "17844835-3140-4555-b592-0f711048eabd"),
PersonalWorkshop = ("Personal Workshop", "efbfabaa-384d-4139-8be1-7f1d706b3dd8"),
Cloud = ("Cloud", "5f58fb37-e44d-4620-8093-3b7378fb5f57"),
BaseLink = ("Base Link", "2fb5b6bb-31c5-409c-8aa6-2c46e971a8a5"))
regexHooks = dict( # A dictionary which holds the regex that then trigger each core command.
# This is so that I can modify these "hooks" only in one place as I add core commands and modulators.
# We use "[:\$\|]" before all hooks, because we want to make sure the script is a core command, and nor part of a modulator (e.g -traceEffects)
GainX = re.compile(r'(?<![<,+-])(Gain|Lose|SetTo)([0-9]+)'),
CreateDummy = re.compile(r'(?<![<,+-])CreateDummy'),
ReshuffleX = re.compile(r'(?<![<,+-])Reshuffle([A-Za-z& ]+)'),
RollX = re.compile(r'(?<![<,+-])Roll([0-9]+)'),
RequestInt = re.compile(r'(?<![<,+-])RequestInt'),
DiscardX = re.compile(r'(?<![<,+-])Discard[0-9]+'),
TokensX = re.compile(r'(?<![<,+-])(Put|Remove|Refill|Use|Infect)([0-9]+)'),
TransferX = re.compile(r'(?<![<,+-])Transfer([0-9]+)'),
DrawX = re.compile(r'(?<![<,+-])Draw([0-9]+)'),
ShuffleX = re.compile(r'(?<![<,+-])Shuffle([A-Za-z& ]+)'),
RunX = re.compile(r'(?<![<,+-])Run([A-Za-z& ]+)'),
TraceX = re.compile(r'(?<![<,+-])Trace([0-9]+)'),
InflictX = re.compile(r'(?<![<,+-])Inflict([0-9]+)'),
RetrieveX = re.compile(r'(?<![<,+-])Retrieve([0-9]+)'),
ModifyStatus = re.compile(r'(?<![<,+-])(Rez|Derez|Expose|Trash|Uninstall|Possess|Exile|Rework)(Target|Parent|Multi|Myself)'),
SimplyAnnounce = re.compile(r'(?<![<,+-])SimplyAnnounce'),
ChooseKeyword = re.compile(r'(?<![<,+-])ChooseKeyword'),
CustomScript = re.compile(r'(?<![<,+-])CustomScript'),
UseCustomAbility = re.compile(r'(?<![<,+-])UseCustomAbility'))
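# Illustrative matching sketch (not part of the original file):
#   regexHooks['GainX'].search('Gain2')  matches with groups ('Gain', '2')
#   regexHooks['GainX'].search('-Gain2') finds no match, because the
#   (?<![<,+-]) lookbehind rejects hooks preceded by a modulator marker.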
specialHostPlacementAlgs = { # A Dictionary which holds tuples of X and Y placement offsets, for cards which place their hosted cards differently to normal, such as Personal Workshop
'Personal Workshop' : (-32,0)}
automatedMarkers = [] #Used in the Inspect() command to let the player know if the card has automations based on the markers it puts out.
place = dict( # A table holding tuples with the original location various card types are expected to start their setup
Hardware = (100, -208, 10, 3, 1), # 1st value is X, second is Y third is Offset (i.e. how far from the other cards (in pixel size) each extra copy should be played. Negative values means it will fall on top of the previous ones slightly)
Program = (-7, -220, 10, 10, -1), # 4th value is Loop Limit (i.e. at how many cards after the first do we loop back to the first position. Loop is always slightly offset, so as not to hide the previous ones completely)
    Resource = (-6, -345, 10, 10, -1), # Last value is whether the cards will be placed towards the right or left. -1 means to the left.
Event = (480, -190, 20, 2, 1),
Console = (225, -345, 0, 1, 1),
scoredAgenda = (495, 8, -35, 6, 1),
liberatedAgenda = (336, -206, -35, 6, 1),
Server = (-10, 188, 80, 6, -1),
Operation = (480, 270, 20, 2, 1),
ICE = (110, 110, 30, 7, -1), # Temporary. ICE, Upgrades, Assets and Agendas will be special
Upgrade = (-10, 248, -30, 13, -1), # Temporary.
Asset = (-10, 248, -30, 13, -1), # Temporary.
Agenda = (-10, 248, -30, 13, -1) # Temporary.
)
markerRemovals = { # A dictionary which holds the costs to remove various special markers.
# The costs are in a tuple. First is clicks cost and then is credit cost.
'Fang' : (1,2),
'Data Raven' : (1,1),
'Fragmentation Storm' : (1,1),
'Rex' : (1,2),
'Crying' : (1,2),
'Cerberus' : (1,4),
'Baskerville' : (1,3),
'Doppelganger' : (1,4),
'Mastiff' : (1,4)}
CorporateFactions = [
'Haas-Bioroid',
'The Weyland Consortium',
'NBN',
'Jinteki']
RunnerFactions = [
'Anarch',
'Shaper',
'Criminal']
CorporationCardTypes = [
'ICE',
'Asset',
'Agenda',
'Upgrade',
'Operation']
RunnerCardTypes = [
'Program',
'Hardware',
'Resource',
'Event']
SpecialDaemons = [ # These are cards which can host programs and avoid their MU cost, but don't have the daemon keyword
'Dinosaurus'] # Not in use yet.
IgnoredModulators = [ # These are modulators to core commands that we do not want to be mentioning on the multiple choice, of cards that have one
'isSubroutine',
'onAccess',
'ignore',
'div',
'isOptional',
'excludeDummy',
'onlyforDummy',
'isCost']
trashEasterEgg = [
"You really shouldn't try to trash this kind of card.",
"No really, stop trying to trash this card. You need it.",
"Just how silly are you?",
"You just won't rest until you've trashed a setup card will you?",
"I'm warning you...",
"OK, NOW I'm really warning you...",
"Shit's just got real!",
"Careful what you wish for..."]
trashEasterEggIDX = 0
ScoredColor = "#00ff44"
SelectColor = "#009900"
EmergencyColor = "#ff0000"
DummyColor = "#000000" # Marks cards which are supposed to be out of play, so that players can tell them apart.
RevealedColor = "#ffffff"
PriorityColor = "#ffd700"
InactiveColor = "#888888" # Cards which are in play but not active yet (e.g. see the shell traders)
Xaxis = 'x'
Yaxis = 'y'
|
[
"mail@dbzer0.com"
] |
mail@dbzer0.com
|
f3eeecf56824030c75e682ac5163fa2a436168a3
|
0847848675cb44889529073c17c48f4560a81010
|
/Mnist/make.py
|
3484b24ebd3d532add11cca5f950acad174777c2
|
[] |
no_license
|
testaccount-0695/pythonProject4
|
7272931013262b954060245a6e17a717b01fe7dd
|
67d72e26bed82c314e5709a4470913a9203e85a2
|
refs/heads/master
| 2023-03-19T02:49:08.574957
| 2021-03-05T01:38:40
| 2021-03-05T01:38:40
| 344,656,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
import os
import tensorflow as tf
from tensorflow import keras
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
print(tf.version.VERSION)
def create_model():
model = tf.keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(784,)),
keras.layers.Dropout(0.2),
        keras.layers.Dense(512, activation='relu'),  # input_shape is only needed on the first layer
keras.layers.Dropout(0.5),
keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
# Create the model object
model = create_model()
# Print the model architecture
model.summary()
# Train the model
model.fit(train_images, train_labels, epochs=10)
# Save the entire model to an HDF5 file
# The '.h5' extension indicates that the model is stored in HDF5 format
model.save('D:/pythonProject4/Model/Mnist.h5')
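# A minimal reload sketch (assumes the file saved above; not in the original):
# new_model = tf.keras.models.load_model('D:/pythonProject4/Model/Mnist.h5')
# loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)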
|
[
"Whtjdaks2@"
] |
Whtjdaks2@
|
bf5ff811dd36959cbb56b862856ef8a46fcdaabe
|
a7e5aa55139641ca49d27c8b0c275c25f8cc0c54
|
/src/main/python/modules/window_statusbar/gui/bar.py
|
209b4a51917b5adec045657e13011b29c5680617
|
[] |
no_license
|
AlexWoroschilow/AOD-Reader
|
5a5fa4ea8184ea2df2301870ccd67717eab307f1
|
6e643958a4fae62128f036821030b8ea9f937d07
|
refs/heads/master
| 2022-02-24T05:48:48.549468
| 2019-09-20T23:42:03
| 2019-09-20T23:42:03
| 197,986,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2015 Alex Woroschilow (alex.woroschilow@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from PyQt5 import QtWidgets
from PyQt5 import QtCore
class StatusbarWidget(QtWidgets.QStatusBar):
def __init__(self):
super(StatusbarWidget, self).__init__()
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.status = QtWidgets.QLabel()
self.status.setAlignment(QtCore.Qt.AlignCenter)
self.addWidget(self.status)
self.progress = QtWidgets.QProgressBar()
self.progress.hide()
def text(self, text):
self.status.setText(text)
def start(self, progress):
if self.status is not None:
self.status.hide()
self.removeWidget(self.status)
if self.progress is not None:
self.progress.setValue(progress)
self.addWidget(self.progress, 1)
self.progress.show()
def setProgress(self, progress):
if self.progress is not None:
self.progress.setValue(progress)
def stop(self, progress):
if self.progress is not None:
self.progress.setValue(progress)
self.progress.hide()
self.removeWidget(self.progress)
if self.status is not None:
self.addWidget(self.status, 1)
self.status.show()
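# Illustrative usage sketch (the QMainWindow host is hypothetical, not part
# of this module):
#   bar = StatusbarWidget()
#   main_window.setStatusBar(bar)  # attach to a QMainWindow
#   bar.start(0)                   # swap the status label for the progress bar
#   bar.setProgress(50)
#   bar.stop(100)                  # hide the bar and restore the status label
#   bar.text('Done')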
|
[
"alex.woroschilow@gmail.com"
] |
alex.woroschilow@gmail.com
|
650c7e5fa5b24c7ff8e597b34a818a2c3cecb225
|
56c3c324f5c35c34e7a72172568dedf4c301c4fe
|
/products/migrations/0010_auto_20210305_1435.py
|
62b43453e670105d6588a3ece4e2ad370d63ecd1
|
[] |
no_license
|
Shubh-Bajpai7/DIS-COM
|
7868b101fb4bba726363b8f982b036a64d3888d9
|
860fa4f2821ae428388cace4cd797302de77f5e0
|
refs/heads/master
| 2023-08-19T00:14:54.060445
| 2021-09-29T12:26:24
| 2021-09-29T12:26:24
| 411,643,523
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# Generated by Django 3.1.6 on 2021-03-05 14:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0009_auto_20210205_0505'),
]
operations = [
migrations.CreateModel(
name='Name',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.DeleteModel(
name='Test',
),
]
|
[
"shubh.bajpai999@gmail.com"
] |
shubh.bajpai999@gmail.com
|
df5280e7916d11004414fad03e054bf80f089274
|
730154818de1d81d0bca2546ca1a3ab029945860
|
/test/cli/test_commands.py
|
7fcb502ab2a37d9db734c94829a20e64fdfc5b1d
|
[
"MIT"
] |
permissive
|
mnalband/schemathesis
|
b2b4cba8505dfc0f7779ac7fb10abbf42c566076
|
42b351589fae3f407a1de248750bc82d6b5796d1
|
refs/heads/master
| 2020-08-21T12:30:58.006890
| 2019-10-16T08:59:36
| 2019-10-18T21:28:32
| 216,160,468
| 0
| 0
|
MIT
| 2019-10-19T06:31:13
| 2019-10-19T06:31:13
| null |
UTF-8
|
Python
| false
| false
| 9,661
|
py
|
import pytest
from _pytest.main import ExitCode
from hypothesis import HealthCheck, Phase, Verbosity
from requests.auth import HTTPDigestAuth
from schemathesis.runner import DEFAULT_CHECKS
def test_commands_help(cli):
result = cli.run_subprocess()
assert result.ret == ExitCode.OK
assert result.stdout.get_lines_after("Commands:") == [" run Perform schemathesis test."]
result_help = cli.run_subprocess("--help")
result_h = cli.run_subprocess("-h")
assert result.stdout.lines == result_h.stdout.lines == result_help.stdout.lines
def test_commands_version(cli):
result = cli.run_subprocess("--version")
assert result.ret == ExitCode.OK
assert "version" in result.stdout.lines[0]
@pytest.mark.parametrize(
"args, error",
(
(("run",), 'Error: Missing argument "SCHEMA".'),
(("run", "not-url"), "Error: Invalid SCHEMA, must be a valid URL."),
(
("run", "http://127.0.0.1", "--auth=123"),
'Error: Invalid value for "--auth" / "-a": Should be in KEY:VALUE format. Got: 123',
),
(
("run", "http://127.0.0.1", "--auth-type=random"),
'Error: Invalid value for "--auth-type" / "-A": invalid choice: random. (choose from basic, digest)',
),
(
("run", "http://127.0.0.1", "--header=123"),
'Error: Invalid value for "--header" / "-H": Should be in KEY:VALUE format. Got: 123',
),
(
("run", "http://127.0.0.1", "--header=:"),
'Error: Invalid value for "--header" / "-H": Header name should not be empty',
),
(
("run", "http://127.0.0.1", "--hypothesis-phases=explicit,first,second"),
'Error: Invalid value for "--hypothesis-phases": invalid choice(s): first, second. '
"Choose from explicit, reuse, generate, shrink",
),
),
)
def test_commands_run_errors(cli, args, error):
# When invalid arguments are passed to CLI
result = cli.run_subprocess(*args)
# Then an appropriate error should be displayed
assert result.ret == ExitCode.INTERRUPTED
assert result.stderr.lines[-1] == error
def test_commands_run_help(cli):
result_help = cli.run_subprocess("run", "--help")
assert result_help.ret == ExitCode.OK
assert result_help.stdout.lines == [
"Usage: schemathesis run [OPTIONS] SCHEMA",
"",
" Perform schemathesis test against an API specified by SCHEMA.",
"",
" SCHEMA must be a valid URL pointing to an Open API / Swagger",
" specification.",
"",
"Options:",
" -c, --checks [not_a_server_error]",
" List of checks to run.",
" -a, --auth TEXT Server user and password. Example:",
" USER:PASSWORD",
" -A, --auth-type [basic|digest] The authentication mechanism to be used.",
" Defaults to 'basic'.",
" -H, --header TEXT Custom header in a that will be used in all",
r" requests to the server. Example:",
r" Authorization: Bearer\ 123",
r" -E, --endpoint TEXT Filter schemathesis test by endpoint",
r" pattern. Example: users/\d+",
" -M, --method TEXT Filter schemathesis test by HTTP method.",
" -b, --base-url TEXT Base URL address of the API.",
" --hypothesis-deadline INTEGER Duration in milliseconds that each",
" individual example with a test is not",
" allowed to exceed.",
" --hypothesis-derandomize Use Hypothesis's deterministic mode.",
" --hypothesis-max-examples INTEGER",
" Maximum number of generated examples per",
" each method/endpoint combination.",
" --hypothesis-phases [explicit|reuse|generate|shrink]",
" Control which phases should be run.",
" --hypothesis-report-multiple-bugs BOOLEAN",
" Raise only the exception with the smallest",
" minimal example.",
" --hypothesis-suppress-health-check [data_too_large|filter_too_much|too_slow|return_value|"
"hung_test|large_base_example|not_a_test_method]",
" Comma-separated list of health checks to",
" disable.",
" --hypothesis-verbosity [quiet|normal|verbose|debug]",
" Verbosity level of Hypothesis messages",
" -h, --help Show this message and exit.",
]
SCHEMA_URI = "https://example.com/swagger.json"
@pytest.mark.parametrize(
"args, expected",
(
([SCHEMA_URI], {"checks": DEFAULT_CHECKS}),
([SCHEMA_URI, "--auth=test:test"], {"checks": DEFAULT_CHECKS, "api_options": {"auth": ("test", "test")}}),
(
[SCHEMA_URI, "--auth=test:test", "--auth-type=digest"],
{"checks": DEFAULT_CHECKS, "api_options": {"auth": HTTPDigestAuth("test", "test")}},
),
(
[SCHEMA_URI, "--auth=test:test", "--auth-type=DIGEST"],
{"checks": DEFAULT_CHECKS, "api_options": {"auth": HTTPDigestAuth("test", "test")}},
),
(
[SCHEMA_URI, "--header=Authorization:Bearer 123"],
{"checks": DEFAULT_CHECKS, "api_options": {"headers": {"Authorization": "Bearer 123"}}},
),
(
[SCHEMA_URI, "--header=Authorization: Bearer 123 "],
{"checks": DEFAULT_CHECKS, "api_options": {"headers": {"Authorization": "Bearer 123 "}}},
),
(
[SCHEMA_URI, "--method=POST", "--method", "GET"],
{"checks": DEFAULT_CHECKS, "loader_options": {"method": ("POST", "GET")}},
),
([SCHEMA_URI, "--endpoint=users"], {"checks": DEFAULT_CHECKS, "loader_options": {"endpoint": ("users",)}}),
(
[SCHEMA_URI, "--base-url=https://example.com/api/v1test"],
{"checks": DEFAULT_CHECKS, "api_options": {"base_url": "https://example.com/api/v1test"}},
),
(
[
SCHEMA_URI,
"--hypothesis-deadline=1000",
"--hypothesis-derandomize",
"--hypothesis-max-examples=1000",
"--hypothesis-phases=explicit,generate",
"--hypothesis-report-multiple-bugs=0",
"--hypothesis-suppress-health-check=too_slow,filter_too_much",
"--hypothesis-verbosity=normal",
],
{
"checks": DEFAULT_CHECKS,
"hypothesis_options": {
"deadline": 1000,
"derandomize": True,
"max_examples": 1000,
"phases": [Phase.explicit, Phase.generate],
"report_multiple_bugs": False,
"suppress_health_check": [HealthCheck.too_slow, HealthCheck.filter_too_much],
"verbosity": Verbosity.normal,
},
},
),
),
)
def test_execute_arguments(cli, mocker, args, expected):
m_execute = mocker.patch("schemathesis.runner.execute", autospec=True)
result = cli.run_inprocess(*args)
assert result.exit_code == 0
m_execute.assert_called_once_with(SCHEMA_URI, **expected)
@pytest.mark.endpoints()
def test_hypothesis_parameters(cli, schema_url):
# When Hypothesis options are passed via command line
result = cli.run_inprocess(
schema_url,
"--hypothesis-deadline=1000",
"--hypothesis-derandomize",
"--hypothesis-max-examples=1000",
"--hypothesis-phases=explicit,generate",
"--hypothesis-report-multiple-bugs=0",
"--hypothesis-suppress-health-check=too_slow,filter_too_much",
"--hypothesis-verbosity=normal",
)
# Then they should be correctly converted into arguments accepted by `hypothesis.settings`
# Parameters are validated in `hypothesis.settings`
assert result.exit_code == 0
@pytest.mark.endpoints("success")
def test_cli_run_output_success(cli, schema_url):
result = cli.run_inprocess(schema_url)
assert result.exit_code == 0
assert " FALSIFYING EXAMPLES " not in result.stdout
assert " SUMMARY " in result.stdout
lines = result.stdout.split("\n")
assert "Running schemathesis test cases ..." in lines
assert "Tests succeeded." in lines
def test_cli_run_output_with_errors(cli, schema_url):
result = cli.run_inprocess(schema_url)
assert result.exit_code == 1
assert " FALSIFYING EXAMPLES " in result.stdout
assert " SUMMARY " in result.stdout
lines = result.stdout.split("\n")
assert "Running schemathesis test cases ..." in lines
assert "not_a_server_error 1 / 3 passed FAILED " in lines
assert "Tests failed." in lines
@pytest.mark.endpoints()
def test_cli_run_output_empty(cli, schema_url):
result = cli.run_inprocess(schema_url)
assert result.exit_code == 0
assert " FALSIFYING EXAMPLES " not in result.stdout
assert " SUMMARY " not in result.stdout
lines = result.stdout.split("\n")
assert "No checks were performed." in lines
assert "Tests succeeded." in lines
|
[
"Stranger6667@users.noreply.github.com"
] |
Stranger6667@users.noreply.github.com
|
a6175ae5cd23a7649a3efbce875e054356277184
|
5b9af6e80cd5ffd1089960dba70143b947150245
|
/OpenCV/text_book/opencv_practice_answer/05-cv-02-05-matplotlib_one.py
|
91d69ab13a4ec87bf2f9760ec85d4c32d4650f8f
|
[] |
no_license
|
bigdatachobo/Study
|
4c0c89c8c5f2e132383fc7b15ac0270f2c98f9dd
|
380dcdaf39803202dcdbf673f1b4665f1c4d07df
|
refs/heads/master
| 2022-05-14T19:40:35.179414
| 2022-04-29T14:13:39
| 2022-04-29T14:13:39
| 247,881,646
| 2
| 16
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
import matplotlib.pyplot as plt
import cv2
# Load the color image and the grayscale image
imgBGR = cv2.imread('images/cat.bmp')  # forward slashes avoid backslash-escape pitfalls
imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
imgGray = cv2.imread('images/cat.bmp', cv2.IMREAD_GRAYSCALE)
# Display the two images side by side
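# subplot(121): a 1-row x 2-column grid, panel 1; subplot(122) selects panel 2.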
plt.subplot(121), plt.axis('off'), plt.imshow(imgRGB)
plt.subplot(122), plt.axis('off'), plt.imshow(imgGray, cmap='gray')
plt.show()
|
[
"noreply@github.com"
] |
bigdatachobo.noreply@github.com
|
a36ff24b2ee446e256d58f9764f146c8e03de42e
|
cab6730c6e6dea63a78c75274df21f7791f33950
|
/Fakeinsta/wsgi.py
|
c8337bc252081854989ec3a10462437d867dff51
|
[] |
no_license
|
JoSmith18/Finstagram
|
a44f8a4e1be09306ac1215a70ec7503309f020bd
|
4a1daea7a2427fa4f9ebc613fea4d5b6e8e2edf4
|
refs/heads/master
| 2021-09-03T11:58:11.525867
| 2018-01-08T22:22:06
| 2018-01-08T22:22:06
| 114,687,059
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for Fakeinsta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Fakeinsta.settings")
application = get_wsgi_application()
|
[
"jsmith@basecampcodingacademy.org"
] |
jsmith@basecampcodingacademy.org
|
672ff531121eeb5e8cbf9065d69e2750fe922853
|
aab63fa11d0553ff8793bedf2737ef06a2138e9f
|
/sms/models.py
|
4bc53ec0bd170c4eeece86138c1541faf5b554a0
|
[] |
no_license
|
sammienjihia/mwananchi
|
4aee52e5b7eeb86670a2ee82eba21ce8da0dca4d
|
23a1494906e9f4f4559a7f1da5f7dd55de97eb99
|
refs/heads/master
| 2021-01-11T00:11:02.863063
| 2016-10-18T10:29:08
| 2016-10-18T10:29:08
| 69,172,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
from __future__ import unicode_literals
from django.db import models
from search.models import Topics
# Create your models here.
class Insms(models.Model):
sender = models.CharField(max_length=255)
reciever = models.CharField(max_length=255)
recieved_date = models.DateTimeField()
text = models.CharField(max_length=255)
class Meta:
db_table = "Insms"
verbose_name_plural = "Insms"
def __str__(self):
        return "{} {} {}".format(self.sender, self.recieved_date, self.text)
class Outsms(models.Model):
sender = models.CharField(max_length=255)
receiver = models.CharField(max_length=255)
sent_date = models.DateTimeField(auto_now_add=True)
text = models.CharField(max_length=255)
class Meta:
db_table = "Outsms"
verbose_name_plural = "Outsms"
def __str__(self):
        return "{} {} {}".format(self.receiver, self.sent_date, self.text)
class Sms(models.Model):
subscribed_topic = models.ForeignKey(Topics, on_delete=models.CASCADE)
class Meta:
db_table = "Sms"
verbose_name_plural = "Sms"
def __str__(self):
        return str(self.subscribed_topic)
|
[
"dilaninjihia@gmail.com"
] |
dilaninjihia@gmail.com
|
03f2d1cb0ebe423fc29491e8f9b182014c338584
|
39ac450698e68c44862fc8fdac9efc0ee9c6994d
|
/Week_02/id_40/leetcode_692_40.py
|
b2171ce9473813898913aa9cf2851a0b5b0413c8
|
[] |
no_license
|
algorithm003/algorithm
|
70d0f6a292c480e017e90ab5996772becbc7113c
|
06b1a12411c22c3f24fd58b24f17a923dca380d5
|
refs/heads/master
| 2022-02-02T11:59:01.917835
| 2019-06-26T14:33:22
| 2019-08-05T15:55:03
| 189,704,070
| 18
| 65
| null | 2019-08-05T04:35:13
| 2019-06-01T07:31:23
|
C++
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
# https://leetcode-cn.com/problems/top-k-frequent-words/
# Note a subtle but important requirement in the problem statement: "words with the
# same frequency must be sorted in alphabetical order."
# The most concise approach I have seen is to use the nsmallest function from the heapq module.
from typing import List
from heapq import nsmallest
class Solution:
def topKFrequent(self, words: List[str], k: int) -> List[str]:
        counts = {}
        for word in words:
            counts[word] = counts.get(word, 0) + 1
        return nsmallest(k, counts, key=lambda w: (-counts[w], w))
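# Worked example (illustrative):
#   topKFrequent(["i", "love", "leetcode", "i", "love", "coding"], 2) -> ["i", "love"]
#   "i" and "love" both occur twice; the (-count, word) key breaks the tie
#   alphabetically, so "i" precedes "love".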
|
[
"1102109100@qq.com"
] |
1102109100@qq.com
|
ee000972c2fe43c9ba59fd76d31b153e07366a96
|
b386130d3ca39443f2055a1162b3ac27f4bcfe3a
|
/.env/lib/python3.7/site-packages/aws_cdk/aws_autoscaling/__init__.py
|
c54c23d5e125dcd6c3372f5c83a86feecf07657e
|
[] |
no_license
|
miromasat/aws-auto-witness
|
5a83d77cbbada78f93af7cc0e4bd4633d0aebbd2
|
412af55405d7122cc3a6d587ed3115ac1ef9a8a7
|
refs/heads/master
| 2020-12-27T21:52:48.681444
| 2020-02-26T01:29:36
| 2020-02-26T01:29:36
| 238,071,256
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384,560
|
py
|
"""
## Amazon EC2 Auto Scaling Construct Library
<!--BEGIN STABILITY BANNER-->---

---
<!--END STABILITY BANNER-->
This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.
### Fleet
### Auto Scaling Group
An `AutoScalingGroup` represents a number of instances on which you run your code. You
pick the size of the fleet, the instance type and the OS image:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_autoscaling as autoscaling
import aws_cdk.aws_ec2 as ec2
autoscaling.AutoScalingGroup(self, "ASG",
vpc=vpc,
instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),
machine_image=ec2.AmazonLinuxImage()
)
```
> NOTE: AutoScalingGroup has a property called `allowAllOutbound` (allowing the instances to contact the
> internet) which is set to `true` by default. Be sure to set this to `false` if you don't want
> your instances to be able to start arbitrary connections.
### Machine Images (AMIs)
AMIs control the OS that gets launched when you start your EC2 instance. The EC2
library contains constructs to select the AMI you want to use.
Depending on the type of AMI, you select it a different way.
The latest version of Amazon Linux and Microsoft Windows images are
selectable by instantiating one of these classes:
```python
# Example automatically generated. See https://github.com/aws/jsii/issues/826
# Pick a Windows edition to use
windows = ec2.WindowsImage(ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE)
# Pick the right Amazon Linux edition. All arguments shown are optional
# and will default to these values when omitted.
amzn_linux = ec2.AmazonLinuxImage(
generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX,
edition=ec2.AmazonLinuxEdition.STANDARD,
virtualization=ec2.AmazonLinuxVirt.HVM,
storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
)
# For other custom (Linux) images, instantiate a `GenericLinuxImage` with
# a map giving the AMI to use for each region:
linux = ec2.GenericLinuxImage({
"us-east-1": "ami-97785bed",
"eu-west-1": "ami-12345678"
})
```
> NOTE: The Amazon Linux images selected will be cached in your `cdk.json`, so that your
> AutoScalingGroups don't automatically change out from under you when you're making unrelated
> changes. To update to the latest version of Amazon Linux, remove the cache entry from the `context`
> section of your `cdk.json`.
>
> We will add command-line options to make this step easier in the future.
### AutoScaling Instance Counts
AutoScalingGroups make it possible to raise and lower the number of instances in the group,
in response to (or in advance of) changes in workload.
When you create your AutoScalingGroup, you specify a `minCapacity` and a
`maxCapacity`. AutoScaling policies that respond to metrics will never go higher
or lower than the indicated capacity (but scheduled scaling actions might, see
below).
There are three ways to scale your capacity:
* **In response to a metric** (also known as step scaling); for example, you
might want to scale out if the CPU usage across your cluster starts to rise,
and scale in when it drops again.
* **By trying to keep a certain metric around a given value** (also known as
target tracking scaling); you might want to automatically scale out and in to
keep your CPU usage around 50%.
* **On a schedule**; you might want to organize your scaling around traffic
flows you expect, by scaling out in the morning and scaling in in the
evening.
The general pattern of autoscaling will look like this:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group = autoscaling.AutoScalingGroup(self, "ASG",
min_capacity=5,
max_capacity=100
)
# Step scaling
auto_scaling_group.scale_on_metric(...)
# Target tracking scaling
auto_scaling_group.scale_on_cpu_utilization(...)
auto_scaling_group.scale_on_incoming_bytes(...)
auto_scaling_group.scale_on_outgoing_bytes(...)
auto_scaling_group.scale_on_request_count(...)
auto_scaling_group.scale_to_track_metric(...)
# Scheduled scaling
auto_scaling_group.scale_on_schedule(...)
```
#### Step Scaling
This type of scaling scales in and out in deterministic steps that you
configure, in response to metric values. For example, your scaling strategy to
scale in response to a metric that represents your average worker pool usage
might look like this:
```
Scaling -1 (no change) +1 +3
│ │ │ │ │
├────────┼───────────────────────┼────────┼────────┤
│ │ │ │ │
Worker use 0% 10% 50% 70% 100%
```
(Note that this is not necessarily a recommended scaling strategy, but it's
a possible one. You will have to determine what thresholds are right for you).
Note that in order to set up this scaling strategy, you will have to emit a
metric representing your worker utilization from your instances. After that,
you would configure the scaling something like this:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
worker_utilization_metric = cloudwatch.Metric(
namespace="MyService",
metric_name="WorkerUtilization"
)
capacity.scale_on_metric("ScaleToCPU",
metric=worker_utilization_metric,
scaling_steps=[{"upper": 10, "change": -1}, {"lower": 50, "change": +1}, {"lower": 70, "change": +3}
],
# Change this to AdjustmentType.PERCENT_CHANGE_IN_CAPACITY to interpret the
# 'change' numbers before as percentages instead of capacity counts.
adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY
)
```
The AutoScaling construct library will create the required CloudWatch alarms and
AutoScaling policies for you.
#### Target Tracking Scaling
This type of scaling scales in and out in order to keep a metric around a value
you prefer. There are four types of predefined metrics you can track, or you can
choose to track a custom metric. If you do choose to track a custom metric,
be aware that the metric has to represent instance utilization in some way
(AutoScaling will scale out if the metric is higher than the target, and scale
in if the metric is lower than the target).
If you configure multiple target tracking policies, AutoScaling will use the
one that yields the highest capacity.
The following example scales to keep the CPU usage of your instances around
50% utilization:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_cpu_utilization("KeepSpareCPU",
target_utilization_percent=50
)
```
To scale on average network traffic in and out of your instances:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_incoming_bytes("LimitIngressPerInstance",
target_bytes_per_second=10 * 1024 * 1024
)
auto_scaling_group.scale_on_outgoing_bytes("LimitEgressPerInstance",
target_bytes_per_second=10 * 1024 * 1024
)
```
To scale on the average request count per instance (only works for
AutoScalingGroups that have been attached to Application Load
Balancers):
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_request_count("LimitRPS",
target_requests_per_second=1000
)
```
#### Scheduled Scaling
This type of scaling is used to change capacities based on time. It works by
changing `minCapacity`, `maxCapacity` and `desiredCapacity` of the
AutoScalingGroup, and so can be used for two purposes:
* Scale in and out on a schedule by setting the `minCapacity` high or
the `maxCapacity` low.
* Still allow the regular scaling actions to do their job, but restrict
the range they can scale over (by setting both `minCapacity` and
`maxCapacity` but changing their range over time).
A schedule is expressed as a cron expression. The `Schedule` class has a `cron` method to help build cron expressions.
The following example scales the fleet out in the morning, going back to natural
scaling (all the way down to 1 instance if necessary) at night:
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
auto_scaling_group.scale_on_schedule("PrescaleInTheMorning",
schedule=autoscaling.Schedule.cron(hour="8", minute="0"),
min_capacity=20
)
auto_scaling_group.scale_on_schedule("AllowDownscalingAtNight",
schedule=autoscaling.Schedule.cron(hour="20", minute="0"),
min_capacity=1
)
```
### Allowing Connections
See the documentation of the `@aws-cdk/aws-ec2` package for more information
about allowing connections between resources backed by instances.
### Future work
* [ ] CloudWatch Events (impossible to add currently as the AutoScalingGroup ARN is
necessary to make this rule and this cannot be accessed from CloudFormation).
"""
import abc
import builtins
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
import aws_cdk.aws_autoscaling_common
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_ec2
import aws_cdk.aws_elasticloadbalancing
import aws_cdk.aws_elasticloadbalancingv2
import aws_cdk.aws_iam
import aws_cdk.aws_sns
import aws_cdk.core
__jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-autoscaling", "1.22.0", __name__, "aws-autoscaling@1.22.0.jsii.tgz")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.AdjustmentTier", jsii_struct_bases=[], name_mapping={'adjustment': 'adjustment', 'lower_bound': 'lowerBound', 'upper_bound': 'upperBound'})
class AdjustmentTier():
def __init__(self, *, adjustment: jsii.Number, lower_bound: typing.Optional[jsii.Number]=None, upper_bound: typing.Optional[jsii.Number]=None):
"""An adjustment.
        :param adjustment: What number to adjust the capacity with. The number is interpreted as an added capacity, a new fixed capacity or an added percentage depending on the AdjustmentType value of the StepScalingPolicy. Can be positive or negative.
:param lower_bound: Lower bound where this scaling tier applies. The scaling tier applies if the difference between the metric value and its alarm threshold is higher than this value. Default: -Infinity if this is the first tier, otherwise the upperBound of the previous tier
:param upper_bound: Upper bound where this scaling tier applies. The scaling tier applies if the difference between the metric value and its alarm threshold is lower than this value. Default: +Infinity
"""
self._values = {
'adjustment': adjustment,
}
if lower_bound is not None: self._values["lower_bound"] = lower_bound
if upper_bound is not None: self._values["upper_bound"] = upper_bound
@builtins.property
def adjustment(self) -> jsii.Number:
"""What number to adjust the capacity with.
        The number is interpreted as an added capacity, a new fixed capacity or an
added percentage depending on the AdjustmentType value of the
StepScalingPolicy.
Can be positive or negative.
"""
return self._values.get('adjustment')
@builtins.property
def lower_bound(self) -> typing.Optional[jsii.Number]:
"""Lower bound where this scaling tier applies.
The scaling tier applies if the difference between the metric
value and its alarm threshold is higher than this value.
default
:default: -Infinity if this is the first tier, otherwise the upperBound of the previous tier
"""
return self._values.get('lower_bound')
@builtins.property
def upper_bound(self) -> typing.Optional[jsii.Number]:
"""Upper bound where this scaling tier applies.
The scaling tier applies if the difference between the metric
value and its alarm threshold is lower than this value.
default
:default: +Infinity
"""
return self._values.get('upper_bound')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'AdjustmentTier(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.AdjustmentType")
class AdjustmentType(enum.Enum):
"""How adjustment numbers are interpreted."""
CHANGE_IN_CAPACITY = "CHANGE_IN_CAPACITY"
"""Add the adjustment number to the current capacity.
A positive number increases capacity, a negative number decreases capacity.
"""
PERCENT_CHANGE_IN_CAPACITY = "PERCENT_CHANGE_IN_CAPACITY"
"""Add this percentage of the current capacity to itself.
The number must be between -100 and 100; a positive number increases
capacity and a negative number decreases it.
"""
EXACT_CAPACITY = "EXACT_CAPACITY"
"""Make the capacity equal to the exact number given."""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BaseTargetTrackingProps", jsii_struct_bases=[], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup'})
class BaseTargetTrackingProps():
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None):
"""Base interface for target tracking props.
Contains the attributes that are common to target tracking policies,
except the ones relating to the metric and to the scalable target.
This interface is reused by more specific target tracking props objects.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
self._values = {
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BaseTargetTrackingProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicLifecycleHookProps", jsii_struct_bases=[], name_mapping={'lifecycle_transition': 'lifecycleTransition', 'notification_target': 'notificationTarget', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'lifecycle_hook_name': 'lifecycleHookName', 'notification_metadata': 'notificationMetadata', 'role': 'role'})
class BasicLifecycleHookProps():
def __init__(self, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None):
"""Basic properties for a lifecycle hook.
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
"""
self._values = {
'lifecycle_transition': lifecycle_transition,
'notification_target': notification_target,
}
if default_result is not None: self._values["default_result"] = default_result
if heartbeat_timeout is not None: self._values["heartbeat_timeout"] = heartbeat_timeout
if lifecycle_hook_name is not None: self._values["lifecycle_hook_name"] = lifecycle_hook_name
if notification_metadata is not None: self._values["notification_metadata"] = notification_metadata
if role is not None: self._values["role"] = role
@builtins.property
def lifecycle_transition(self) -> "LifecycleTransition":
"""The state of the Amazon EC2 instance to which you want to attach the lifecycle hook."""
return self._values.get('lifecycle_transition')
@builtins.property
def notification_target(self) -> "ILifecycleHookTarget":
"""The target of the lifecycle hook."""
return self._values.get('notification_target')
@builtins.property
def default_result(self) -> typing.Optional["DefaultResult"]:
"""The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs.
default
:default: Continue
"""
return self._values.get('default_result')
@builtins.property
def heartbeat_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Maximum time between calls to RecordLifecycleActionHeartbeat for the hook.
If the lifecycle hook times out, perform the action in DefaultResult.
default
:default: - No heartbeat timeout.
"""
return self._values.get('heartbeat_timeout')
@builtins.property
def lifecycle_hook_name(self) -> typing.Optional[str]:
"""Name of the lifecycle hook.
default
:default: - Automatically generated name.
"""
return self._values.get('lifecycle_hook_name')
@builtins.property
def notification_metadata(self) -> typing.Optional[str]:
"""Additional data to pass to the lifecycle hook target.
default
:default: - No metadata.
"""
return self._values.get('notification_metadata')
@builtins.property
def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
"""The role that allows publishing to the notification target.
default
:default: - A role is automatically created.
"""
return self._values.get('role')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BasicLifecycleHookProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicScheduledActionProps", jsii_struct_bases=[], name_mapping={'schedule': 'schedule', 'desired_capacity': 'desiredCapacity', 'end_time': 'endTime', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'start_time': 'startTime'})
class BasicScheduledActionProps():
def __init__(self, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None):
"""Properties for a scheduled scaling action.
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
"""
self._values = {
'schedule': schedule,
}
if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
if end_time is not None: self._values["end_time"] = end_time
if max_capacity is not None: self._values["max_capacity"] = max_capacity
if min_capacity is not None: self._values["min_capacity"] = min_capacity
if start_time is not None: self._values["start_time"] = start_time
@builtins.property
def schedule(self) -> "Schedule":
"""When to perform this action.
Supports cron expressions.
For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
Example::
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
            0 8 * * ?
"""
return self._values.get('schedule')
@builtins.property
def desired_capacity(self) -> typing.Optional[jsii.Number]:
"""The new desired capacity.
At the scheduled time, set the desired capacity to the given capacity.
At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.
default
:default: - No new desired capacity.
"""
return self._values.get('desired_capacity')
@builtins.property
def end_time(self) -> typing.Optional[datetime.datetime]:
"""When this scheduled action expires.
default
:default: - The rule never expires.
"""
return self._values.get('end_time')
@builtins.property
def max_capacity(self) -> typing.Optional[jsii.Number]:
"""The new maximum capacity.
At the scheduled time, set the maximum capacity to the given capacity.
At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.
default
:default: - No new maximum capacity.
"""
return self._values.get('max_capacity')
@builtins.property
def min_capacity(self) -> typing.Optional[jsii.Number]:
"""The new minimum capacity.
At the scheduled time, set the minimum capacity to the given capacity.
At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.
default
:default: - No new minimum capacity.
"""
return self._values.get('min_capacity')
@builtins.property
def start_time(self) -> typing.Optional[datetime.datetime]:
"""When this scheduled action becomes active.
default
        :default: - The rule is activated immediately.
"""
return self._values.get('start_time')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BasicScheduledActionProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicStepScalingPolicyProps", jsii_struct_bases=[], name_mapping={'metric': 'metric', 'scaling_steps': 'scalingSteps', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'min_adjustment_magnitude': 'minAdjustmentMagnitude'})
class BasicStepScalingPolicyProps():
def __init__(self, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None):
"""
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
self._values = {
'metric': metric,
'scaling_steps': scaling_steps,
}
if adjustment_type is not None: self._values["adjustment_type"] = adjustment_type
if cooldown is not None: self._values["cooldown"] = cooldown
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if min_adjustment_magnitude is not None: self._values["min_adjustment_magnitude"] = min_adjustment_magnitude
@builtins.property
def metric(self) -> aws_cdk.aws_cloudwatch.IMetric:
"""Metric to scale on."""
return self._values.get('metric')
@builtins.property
def scaling_steps(self) -> typing.List["ScalingInterval"]:
"""The intervals for scaling.
Maps a range of metric values to a particular scaling behavior.
"""
return self._values.get('scaling_steps')
@builtins.property
def adjustment_type(self) -> typing.Optional["AdjustmentType"]:
"""How the adjustment numbers inside 'intervals' are interpreted.
default
:default: ChangeInCapacity
"""
return self._values.get('adjustment_type')
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Grace period after scaling activity.
default
:default: Default cooldown period on your AutoScalingGroup
"""
return self._values.get('cooldown')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: Same as the cooldown
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
"""Minimum absolute number to adjust capacity with as result of percentage scaling.
Only when using AdjustmentType = PercentChangeInCapacity, this number controls
the minimum absolute effect size.
default
:default: No minimum scaling effect
"""
return self._values.get('min_adjustment_magnitude')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BasicStepScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BasicTargetTrackingScalingPolicyProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_value': 'targetValue', 'custom_metric': 'customMetric', 'predefined_metric': 'predefinedMetric', 'resource_label': 'resourceLabel'})
class BasicTargetTrackingScalingPolicyProps(BaseTargetTrackingProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_value: jsii.Number, custom_metric: typing.Optional[aws_cdk.aws_cloudwatch.IMetric]=None, predefined_metric: typing.Optional["PredefinedMetric"]=None, resource_label: typing.Optional[str]=None):
"""Properties for a Target Tracking policy that include the metric but exclude the target.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param target_value: The target value for the metric.
        :param custom_metric: A custom metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No custom metric.
        :param predefined_metric: A predefined metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No predefined metric.
    :param resource_label: The resource label associated with the predefined metric. Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the format should be: app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id> Default: - No resource label.
"""
self._values = {
'target_value': target_value,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if custom_metric is not None: self._values["custom_metric"] = custom_metric
if predefined_metric is not None: self._values["predefined_metric"] = predefined_metric
if resource_label is not None: self._values["resource_label"] = resource_label
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def target_value(self) -> jsii.Number:
"""The target value for the metric."""
return self._values.get('target_value')
@builtins.property
def custom_metric(self) -> typing.Optional[aws_cdk.aws_cloudwatch.IMetric]:
"""A custom metric for application autoscaling.
The metric must track utilization. Scaling out will happen if the metric is higher than
the target value, scaling in will happen if the metric is lower than the target value.
Exactly one of customMetric or predefinedMetric must be specified.
default
:default: - No custom metric.
"""
return self._values.get('custom_metric')
@builtins.property
def predefined_metric(self) -> typing.Optional["PredefinedMetric"]:
"""A predefined metric for application autoscaling.
The metric must track utilization. Scaling out will happen if the metric is higher than
the target value, scaling in will happen if the metric is lower than the target value.
Exactly one of customMetric or predefinedMetric must be specified.
default
:default: - No predefined metric.
"""
return self._values.get('predefined_metric')
@builtins.property
def resource_label(self) -> typing.Optional[str]:
"""The resource label associated with the predefined metric.
Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the
format should be:
app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>
default
:default: - No resource label.
"""
return self._values.get('resource_label')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BasicTargetTrackingScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
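# Illustrative sketch (not part of the generated bindings): target tracking
# props that keep average CPU at 50%. The target value is hypothetical, and
# exactly one of custom_metric or predefined_metric may be given, per the
# docstrings above; the ``PredefinedMetric`` member name is assumed from the
# enum defined elsewhere in this module.
#
#   props = BasicTargetTrackingScalingPolicyProps(
#       target_value=50,
#       predefined_metric=PredefinedMetric.ASG_AVERAGE_CPU_UTILIZATION,
#       cooldown=aws_cdk.core.Duration.minutes(3),
#   )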
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.BlockDevice", jsii_struct_bases=[], name_mapping={'device_name': 'deviceName', 'volume': 'volume', 'mapping_enabled': 'mappingEnabled'})
class BlockDevice():
def __init__(self, *, device_name: str, volume: "BlockDeviceVolume", mapping_enabled: typing.Optional[bool]=None):
"""Block device.
:param device_name: The device name exposed to the EC2 instance.
:param volume: Defines the block device volume, to be either an Amazon EBS volume or an ephemeral instance store volume.
:param mapping_enabled: If false, the device mapping will be suppressed. If set to false for the root device, the instance might fail the Amazon EC2 health check. Amazon EC2 Auto Scaling launches a replacement instance if the instance fails the health check. Default: true - device mapping is left untouched
"""
self._values = {
'device_name': device_name,
'volume': volume,
}
if mapping_enabled is not None: self._values["mapping_enabled"] = mapping_enabled
@builtins.property
def device_name(self) -> str:
"""The device name exposed to the EC2 instance.
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
Example::
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
"/dev/sdh" , "xvdh"
"""
return self._values.get('device_name')
@builtins.property
def volume(self) -> "BlockDeviceVolume":
"""Defines the block device volume, to be either an Amazon EBS volume or an ephemeral instance store volume.
Example::
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
BlockDeviceVolume.ebs(15), BlockDeviceVolume.ephemeral(0)
"""
return self._values.get('volume')
@builtins.property
def mapping_enabled(self) -> typing.Optional[bool]:
"""If false, the device mapping will be suppressed.
If set to false for the root device, the instance might fail the Amazon EC2 health check.
Amazon EC2 Auto Scaling launches a replacement instance if the instance fails the health check.
default
:default: true - device mapping is left untouched
"""
return self._values.get('mapping_enabled')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BlockDevice(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
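# Illustrative sketch (not part of the generated bindings): a block device
# mapping backing "/dev/sdh" with an encrypted 15 GiB EBS volume. The device
# name and size are hypothetical.
#
#   device = BlockDevice(
#       device_name="/dev/sdh",
#       volume=BlockDeviceVolume.ebs(15, encrypted=True),
#   )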
class BlockDeviceVolume(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.BlockDeviceVolume"):
"""Describes a block device mapping for an EC2 instance or Auto Scaling group."""
def __init__(self, ebs_device: typing.Optional["EbsDeviceProps"]=None, virtual_name: typing.Optional[str]=None) -> None:
"""
:param ebs_device: EBS device info.
:param virtual_name: Virtual device name.
"""
jsii.create(BlockDeviceVolume, self, [ebs_device, virtual_name])
@jsii.member(jsii_name="ebs")
@builtins.classmethod
def ebs(cls, volume_size: jsii.Number, *, encrypted: typing.Optional[bool]=None, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None) -> "BlockDeviceVolume":
"""Creates a new Elastic Block Storage device.
:param volume_size: The volume size, in Gibibytes (GiB).
:param encrypted: Specifies whether the EBS volume is encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption Default: false
:param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
:param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
:param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
"""
options = EbsDeviceOptions(encrypted=encrypted, delete_on_termination=delete_on_termination, iops=iops, volume_type=volume_type)
return jsii.sinvoke(cls, "ebs", [volume_size, options])
@jsii.member(jsii_name="ebsFromSnapshot")
@builtins.classmethod
def ebs_from_snapshot(cls, snapshot_id: str, *, volume_size: typing.Optional[jsii.Number]=None, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None) -> "BlockDeviceVolume":
"""Creates a new Elastic Block Storage device from an existing snapshot.
:param snapshot_id: The snapshot ID of the volume to use.
        :param volume_size: The volume size, in Gibibytes (GiB). If you specify volumeSize, it must be equal to or greater than the size of the snapshot. Default: - The snapshot size
:param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
:param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
:param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
"""
options = EbsDeviceSnapshotOptions(volume_size=volume_size, delete_on_termination=delete_on_termination, iops=iops, volume_type=volume_type)
return jsii.sinvoke(cls, "ebsFromSnapshot", [snapshot_id, options])
@jsii.member(jsii_name="ephemeral")
@builtins.classmethod
def ephemeral(cls, volume_index: jsii.Number) -> "BlockDeviceVolume":
"""Creates a virtual, ephemeral device.
The name will be in the form ephemeral{volumeIndex}.
        :param volume_index: the volume index. Must be greater than or equal to 0
"""
return jsii.sinvoke(cls, "ephemeral", [volume_index])
@builtins.property
@jsii.member(jsii_name="ebsDevice")
def ebs_device(self) -> typing.Optional["EbsDeviceProps"]:
"""EBS device info."""
return jsii.get(self, "ebsDevice")
@builtins.property
@jsii.member(jsii_name="virtualName")
def virtual_name(self) -> typing.Optional[str]:
"""Virtual device name."""
return jsii.get(self, "virtualName")
@jsii.implements(aws_cdk.core.IInspectable)
class CfnAutoScalingGroup(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup"):
"""A CloudFormation ``AWS::AutoScaling::AutoScalingGroup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::AutoScalingGroup
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, max_size: str, min_size: str, auto_scaling_group_name: typing.Optional[str]=None, availability_zones: typing.Optional[typing.List[str]]=None, cooldown: typing.Optional[str]=None, desired_capacity: typing.Optional[str]=None, health_check_grace_period: typing.Optional[jsii.Number]=None, health_check_type: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, launch_template: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["LaunchTemplateSpecificationProperty"]]]=None, lifecycle_hook_specification_list: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "LifecycleHookSpecificationProperty"]]]]]=None, load_balancer_names: typing.Optional[typing.List[str]]=None, metrics_collection: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "MetricsCollectionProperty"]]]]]=None, mixed_instances_policy: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["MixedInstancesPolicyProperty"]]]=None, notification_configurations: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "NotificationConfigurationProperty"]]]]]=None, placement_group: typing.Optional[str]=None, service_linked_role_arn: typing.Optional[str]=None, tags: typing.Optional[typing.List["TagPropertyProperty"]]=None, target_group_arns: typing.Optional[typing.List[str]]=None, termination_policies: typing.Optional[typing.List[str]]=None, vpc_zone_identifier: typing.Optional[typing.List[str]]=None) -> None:
"""Create a new ``AWS::AutoScaling::AutoScalingGroup``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param max_size: ``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
:param min_size: ``AWS::AutoScaling::AutoScalingGroup.MinSize``.
:param auto_scaling_group_name: ``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``.
:param availability_zones: ``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``.
:param cooldown: ``AWS::AutoScaling::AutoScalingGroup.Cooldown``.
:param desired_capacity: ``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``.
:param health_check_grace_period: ``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``.
:param health_check_type: ``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``.
:param instance_id: ``AWS::AutoScaling::AutoScalingGroup.InstanceId``.
:param launch_configuration_name: ``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``.
:param launch_template: ``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``.
:param lifecycle_hook_specification_list: ``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``.
:param load_balancer_names: ``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``.
:param metrics_collection: ``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``.
:param mixed_instances_policy: ``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``.
:param notification_configurations: ``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``.
:param placement_group: ``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``.
:param service_linked_role_arn: ``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``.
:param tags: ``AWS::AutoScaling::AutoScalingGroup.Tags``.
:param target_group_arns: ``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``.
:param termination_policies: ``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``.
:param vpc_zone_identifier: ``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``.
"""
props = CfnAutoScalingGroupProps(max_size=max_size, min_size=min_size, auto_scaling_group_name=auto_scaling_group_name, availability_zones=availability_zones, cooldown=cooldown, desired_capacity=desired_capacity, health_check_grace_period=health_check_grace_period, health_check_type=health_check_type, instance_id=instance_id, launch_configuration_name=launch_configuration_name, launch_template=launch_template, lifecycle_hook_specification_list=lifecycle_hook_specification_list, load_balancer_names=load_balancer_names, metrics_collection=metrics_collection, mixed_instances_policy=mixed_instances_policy, notification_configurations=notification_configurations, placement_group=placement_group, service_linked_role_arn=service_linked_role_arn, tags=tags, target_group_arns=target_group_arns, termination_policies=termination_policies, vpc_zone_identifier=vpc_zone_identifier)
jsii.create(CfnAutoScalingGroup, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="tags")
def tags(self) -> aws_cdk.core.TagManager:
"""``AWS::AutoScaling::AutoScalingGroup.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-tags
"""
return jsii.get(self, "tags")
@builtins.property
@jsii.member(jsii_name="maxSize")
def max_size(self) -> str:
"""``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-maxsize
"""
return jsii.get(self, "maxSize")
@max_size.setter
def max_size(self, value: str):
jsii.set(self, "maxSize", value)
@builtins.property
@jsii.member(jsii_name="minSize")
def min_size(self) -> str:
"""``AWS::AutoScaling::AutoScalingGroup.MinSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-minsize
"""
return jsii.get(self, "minSize")
@min_size.setter
def min_size(self, value: str):
jsii.set(self, "minSize", value)
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-autoscalinggroupname
"""
return jsii.get(self, "autoScalingGroupName")
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, value: typing.Optional[str]):
jsii.set(self, "autoScalingGroupName", value)
@builtins.property
@jsii.member(jsii_name="availabilityZones")
def availability_zones(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-availabilityzones
"""
return jsii.get(self, "availabilityZones")
@availability_zones.setter
def availability_zones(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "availabilityZones", value)
@builtins.property
@jsii.member(jsii_name="cooldown")
def cooldown(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.Cooldown``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-cooldown
"""
return jsii.get(self, "cooldown")
@cooldown.setter
def cooldown(self, value: typing.Optional[str]):
jsii.set(self, "cooldown", value)
@builtins.property
@jsii.member(jsii_name="desiredCapacity")
def desired_capacity(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
"""
return jsii.get(self, "desiredCapacity")
@desired_capacity.setter
def desired_capacity(self, value: typing.Optional[str]):
jsii.set(self, "desiredCapacity", value)
@builtins.property
@jsii.member(jsii_name="healthCheckGracePeriod")
def health_check_grace_period(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-healthcheckgraceperiod
"""
return jsii.get(self, "healthCheckGracePeriod")
@health_check_grace_period.setter
def health_check_grace_period(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "healthCheckGracePeriod", value)
@builtins.property
@jsii.member(jsii_name="healthCheckType")
def health_check_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-healthchecktype
"""
return jsii.get(self, "healthCheckType")
@health_check_type.setter
def health_check_type(self, value: typing.Optional[str]):
jsii.set(self, "healthCheckType", value)
@builtins.property
@jsii.member(jsii_name="instanceId")
def instance_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.InstanceId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-instanceid
"""
return jsii.get(self, "instanceId")
@instance_id.setter
def instance_id(self, value: typing.Optional[str]):
jsii.set(self, "instanceId", value)
@builtins.property
@jsii.member(jsii_name="launchConfigurationName")
def launch_configuration_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-launchconfigurationname
"""
return jsii.get(self, "launchConfigurationName")
@launch_configuration_name.setter
def launch_configuration_name(self, value: typing.Optional[str]):
jsii.set(self, "launchConfigurationName", value)
@builtins.property
@jsii.member(jsii_name="launchTemplate")
def launch_template(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["LaunchTemplateSpecificationProperty"]]]:
"""``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-launchtemplate
"""
return jsii.get(self, "launchTemplate")
@launch_template.setter
def launch_template(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["LaunchTemplateSpecificationProperty"]]]):
jsii.set(self, "launchTemplate", value)
@builtins.property
@jsii.member(jsii_name="lifecycleHookSpecificationList")
def lifecycle_hook_specification_list(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "LifecycleHookSpecificationProperty"]]]]]:
"""``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecificationlist
"""
return jsii.get(self, "lifecycleHookSpecificationList")
@lifecycle_hook_specification_list.setter
def lifecycle_hook_specification_list(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "LifecycleHookSpecificationProperty"]]]]]):
jsii.set(self, "lifecycleHookSpecificationList", value)
@builtins.property
@jsii.member(jsii_name="loadBalancerNames")
def load_balancer_names(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-loadbalancernames
"""
return jsii.get(self, "loadBalancerNames")
@load_balancer_names.setter
def load_balancer_names(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "loadBalancerNames", value)
@builtins.property
@jsii.member(jsii_name="metricsCollection")
def metrics_collection(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "MetricsCollectionProperty"]]]]]:
"""``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-metricscollection
"""
return jsii.get(self, "metricsCollection")
@metrics_collection.setter
def metrics_collection(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "MetricsCollectionProperty"]]]]]):
jsii.set(self, "metricsCollection", value)
@builtins.property
@jsii.member(jsii_name="mixedInstancesPolicy")
def mixed_instances_policy(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["MixedInstancesPolicyProperty"]]]:
"""``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-mixedinstancespolicy
"""
return jsii.get(self, "mixedInstancesPolicy")
@mixed_instances_policy.setter
def mixed_instances_policy(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["MixedInstancesPolicyProperty"]]]):
jsii.set(self, "mixedInstancesPolicy", value)
@builtins.property
@jsii.member(jsii_name="notificationConfigurations")
def notification_configurations(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "NotificationConfigurationProperty"]]]]]:
"""``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-notificationconfigurations
"""
return jsii.get(self, "notificationConfigurations")
@notification_configurations.setter
def notification_configurations(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "NotificationConfigurationProperty"]]]]]):
jsii.set(self, "notificationConfigurations", value)
@builtins.property
@jsii.member(jsii_name="placementGroup")
def placement_group(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-placementgroup
"""
return jsii.get(self, "placementGroup")
@placement_group.setter
def placement_group(self, value: typing.Optional[str]):
jsii.set(self, "placementGroup", value)
@builtins.property
@jsii.member(jsii_name="serviceLinkedRoleArn")
def service_linked_role_arn(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-servicelinkedrolearn
"""
return jsii.get(self, "serviceLinkedRoleArn")
@service_linked_role_arn.setter
def service_linked_role_arn(self, value: typing.Optional[str]):
jsii.set(self, "serviceLinkedRoleArn", value)
@builtins.property
@jsii.member(jsii_name="targetGroupArns")
def target_group_arns(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-targetgrouparns
"""
return jsii.get(self, "targetGroupArns")
@target_group_arns.setter
def target_group_arns(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "targetGroupArns", value)
@builtins.property
@jsii.member(jsii_name="terminationPolicies")
def termination_policies(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-termpolicy
"""
return jsii.get(self, "terminationPolicies")
@termination_policies.setter
def termination_policies(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "terminationPolicies", value)
@builtins.property
@jsii.member(jsii_name="vpcZoneIdentifier")
def vpc_zone_identifier(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-vpczoneidentifier
"""
return jsii.get(self, "vpcZoneIdentifier")
@vpc_zone_identifier.setter
def vpc_zone_identifier(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "vpcZoneIdentifier", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.InstancesDistributionProperty", jsii_struct_bases=[], name_mapping={'on_demand_allocation_strategy': 'onDemandAllocationStrategy', 'on_demand_base_capacity': 'onDemandBaseCapacity', 'on_demand_percentage_above_base_capacity': 'onDemandPercentageAboveBaseCapacity', 'spot_allocation_strategy': 'spotAllocationStrategy', 'spot_instance_pools': 'spotInstancePools', 'spot_max_price': 'spotMaxPrice'})
class InstancesDistributionProperty():
def __init__(self, *, on_demand_allocation_strategy: typing.Optional[str]=None, on_demand_base_capacity: typing.Optional[jsii.Number]=None, on_demand_percentage_above_base_capacity: typing.Optional[jsii.Number]=None, spot_allocation_strategy: typing.Optional[str]=None, spot_instance_pools: typing.Optional[jsii.Number]=None, spot_max_price: typing.Optional[str]=None):
"""
:param on_demand_allocation_strategy: ``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandAllocationStrategy``.
:param on_demand_base_capacity: ``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandBaseCapacity``.
:param on_demand_percentage_above_base_capacity: ``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandPercentageAboveBaseCapacity``.
:param spot_allocation_strategy: ``CfnAutoScalingGroup.InstancesDistributionProperty.SpotAllocationStrategy``.
:param spot_instance_pools: ``CfnAutoScalingGroup.InstancesDistributionProperty.SpotInstancePools``.
:param spot_max_price: ``CfnAutoScalingGroup.InstancesDistributionProperty.SpotMaxPrice``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html
"""
self._values = {
}
if on_demand_allocation_strategy is not None: self._values["on_demand_allocation_strategy"] = on_demand_allocation_strategy
if on_demand_base_capacity is not None: self._values["on_demand_base_capacity"] = on_demand_base_capacity
if on_demand_percentage_above_base_capacity is not None: self._values["on_demand_percentage_above_base_capacity"] = on_demand_percentage_above_base_capacity
if spot_allocation_strategy is not None: self._values["spot_allocation_strategy"] = spot_allocation_strategy
if spot_instance_pools is not None: self._values["spot_instance_pools"] = spot_instance_pools
if spot_max_price is not None: self._values["spot_max_price"] = spot_max_price
@builtins.property
def on_demand_allocation_strategy(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandAllocationStrategy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-ondemandallocationstrategy
"""
return self._values.get('on_demand_allocation_strategy')
@builtins.property
def on_demand_base_capacity(self) -> typing.Optional[jsii.Number]:
"""``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandBaseCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-ondemandbasecapacity
"""
return self._values.get('on_demand_base_capacity')
@builtins.property
def on_demand_percentage_above_base_capacity(self) -> typing.Optional[jsii.Number]:
"""``CfnAutoScalingGroup.InstancesDistributionProperty.OnDemandPercentageAboveBaseCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-ondemandpercentageabovebasecapacity
"""
return self._values.get('on_demand_percentage_above_base_capacity')
@builtins.property
def spot_allocation_strategy(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.InstancesDistributionProperty.SpotAllocationStrategy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-spotallocationstrategy
"""
return self._values.get('spot_allocation_strategy')
@builtins.property
def spot_instance_pools(self) -> typing.Optional[jsii.Number]:
"""``CfnAutoScalingGroup.InstancesDistributionProperty.SpotInstancePools``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-spotinstancepools
"""
return self._values.get('spot_instance_pools')
@builtins.property
def spot_max_price(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.InstancesDistributionProperty.SpotMaxPrice``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-instancesdistribution.html#cfn-autoscaling-autoscalinggroup-instancesdistribution-spotmaxprice
"""
return self._values.get('spot_max_price')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'InstancesDistributionProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
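# Illustrative sketch (not part of the generated bindings): an instances
# distribution that keeps 2 On-Demand instances as a base and fills 75% of
# the remaining capacity from Spot. All values are hypothetical.
#
#   dist = CfnAutoScalingGroup.InstancesDistributionProperty(
#       on_demand_base_capacity=2,
#       on_demand_percentage_above_base_capacity=25,
#       spot_allocation_strategy="lowest-price",
#       spot_instance_pools=4,
#   )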
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LaunchTemplateOverridesProperty", jsii_struct_bases=[], name_mapping={'instance_type': 'instanceType', 'weighted_capacity': 'weightedCapacity'})
class LaunchTemplateOverridesProperty():
def __init__(self, *, instance_type: typing.Optional[str]=None, weighted_capacity: typing.Optional[str]=None):
"""
:param instance_type: ``CfnAutoScalingGroup.LaunchTemplateOverridesProperty.InstanceType``.
:param weighted_capacity: ``CfnAutoScalingGroup.LaunchTemplateOverridesProperty.WeightedCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplateoverrides.html
"""
self._values = {
}
if instance_type is not None: self._values["instance_type"] = instance_type
if weighted_capacity is not None: self._values["weighted_capacity"] = weighted_capacity
@builtins.property
def instance_type(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LaunchTemplateOverridesProperty.InstanceType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplateoverrides.html#cfn-autoscaling-autoscalinggroup-launchtemplateoverrides-instancetype
"""
return self._values.get('instance_type')
@builtins.property
def weighted_capacity(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LaunchTemplateOverridesProperty.WeightedCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplateoverrides.html#cfn-autoscaling-autoscalinggroup-launchtemplateoverrides-weightedcapacity
"""
return self._values.get('weighted_capacity')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LaunchTemplateOverridesProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LaunchTemplateProperty", jsii_struct_bases=[], name_mapping={'launch_template_specification': 'launchTemplateSpecification', 'overrides': 'overrides'})
class LaunchTemplateProperty():
def __init__(self, *, launch_template_specification: typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"], overrides: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateOverridesProperty"]]]]]=None):
"""
:param launch_template_specification: ``CfnAutoScalingGroup.LaunchTemplateProperty.LaunchTemplateSpecification``.
:param overrides: ``CfnAutoScalingGroup.LaunchTemplateProperty.Overrides``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html
"""
self._values = {
'launch_template_specification': launch_template_specification,
}
if overrides is not None: self._values["overrides"] = overrides
@builtins.property
def launch_template_specification(self) -> typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"]:
"""``CfnAutoScalingGroup.LaunchTemplateProperty.LaunchTemplateSpecification``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html#cfn-as-group-launchtemplate
"""
return self._values.get('launch_template_specification')
@builtins.property
def overrides(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateOverridesProperty"]]]]]:
"""``CfnAutoScalingGroup.LaunchTemplateProperty.Overrides``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-mixedinstancespolicy-launchtemplate.html#cfn-as-mixedinstancespolicy-overrides
"""
return self._values.get('overrides')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LaunchTemplateProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LaunchTemplateSpecificationProperty", jsii_struct_bases=[], name_mapping={'version': 'version', 'launch_template_id': 'launchTemplateId', 'launch_template_name': 'launchTemplateName'})
class LaunchTemplateSpecificationProperty():
def __init__(self, *, version: str, launch_template_id: typing.Optional[str]=None, launch_template_name: typing.Optional[str]=None):
"""
:param version: ``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.Version``.
:param launch_template_id: ``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateId``.
:param launch_template_name: ``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html
"""
self._values = {
'version': version,
}
if launch_template_id is not None: self._values["launch_template_id"] = launch_template_id
if launch_template_name is not None: self._values["launch_template_name"] = launch_template_name
@builtins.property
def version(self) -> str:
"""``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.Version``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-version
"""
return self._values.get('version')
@builtins.property
def launch_template_id(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-launchtemplateid
"""
return self._values.get('launch_template_id')
@builtins.property
def launch_template_name(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LaunchTemplateSpecificationProperty.LaunchTemplateName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-launchtemplatename
"""
return self._values.get('launch_template_name')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LaunchTemplateSpecificationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
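# Illustrative sketch (not part of the generated bindings): referencing a
# launch template by ID at its latest version. The ID is hypothetical;
# CloudFormation expects either launch_template_id or launch_template_name,
# not both.
#
#   lt_spec = CfnAutoScalingGroup.LaunchTemplateSpecificationProperty(
#       version="$Latest",
#       launch_template_id="lt-0123456789abcdef0",
#   )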
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.LifecycleHookSpecificationProperty", jsii_struct_bases=[], name_mapping={'lifecycle_hook_name': 'lifecycleHookName', 'lifecycle_transition': 'lifecycleTransition', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'notification_metadata': 'notificationMetadata', 'notification_target_arn': 'notificationTargetArn', 'role_arn': 'roleArn'})
class LifecycleHookSpecificationProperty():
def __init__(self, *, lifecycle_hook_name: str, lifecycle_transition: str, default_result: typing.Optional[str]=None, heartbeat_timeout: typing.Optional[jsii.Number]=None, notification_metadata: typing.Optional[str]=None, notification_target_arn: typing.Optional[str]=None, role_arn: typing.Optional[str]=None):
"""
:param lifecycle_hook_name: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleHookName``.
:param lifecycle_transition: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleTransition``.
:param default_result: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.DefaultResult``.
:param heartbeat_timeout: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.HeartbeatTimeout``.
:param notification_metadata: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationMetadata``.
:param notification_target_arn: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationTargetARN``.
:param role_arn: ``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.RoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html
"""
self._values = {
'lifecycle_hook_name': lifecycle_hook_name,
'lifecycle_transition': lifecycle_transition,
}
if default_result is not None: self._values["default_result"] = default_result
if heartbeat_timeout is not None: self._values["heartbeat_timeout"] = heartbeat_timeout
if notification_metadata is not None: self._values["notification_metadata"] = notification_metadata
if notification_target_arn is not None: self._values["notification_target_arn"] = notification_target_arn
if role_arn is not None: self._values["role_arn"] = role_arn
@builtins.property
def lifecycle_hook_name(self) -> str:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleHookName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-lifecyclehookname
"""
return self._values.get('lifecycle_hook_name')
@builtins.property
def lifecycle_transition(self) -> str:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.LifecycleTransition``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-lifecycletransition
"""
return self._values.get('lifecycle_transition')
@builtins.property
def default_result(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.DefaultResult``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-defaultresult
"""
return self._values.get('default_result')
@builtins.property
def heartbeat_timeout(self) -> typing.Optional[jsii.Number]:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.HeartbeatTimeout``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-heartbeattimeout
"""
return self._values.get('heartbeat_timeout')
@builtins.property
def notification_metadata(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationMetadata``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-notificationmetadata
"""
return self._values.get('notification_metadata')
@builtins.property
def notification_target_arn(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.NotificationTargetARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-notificationtargetarn
"""
return self._values.get('notification_target_arn')
@builtins.property
def role_arn(self) -> typing.Optional[str]:
"""``CfnAutoScalingGroup.LifecycleHookSpecificationProperty.RoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-lifecyclehookspecification.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecification-rolearn
"""
return self._values.get('role_arn')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LifecycleHookSpecificationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
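# Illustrative sketch (not part of the generated bindings): a lifecycle hook
# that pauses terminating instances for five minutes so they can drain. The
# hook name and timeout are hypothetical.
#
#   hook = CfnAutoScalingGroup.LifecycleHookSpecificationProperty(
#       lifecycle_hook_name="drain-on-scale-in",
#       lifecycle_transition="autoscaling:EC2_INSTANCE_TERMINATING",
#       heartbeat_timeout=300,
#       default_result="CONTINUE",
#   )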
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.MetricsCollectionProperty", jsii_struct_bases=[], name_mapping={'granularity': 'granularity', 'metrics': 'metrics'})
class MetricsCollectionProperty():
def __init__(self, *, granularity: str, metrics: typing.Optional[typing.List[str]]=None):
"""
:param granularity: ``CfnAutoScalingGroup.MetricsCollectionProperty.Granularity``.
:param metrics: ``CfnAutoScalingGroup.MetricsCollectionProperty.Metrics``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-metricscollection.html
"""
self._values = {
'granularity': granularity,
}
if metrics is not None: self._values["metrics"] = metrics
@builtins.property
def granularity(self) -> str:
"""``CfnAutoScalingGroup.MetricsCollectionProperty.Granularity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-metricscollection.html#cfn-as-metricscollection-granularity
"""
return self._values.get('granularity')
@builtins.property
def metrics(self) -> typing.Optional[typing.List[str]]:
"""``CfnAutoScalingGroup.MetricsCollectionProperty.Metrics``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-metricscollection.html#cfn-as-metricscollection-metrics
"""
return self._values.get('metrics')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MetricsCollectionProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
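# Illustrative sketch (not part of the generated bindings): enable one-minute
# group metrics for a few metrics; omitting ``metrics`` enables all of them.
#
#   metrics = CfnAutoScalingGroup.MetricsCollectionProperty(
#       granularity="1Minute",
#       metrics=["GroupMinSize", "GroupMaxSize", "GroupInServiceInstances"],
#   )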
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.MixedInstancesPolicyProperty", jsii_struct_bases=[], name_mapping={'launch_template': 'launchTemplate', 'instances_distribution': 'instancesDistribution'})
class MixedInstancesPolicyProperty():
def __init__(self, *, launch_template: typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateProperty"], instances_distribution: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.InstancesDistributionProperty"]]]=None):
"""
:param launch_template: ``CfnAutoScalingGroup.MixedInstancesPolicyProperty.LaunchTemplate``.
:param instances_distribution: ``CfnAutoScalingGroup.MixedInstancesPolicyProperty.InstancesDistribution``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-group-mixedinstancespolicy.html
"""
self._values = {
'launch_template': launch_template,
}
if instances_distribution is not None: self._values["instances_distribution"] = instances_distribution
@builtins.property
def launch_template(self) -> typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LaunchTemplateProperty"]:
"""``CfnAutoScalingGroup.MixedInstancesPolicyProperty.LaunchTemplate``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-group-mixedinstancespolicy.html#cfn-as-mixedinstancespolicy-launchtemplate
"""
return self._values.get('launch_template')
@builtins.property
def instances_distribution(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.InstancesDistributionProperty"]]]:
"""``CfnAutoScalingGroup.MixedInstancesPolicyProperty.InstancesDistribution``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-as-group-mixedinstancespolicy.html#cfn-as-mixedinstancespolicy-instancesdistribution
"""
return self._values.get('instances_distribution')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MixedInstancesPolicyProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
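# Illustrative sketch (not part of the generated bindings): composing a mixed
# instances policy from the pieces above. ``lt_spec`` and ``dist`` refer to
# the hypothetical objects sketched earlier; the instance types are
# hypothetical too.
#
#   policy = CfnAutoScalingGroup.MixedInstancesPolicyProperty(
#       launch_template=CfnAutoScalingGroup.LaunchTemplateProperty(
#           launch_template_specification=lt_spec,
#           overrides=[
#               CfnAutoScalingGroup.LaunchTemplateOverridesProperty(
#                   instance_type="m5.large"),
#               CfnAutoScalingGroup.LaunchTemplateOverridesProperty(
#                   instance_type="m5a.large"),
#           ],
#       ),
#       instances_distribution=dist,
#   )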
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.NotificationConfigurationProperty", jsii_struct_bases=[], name_mapping={'topic_arn': 'topicArn', 'notification_types': 'notificationTypes'})
class NotificationConfigurationProperty():
def __init__(self, *, topic_arn: str, notification_types: typing.Optional[typing.List[str]]=None):
"""
:param topic_arn: ``CfnAutoScalingGroup.NotificationConfigurationProperty.TopicARN``.
:param notification_types: ``CfnAutoScalingGroup.NotificationConfigurationProperty.NotificationTypes``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-notificationconfigurations.html
"""
self._values = {
'topic_arn': topic_arn,
}
if notification_types is not None: self._values["notification_types"] = notification_types
@builtins.property
def topic_arn(self) -> str:
"""``CfnAutoScalingGroup.NotificationConfigurationProperty.TopicARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-notificationconfigurations.html#cfn-autoscaling-autoscalinggroup-notificationconfigurations-topicarn
"""
return self._values.get('topic_arn')
@builtins.property
def notification_types(self) -> typing.Optional[typing.List[str]]:
"""``CfnAutoScalingGroup.NotificationConfigurationProperty.NotificationTypes``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-notificationconfigurations.html#cfn-as-group-notificationconfigurations-notificationtypes
"""
return self._values.get('notification_types')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'NotificationConfigurationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
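# Illustrative sketch (not part of the generated bindings): send launch and
# terminate events to an SNS topic. The topic ARN is hypothetical.
#
#   notify = CfnAutoScalingGroup.NotificationConfigurationProperty(
#       topic_arn="arn:aws:sns:us-east-1:123456789012:asg-events",
#       notification_types=[
#           "autoscaling:EC2_INSTANCE_LAUNCH",
#           "autoscaling:EC2_INSTANCE_TERMINATE",
#       ],
#   )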
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroup.TagPropertyProperty", jsii_struct_bases=[], name_mapping={'key': 'key', 'propagate_at_launch': 'propagateAtLaunch', 'value': 'value'})
class TagPropertyProperty():
def __init__(self, *, key: str, propagate_at_launch: typing.Union[bool, aws_cdk.core.IResolvable], value: str):
"""
:param key: ``CfnAutoScalingGroup.TagPropertyProperty.Key``.
:param propagate_at_launch: ``CfnAutoScalingGroup.TagPropertyProperty.PropagateAtLaunch``.
:param value: ``CfnAutoScalingGroup.TagPropertyProperty.Value``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-tags.html
"""
self._values = {
'key': key,
'propagate_at_launch': propagate_at_launch,
'value': value,
}
@builtins.property
def key(self) -> str:
"""``CfnAutoScalingGroup.TagPropertyProperty.Key``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-tags.html#cfn-as-tags-Key
"""
return self._values.get('key')
@builtins.property
def propagate_at_launch(self) -> typing.Union[bool, aws_cdk.core.IResolvable]:
"""``CfnAutoScalingGroup.TagPropertyProperty.PropagateAtLaunch``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-tags.html#cfn-as-tags-PropagateAtLaunch
"""
return self._values.get('propagate_at_launch')
@builtins.property
def value(self) -> str:
"""``CfnAutoScalingGroup.TagPropertyProperty.Value``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-tags.html#cfn-as-tags-Value
"""
return self._values.get('value')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'TagPropertyProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
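# Illustrative sketch (not part of the generated bindings): a tag that is
# copied to instances at launch. The key and value are hypothetical.
#
#   tag = CfnAutoScalingGroup.TagPropertyProperty(
#       key="team", value="platform", propagate_at_launch=True,
#   )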
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnAutoScalingGroupProps", jsii_struct_bases=[], name_mapping={'max_size': 'maxSize', 'min_size': 'minSize', 'auto_scaling_group_name': 'autoScalingGroupName', 'availability_zones': 'availabilityZones', 'cooldown': 'cooldown', 'desired_capacity': 'desiredCapacity', 'health_check_grace_period': 'healthCheckGracePeriod', 'health_check_type': 'healthCheckType', 'instance_id': 'instanceId', 'launch_configuration_name': 'launchConfigurationName', 'launch_template': 'launchTemplate', 'lifecycle_hook_specification_list': 'lifecycleHookSpecificationList', 'load_balancer_names': 'loadBalancerNames', 'metrics_collection': 'metricsCollection', 'mixed_instances_policy': 'mixedInstancesPolicy', 'notification_configurations': 'notificationConfigurations', 'placement_group': 'placementGroup', 'service_linked_role_arn': 'serviceLinkedRoleArn', 'tags': 'tags', 'target_group_arns': 'targetGroupArns', 'termination_policies': 'terminationPolicies', 'vpc_zone_identifier': 'vpcZoneIdentifier'})
class CfnAutoScalingGroupProps():
def __init__(self, *, max_size: str, min_size: str, auto_scaling_group_name: typing.Optional[str]=None, availability_zones: typing.Optional[typing.List[str]]=None, cooldown: typing.Optional[str]=None, desired_capacity: typing.Optional[str]=None, health_check_grace_period: typing.Optional[jsii.Number]=None, health_check_type: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, launch_template: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"]]]=None, lifecycle_hook_specification_list: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LifecycleHookSpecificationProperty"]]]]]=None, load_balancer_names: typing.Optional[typing.List[str]]=None, metrics_collection: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.MetricsCollectionProperty"]]]]]=None, mixed_instances_policy: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.MixedInstancesPolicyProperty"]]]=None, notification_configurations: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.NotificationConfigurationProperty"]]]]]=None, placement_group: typing.Optional[str]=None, service_linked_role_arn: typing.Optional[str]=None, tags: typing.Optional[typing.List["CfnAutoScalingGroup.TagPropertyProperty"]]=None, target_group_arns: typing.Optional[typing.List[str]]=None, termination_policies: typing.Optional[typing.List[str]]=None, vpc_zone_identifier: typing.Optional[typing.List[str]]=None):
"""Properties for defining a ``AWS::AutoScaling::AutoScalingGroup``.
:param max_size: ``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
:param min_size: ``AWS::AutoScaling::AutoScalingGroup.MinSize``.
:param auto_scaling_group_name: ``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``.
:param availability_zones: ``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``.
:param cooldown: ``AWS::AutoScaling::AutoScalingGroup.Cooldown``.
:param desired_capacity: ``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``.
:param health_check_grace_period: ``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``.
:param health_check_type: ``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``.
:param instance_id: ``AWS::AutoScaling::AutoScalingGroup.InstanceId``.
:param launch_configuration_name: ``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``.
:param launch_template: ``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``.
:param lifecycle_hook_specification_list: ``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``.
:param load_balancer_names: ``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``.
:param metrics_collection: ``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``.
:param mixed_instances_policy: ``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``.
:param notification_configurations: ``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``.
:param placement_group: ``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``.
:param service_linked_role_arn: ``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``.
:param tags: ``AWS::AutoScaling::AutoScalingGroup.Tags``.
:param target_group_arns: ``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``.
:param termination_policies: ``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``.
:param vpc_zone_identifier: ``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html
"""
self._values = {
'max_size': max_size,
'min_size': min_size,
}
if auto_scaling_group_name is not None: self._values["auto_scaling_group_name"] = auto_scaling_group_name
if availability_zones is not None: self._values["availability_zones"] = availability_zones
if cooldown is not None: self._values["cooldown"] = cooldown
if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
if health_check_grace_period is not None: self._values["health_check_grace_period"] = health_check_grace_period
if health_check_type is not None: self._values["health_check_type"] = health_check_type
if instance_id is not None: self._values["instance_id"] = instance_id
if launch_configuration_name is not None: self._values["launch_configuration_name"] = launch_configuration_name
if launch_template is not None: self._values["launch_template"] = launch_template
if lifecycle_hook_specification_list is not None: self._values["lifecycle_hook_specification_list"] = lifecycle_hook_specification_list
if load_balancer_names is not None: self._values["load_balancer_names"] = load_balancer_names
if metrics_collection is not None: self._values["metrics_collection"] = metrics_collection
if mixed_instances_policy is not None: self._values["mixed_instances_policy"] = mixed_instances_policy
if notification_configurations is not None: self._values["notification_configurations"] = notification_configurations
if placement_group is not None: self._values["placement_group"] = placement_group
if service_linked_role_arn is not None: self._values["service_linked_role_arn"] = service_linked_role_arn
if tags is not None: self._values["tags"] = tags
if target_group_arns is not None: self._values["target_group_arns"] = target_group_arns
if termination_policies is not None: self._values["termination_policies"] = termination_policies
if vpc_zone_identifier is not None: self._values["vpc_zone_identifier"] = vpc_zone_identifier
@builtins.property
def max_size(self) -> str:
"""``AWS::AutoScaling::AutoScalingGroup.MaxSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-maxsize
"""
return self._values.get('max_size')
@builtins.property
def min_size(self) -> str:
"""``AWS::AutoScaling::AutoScalingGroup.MinSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-minsize
"""
return self._values.get('min_size')
@builtins.property
def auto_scaling_group_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-autoscalinggroupname
"""
return self._values.get('auto_scaling_group_name')
@builtins.property
def availability_zones(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.AvailabilityZones``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-availabilityzones
"""
return self._values.get('availability_zones')
@builtins.property
def cooldown(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.Cooldown``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-cooldown
"""
return self._values.get('cooldown')
@builtins.property
def desired_capacity(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.DesiredCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
"""
return self._values.get('desired_capacity')
@builtins.property
def health_check_grace_period(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::AutoScalingGroup.HealthCheckGracePeriod``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-healthcheckgraceperiod
"""
return self._values.get('health_check_grace_period')
@builtins.property
def health_check_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.HealthCheckType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-healthchecktype
"""
return self._values.get('health_check_type')
@builtins.property
def instance_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.InstanceId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-instanceid
"""
return self._values.get('instance_id')
@builtins.property
def launch_configuration_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.LaunchConfigurationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-launchconfigurationname
"""
return self._values.get('launch_configuration_name')
@builtins.property
def launch_template(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.LaunchTemplateSpecificationProperty"]]]:
"""``AWS::AutoScaling::AutoScalingGroup.LaunchTemplate``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-launchtemplate
"""
return self._values.get('launch_template')
@builtins.property
def lifecycle_hook_specification_list(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.LifecycleHookSpecificationProperty"]]]]]:
"""``AWS::AutoScaling::AutoScalingGroup.LifecycleHookSpecificationList``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-lifecyclehookspecificationlist
"""
return self._values.get('lifecycle_hook_specification_list')
@builtins.property
def load_balancer_names(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.LoadBalancerNames``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-loadbalancernames
"""
return self._values.get('load_balancer_names')
@builtins.property
def metrics_collection(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.MetricsCollectionProperty"]]]]]:
"""``AWS::AutoScaling::AutoScalingGroup.MetricsCollection``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-metricscollection
"""
return self._values.get('metrics_collection')
@builtins.property
def mixed_instances_policy(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnAutoScalingGroup.MixedInstancesPolicyProperty"]]]:
"""``AWS::AutoScaling::AutoScalingGroup.MixedInstancesPolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-mixedinstancespolicy
"""
return self._values.get('mixed_instances_policy')
@builtins.property
def notification_configurations(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnAutoScalingGroup.NotificationConfigurationProperty"]]]]]:
"""``AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-notificationconfigurations
"""
return self._values.get('notification_configurations')
@builtins.property
def placement_group(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.PlacementGroup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-placementgroup
"""
return self._values.get('placement_group')
@builtins.property
def service_linked_role_arn(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::AutoScalingGroup.ServiceLinkedRoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-autoscaling-autoscalinggroup-servicelinkedrolearn
"""
return self._values.get('service_linked_role_arn')
@builtins.property
def tags(self) -> typing.Optional[typing.List["CfnAutoScalingGroup.TagPropertyProperty"]]:
"""``AWS::AutoScaling::AutoScalingGroup.Tags``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-tags
"""
return self._values.get('tags')
@builtins.property
def target_group_arns(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.TargetGroupARNs``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-targetgrouparns
"""
return self._values.get('target_group_arns')
@builtins.property
def termination_policies(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.TerminationPolicies``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-termpolicy
"""
return self._values.get('termination_policies')
@builtins.property
def vpc_zone_identifier(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::AutoScalingGroup.VPCZoneIdentifier``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-vpczoneidentifier
"""
return self._values.get('vpc_zone_identifier')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnAutoScalingGroupProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
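# Illustrative sketch (not part of the generated bindings): the minimal
# ``CfnAutoScalingGroupProps``. Only ``max_size`` and ``min_size`` are
# required, and both are string-typed to match the CloudFormation schema;
# every other field defaults to None and is then omitted from ``_values``.
# The concrete values are hypothetical placeholders.
def _example_auto_scaling_group_props() -> CfnAutoScalingGroupProps:
    return CfnAutoScalingGroupProps(
        max_size="3",    # required; note the string type, not an int
        min_size="1",    # required; note the string type, not an int
        cooldown="300",  # hypothetical optional field (seconds, as a string)
    )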
@jsii.implements(aws_cdk.core.IInspectable)
class CfnLaunchConfiguration(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfiguration"):
"""A CloudFormation ``AWS::AutoScaling::LaunchConfiguration``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::LaunchConfiguration
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, image_id: str, instance_type: str, associate_public_ip_address: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, block_device_mappings: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "BlockDeviceMappingProperty"]]]]]=None, classic_link_vpc_id: typing.Optional[str]=None, classic_link_vpc_security_groups: typing.Optional[typing.List[str]]=None, ebs_optimized: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, iam_instance_profile: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, instance_monitoring: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, kernel_id: typing.Optional[str]=None, key_name: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, placement_tenancy: typing.Optional[str]=None, ram_disk_id: typing.Optional[str]=None, security_groups: typing.Optional[typing.List[str]]=None, spot_price: typing.Optional[str]=None, user_data: typing.Optional[str]=None) -> None:
"""Create a new ``AWS::AutoScaling::LaunchConfiguration``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param image_id: ``AWS::AutoScaling::LaunchConfiguration.ImageId``.
:param instance_type: ``AWS::AutoScaling::LaunchConfiguration.InstanceType``.
:param associate_public_ip_address: ``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.
:param block_device_mappings: ``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.
:param classic_link_vpc_id: ``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.
:param classic_link_vpc_security_groups: ``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.
:param ebs_optimized: ``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.
:param iam_instance_profile: ``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.
:param instance_id: ``AWS::AutoScaling::LaunchConfiguration.InstanceId``.
:param instance_monitoring: ``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.
:param kernel_id: ``AWS::AutoScaling::LaunchConfiguration.KernelId``.
:param key_name: ``AWS::AutoScaling::LaunchConfiguration.KeyName``.
:param launch_configuration_name: ``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.
:param placement_tenancy: ``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.
:param ram_disk_id: ``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.
:param security_groups: ``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.
:param spot_price: ``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.
:param user_data: ``AWS::AutoScaling::LaunchConfiguration.UserData``.
"""
props = CfnLaunchConfigurationProps(image_id=image_id, instance_type=instance_type, associate_public_ip_address=associate_public_ip_address, block_device_mappings=block_device_mappings, classic_link_vpc_id=classic_link_vpc_id, classic_link_vpc_security_groups=classic_link_vpc_security_groups, ebs_optimized=ebs_optimized, iam_instance_profile=iam_instance_profile, instance_id=instance_id, instance_monitoring=instance_monitoring, kernel_id=kernel_id, key_name=key_name, launch_configuration_name=launch_configuration_name, placement_tenancy=placement_tenancy, ram_disk_id=ram_disk_id, security_groups=security_groups, spot_price=spot_price, user_data=user_data)
jsii.create(CfnLaunchConfiguration, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="imageId")
def image_id(self) -> str:
"""``AWS::AutoScaling::LaunchConfiguration.ImageId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-imageid
"""
return jsii.get(self, "imageId")
@image_id.setter
def image_id(self, value: str):
jsii.set(self, "imageId", value)
@builtins.property
@jsii.member(jsii_name="instanceType")
def instance_type(self) -> str:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancetype
"""
return jsii.get(self, "instanceType")
@instance_type.setter
def instance_type(self, value: str):
jsii.set(self, "instanceType", value)
@builtins.property
@jsii.member(jsii_name="associatePublicIpAddress")
def associate_public_ip_address(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cf-as-launchconfig-associatepubip
"""
return jsii.get(self, "associatePublicIpAddress")
@associate_public_ip_address.setter
def associate_public_ip_address(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]):
jsii.set(self, "associatePublicIpAddress", value)
@builtins.property
@jsii.member(jsii_name="blockDeviceMappings")
def block_device_mappings(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "BlockDeviceMappingProperty"]]]]]:
"""``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-blockdevicemappings
"""
return jsii.get(self, "blockDeviceMappings")
@block_device_mappings.setter
def block_device_mappings(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "BlockDeviceMappingProperty"]]]]]):
jsii.set(self, "blockDeviceMappings", value)
@builtins.property
@jsii.member(jsii_name="classicLinkVpcId")
def classic_link_vpc_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcid
"""
return jsii.get(self, "classicLinkVpcId")
@classic_link_vpc_id.setter
def classic_link_vpc_id(self, value: typing.Optional[str]):
jsii.set(self, "classicLinkVpcId", value)
@builtins.property
@jsii.member(jsii_name="classicLinkVpcSecurityGroups")
def classic_link_vpc_security_groups(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcsecuritygroups
"""
return jsii.get(self, "classicLinkVpcSecurityGroups")
@classic_link_vpc_security_groups.setter
def classic_link_vpc_security_groups(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "classicLinkVpcSecurityGroups", value)
@builtins.property
@jsii.member(jsii_name="ebsOptimized")
def ebs_optimized(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ebsoptimized
"""
return jsii.get(self, "ebsOptimized")
@ebs_optimized.setter
def ebs_optimized(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]):
jsii.set(self, "ebsOptimized", value)
@builtins.property
@jsii.member(jsii_name="iamInstanceProfile")
def iam_instance_profile(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-iaminstanceprofile
"""
return jsii.get(self, "iamInstanceProfile")
@iam_instance_profile.setter
def iam_instance_profile(self, value: typing.Optional[str]):
jsii.set(self, "iamInstanceProfile", value)
@builtins.property
@jsii.member(jsii_name="instanceId")
def instance_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instanceid
"""
return jsii.get(self, "instanceId")
@instance_id.setter
def instance_id(self, value: typing.Optional[str]):
jsii.set(self, "instanceId", value)
@builtins.property
@jsii.member(jsii_name="instanceMonitoring")
def instance_monitoring(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancemonitoring
"""
return jsii.get(self, "instanceMonitoring")
@instance_monitoring.setter
def instance_monitoring(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]):
jsii.set(self, "instanceMonitoring", value)
@builtins.property
@jsii.member(jsii_name="kernelId")
def kernel_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.KernelId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-kernelid
"""
return jsii.get(self, "kernelId")
@kernel_id.setter
def kernel_id(self, value: typing.Optional[str]):
jsii.set(self, "kernelId", value)
@builtins.property
@jsii.member(jsii_name="keyName")
def key_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.KeyName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-keyname
"""
return jsii.get(self, "keyName")
@key_name.setter
def key_name(self, value: typing.Optional[str]):
jsii.set(self, "keyName", value)
@builtins.property
@jsii.member(jsii_name="launchConfigurationName")
def launch_configuration_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-autoscaling-launchconfig-launchconfigurationname
"""
return jsii.get(self, "launchConfigurationName")
@launch_configuration_name.setter
def launch_configuration_name(self, value: typing.Optional[str]):
jsii.set(self, "launchConfigurationName", value)
@builtins.property
@jsii.member(jsii_name="placementTenancy")
def placement_tenancy(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-placementtenancy
"""
return jsii.get(self, "placementTenancy")
@placement_tenancy.setter
def placement_tenancy(self, value: typing.Optional[str]):
jsii.set(self, "placementTenancy", value)
@builtins.property
@jsii.member(jsii_name="ramDiskId")
def ram_disk_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ramdiskid
"""
return jsii.get(self, "ramDiskId")
@ram_disk_id.setter
def ram_disk_id(self, value: typing.Optional[str]):
jsii.set(self, "ramDiskId", value)
@builtins.property
@jsii.member(jsii_name="securityGroups")
def security_groups(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-securitygroups
"""
return jsii.get(self, "securityGroups")
@security_groups.setter
def security_groups(self, value: typing.Optional[typing.List[str]]):
jsii.set(self, "securityGroups", value)
@builtins.property
@jsii.member(jsii_name="spotPrice")
def spot_price(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-spotprice
"""
return jsii.get(self, "spotPrice")
@spot_price.setter
def spot_price(self, value: typing.Optional[str]):
jsii.set(self, "spotPrice", value)
@builtins.property
@jsii.member(jsii_name="userData")
def user_data(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.UserData``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-userdata
"""
return jsii.get(self, "userData")
@user_data.setter
def user_data(self, value: typing.Optional[str]):
jsii.set(self, "userData", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfiguration.BlockDeviceMappingProperty", jsii_struct_bases=[], name_mapping={'device_name': 'deviceName', 'ebs': 'ebs', 'no_device': 'noDevice', 'virtual_name': 'virtualName'})
class BlockDeviceMappingProperty():
def __init__(self, *, device_name: str, ebs: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnLaunchConfiguration.BlockDeviceProperty"]]]=None, no_device: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, virtual_name: typing.Optional[str]=None):
"""
:param device_name: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.DeviceName``.
:param ebs: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.Ebs``.
:param no_device: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.NoDevice``.
:param virtual_name: ``CfnLaunchConfiguration.BlockDeviceMappingProperty.VirtualName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html
"""
self._values = {
'device_name': device_name,
}
if ebs is not None: self._values["ebs"] = ebs
if no_device is not None: self._values["no_device"] = no_device
if virtual_name is not None: self._values["virtual_name"] = virtual_name
@builtins.property
def device_name(self) -> str:
"""``CfnLaunchConfiguration.BlockDeviceMappingProperty.DeviceName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-devicename
"""
return self._values.get('device_name')
@builtins.property
def ebs(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnLaunchConfiguration.BlockDeviceProperty"]]]:
"""``CfnLaunchConfiguration.BlockDeviceMappingProperty.Ebs``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-ebs
"""
return self._values.get('ebs')
@builtins.property
def no_device(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``CfnLaunchConfiguration.BlockDeviceMappingProperty.NoDevice``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-nodevice
"""
return self._values.get('no_device')
@builtins.property
def virtual_name(self) -> typing.Optional[str]:
"""``CfnLaunchConfiguration.BlockDeviceMappingProperty.VirtualName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html#cfn-as-launchconfig-blockdev-mapping-virtualname
"""
return self._values.get('virtual_name')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BlockDeviceMappingProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfiguration.BlockDeviceProperty", jsii_struct_bases=[], name_mapping={'delete_on_termination': 'deleteOnTermination', 'encrypted': 'encrypted', 'iops': 'iops', 'snapshot_id': 'snapshotId', 'volume_size': 'volumeSize', 'volume_type': 'volumeType'})
class BlockDeviceProperty():
def __init__(self, *, delete_on_termination: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, encrypted: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, iops: typing.Optional[jsii.Number]=None, snapshot_id: typing.Optional[str]=None, volume_size: typing.Optional[jsii.Number]=None, volume_type: typing.Optional[str]=None):
"""
:param delete_on_termination: ``CfnLaunchConfiguration.BlockDeviceProperty.DeleteOnTermination``.
:param encrypted: ``CfnLaunchConfiguration.BlockDeviceProperty.Encrypted``.
:param iops: ``CfnLaunchConfiguration.BlockDeviceProperty.Iops``.
:param snapshot_id: ``CfnLaunchConfiguration.BlockDeviceProperty.SnapshotId``.
:param volume_size: ``CfnLaunchConfiguration.BlockDeviceProperty.VolumeSize``.
:param volume_type: ``CfnLaunchConfiguration.BlockDeviceProperty.VolumeType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html
"""
self._values = {
}
if delete_on_termination is not None: self._values["delete_on_termination"] = delete_on_termination
if encrypted is not None: self._values["encrypted"] = encrypted
if iops is not None: self._values["iops"] = iops
if snapshot_id is not None: self._values["snapshot_id"] = snapshot_id
if volume_size is not None: self._values["volume_size"] = volume_size
if volume_type is not None: self._values["volume_type"] = volume_type
@builtins.property
def delete_on_termination(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``CfnLaunchConfiguration.BlockDeviceProperty.DeleteOnTermination``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-deleteonterm
"""
return self._values.get('delete_on_termination')
@builtins.property
def encrypted(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``CfnLaunchConfiguration.BlockDeviceProperty.Encrypted``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-encrypted
"""
return self._values.get('encrypted')
@builtins.property
def iops(self) -> typing.Optional[jsii.Number]:
"""``CfnLaunchConfiguration.BlockDeviceProperty.Iops``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-iops
"""
return self._values.get('iops')
@builtins.property
def snapshot_id(self) -> typing.Optional[str]:
"""``CfnLaunchConfiguration.BlockDeviceProperty.SnapshotId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-snapshotid
"""
return self._values.get('snapshot_id')
@builtins.property
def volume_size(self) -> typing.Optional[jsii.Number]:
"""``CfnLaunchConfiguration.BlockDeviceProperty.VolumeSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-volumesize
"""
return self._values.get('volume_size')
@builtins.property
def volume_type(self) -> typing.Optional[str]:
"""``CfnLaunchConfiguration.BlockDeviceProperty.VolumeType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html#cfn-as-launchconfig-blockdev-template-volumetype
"""
return self._values.get('volume_type')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'BlockDeviceProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
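# Illustrative sketch (not part of the generated bindings): a block device
# mapping that attaches an encrypted gp2 EBS volume. ``device_name`` is the
# only required field; the device path and volume size are hypothetical.
def _example_block_device_mapping() -> "CfnLaunchConfiguration.BlockDeviceMappingProperty":
    return CfnLaunchConfiguration.BlockDeviceMappingProperty(
        device_name="/dev/xvda",  # hypothetical device path
        ebs=CfnLaunchConfiguration.BlockDeviceProperty(
            delete_on_termination=True,  # reclaim the volume with the instance
            encrypted=True,
            volume_size=100,             # GiB; hypothetical
            volume_type="gp2",
        ),
    )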
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLaunchConfigurationProps", jsii_struct_bases=[], name_mapping={'image_id': 'imageId', 'instance_type': 'instanceType', 'associate_public_ip_address': 'associatePublicIpAddress', 'block_device_mappings': 'blockDeviceMappings', 'classic_link_vpc_id': 'classicLinkVpcId', 'classic_link_vpc_security_groups': 'classicLinkVpcSecurityGroups', 'ebs_optimized': 'ebsOptimized', 'iam_instance_profile': 'iamInstanceProfile', 'instance_id': 'instanceId', 'instance_monitoring': 'instanceMonitoring', 'kernel_id': 'kernelId', 'key_name': 'keyName', 'launch_configuration_name': 'launchConfigurationName', 'placement_tenancy': 'placementTenancy', 'ram_disk_id': 'ramDiskId', 'security_groups': 'securityGroups', 'spot_price': 'spotPrice', 'user_data': 'userData'})
class CfnLaunchConfigurationProps():
def __init__(self, *, image_id: str, instance_type: str, associate_public_ip_address: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, block_device_mappings: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnLaunchConfiguration.BlockDeviceMappingProperty"]]]]]=None, classic_link_vpc_id: typing.Optional[str]=None, classic_link_vpc_security_groups: typing.Optional[typing.List[str]]=None, ebs_optimized: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, iam_instance_profile: typing.Optional[str]=None, instance_id: typing.Optional[str]=None, instance_monitoring: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, kernel_id: typing.Optional[str]=None, key_name: typing.Optional[str]=None, launch_configuration_name: typing.Optional[str]=None, placement_tenancy: typing.Optional[str]=None, ram_disk_id: typing.Optional[str]=None, security_groups: typing.Optional[typing.List[str]]=None, spot_price: typing.Optional[str]=None, user_data: typing.Optional[str]=None):
"""Properties for defining a ``AWS::AutoScaling::LaunchConfiguration``.
:param image_id: ``AWS::AutoScaling::LaunchConfiguration.ImageId``.
:param instance_type: ``AWS::AutoScaling::LaunchConfiguration.InstanceType``.
:param associate_public_ip_address: ``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.
:param block_device_mappings: ``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.
:param classic_link_vpc_id: ``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.
:param classic_link_vpc_security_groups: ``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.
:param ebs_optimized: ``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.
:param iam_instance_profile: ``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.
:param instance_id: ``AWS::AutoScaling::LaunchConfiguration.InstanceId``.
:param instance_monitoring: ``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.
:param kernel_id: ``AWS::AutoScaling::LaunchConfiguration.KernelId``.
:param key_name: ``AWS::AutoScaling::LaunchConfiguration.KeyName``.
:param launch_configuration_name: ``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.
:param placement_tenancy: ``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.
:param ram_disk_id: ``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.
:param security_groups: ``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.
:param spot_price: ``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.
:param user_data: ``AWS::AutoScaling::LaunchConfiguration.UserData``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html
"""
self._values = {
'image_id': image_id,
'instance_type': instance_type,
}
if associate_public_ip_address is not None: self._values["associate_public_ip_address"] = associate_public_ip_address
if block_device_mappings is not None: self._values["block_device_mappings"] = block_device_mappings
if classic_link_vpc_id is not None: self._values["classic_link_vpc_id"] = classic_link_vpc_id
if classic_link_vpc_security_groups is not None: self._values["classic_link_vpc_security_groups"] = classic_link_vpc_security_groups
if ebs_optimized is not None: self._values["ebs_optimized"] = ebs_optimized
if iam_instance_profile is not None: self._values["iam_instance_profile"] = iam_instance_profile
if instance_id is not None: self._values["instance_id"] = instance_id
if instance_monitoring is not None: self._values["instance_monitoring"] = instance_monitoring
if kernel_id is not None: self._values["kernel_id"] = kernel_id
if key_name is not None: self._values["key_name"] = key_name
if launch_configuration_name is not None: self._values["launch_configuration_name"] = launch_configuration_name
if placement_tenancy is not None: self._values["placement_tenancy"] = placement_tenancy
if ram_disk_id is not None: self._values["ram_disk_id"] = ram_disk_id
if security_groups is not None: self._values["security_groups"] = security_groups
if spot_price is not None: self._values["spot_price"] = spot_price
if user_data is not None: self._values["user_data"] = user_data
@builtins.property
def image_id(self) -> str:
"""``AWS::AutoScaling::LaunchConfiguration.ImageId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-imageid
"""
return self._values.get('image_id')
@builtins.property
def instance_type(self) -> str:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancetype
"""
return self._values.get('instance_type')
@builtins.property
def associate_public_ip_address(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.AssociatePublicIpAddress``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cf-as-launchconfig-associatepubip
"""
return self._values.get('associate_public_ip_address')
@builtins.property
def block_device_mappings(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnLaunchConfiguration.BlockDeviceMappingProperty"]]]]]:
"""``AWS::AutoScaling::LaunchConfiguration.BlockDeviceMappings``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-blockdevicemappings
"""
return self._values.get('block_device_mappings')
@builtins.property
def classic_link_vpc_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcid
"""
return self._values.get('classic_link_vpc_id')
@builtins.property
def classic_link_vpc_security_groups(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::LaunchConfiguration.ClassicLinkVPCSecurityGroups``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-classiclinkvpcsecuritygroups
"""
return self._values.get('classic_link_vpc_security_groups')
@builtins.property
def ebs_optimized(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.EbsOptimized``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ebsoptimized
"""
return self._values.get('ebs_optimized')
@builtins.property
def iam_instance_profile(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.IamInstanceProfile``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-iaminstanceprofile
"""
return self._values.get('iam_instance_profile')
@builtins.property
def instance_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instanceid
"""
return self._values.get('instance_id')
@builtins.property
def instance_monitoring(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``AWS::AutoScaling::LaunchConfiguration.InstanceMonitoring``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-instancemonitoring
"""
return self._values.get('instance_monitoring')
@builtins.property
def kernel_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.KernelId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-kernelid
"""
return self._values.get('kernel_id')
@builtins.property
def key_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.KeyName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-keyname
"""
return self._values.get('key_name')
@builtins.property
def launch_configuration_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.LaunchConfigurationName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-autoscaling-launchconfig-launchconfigurationname
"""
return self._values.get('launch_configuration_name')
@builtins.property
def placement_tenancy(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.PlacementTenancy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-placementtenancy
"""
return self._values.get('placement_tenancy')
@builtins.property
def ram_disk_id(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.RamDiskId``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-ramdiskid
"""
return self._values.get('ram_disk_id')
@builtins.property
def security_groups(self) -> typing.Optional[typing.List[str]]:
"""``AWS::AutoScaling::LaunchConfiguration.SecurityGroups``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-securitygroups
"""
return self._values.get('security_groups')
@builtins.property
def spot_price(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.SpotPrice``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-spotprice
"""
return self._values.get('spot_price')
@builtins.property
def user_data(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LaunchConfiguration.UserData``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html#cfn-as-launchconfig-userdata
"""
return self._values.get('user_data')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnLaunchConfigurationProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
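# Illustrative sketch (not part of the generated bindings): creating a
# ``CfnLaunchConfiguration`` inside a construct scope. ``image_id`` and
# ``instance_type`` are the two required properties; the AMI and security
# group ids below are hypothetical placeholders.
def _example_launch_configuration(scope: aws_cdk.core.Construct) -> CfnLaunchConfiguration:
    return CfnLaunchConfiguration(
        scope, "ExampleLaunchConfig",
        image_id="ami-0123456789abcdef0",          # hypothetical AMI id
        instance_type="t3.micro",
        security_groups=["sg-0123456789abcdef0"],  # hypothetical
    )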
@jsii.implements(aws_cdk.core.IInspectable)
class CfnLifecycleHook(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnLifecycleHook"):
"""A CloudFormation ``AWS::AutoScaling::LifecycleHook``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::LifecycleHook
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group_name: str, lifecycle_transition: str, default_result: typing.Optional[str]=None, heartbeat_timeout: typing.Optional[jsii.Number]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, notification_target_arn: typing.Optional[str]=None, role_arn: typing.Optional[str]=None) -> None:
"""Create a new ``AWS::AutoScaling::LifecycleHook``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param auto_scaling_group_name: ``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
:param lifecycle_transition: ``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
:param default_result: ``AWS::AutoScaling::LifecycleHook.DefaultResult``.
:param heartbeat_timeout: ``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
:param lifecycle_hook_name: ``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
:param notification_metadata: ``AWS::AutoScaling::LifecycleHook.NotificationMetadata``.
:param notification_target_arn: ``AWS::AutoScaling::LifecycleHook.NotificationTargetARN``.
:param role_arn: ``AWS::AutoScaling::LifecycleHook.RoleARN``.
"""
props = CfnLifecycleHookProps(auto_scaling_group_name=auto_scaling_group_name, lifecycle_transition=lifecycle_transition, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, notification_target_arn=notification_target_arn, role_arn=role_arn)
jsii.create(CfnLifecycleHook, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-autoscalinggroupname
"""
return jsii.get(self, "autoScalingGroupName")
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, value: str):
jsii.set(self, "autoScalingGroupName", value)
@builtins.property
@jsii.member(jsii_name="lifecycleTransition")
def lifecycle_transition(self) -> str:
"""``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-lifecycletransition
"""
return jsii.get(self, "lifecycleTransition")
@lifecycle_transition.setter
def lifecycle_transition(self, value: str):
jsii.set(self, "lifecycleTransition", value)
@builtins.property
@jsii.member(jsii_name="defaultResult")
def default_result(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.DefaultResult``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-defaultresult
"""
return jsii.get(self, "defaultResult")
@default_result.setter
def default_result(self, value: typing.Optional[str]):
jsii.set(self, "defaultResult", value)
@builtins.property
@jsii.member(jsii_name="heartbeatTimeout")
def heartbeat_timeout(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-heartbeattimeout
"""
return jsii.get(self, "heartbeatTimeout")
@heartbeat_timeout.setter
def heartbeat_timeout(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "heartbeatTimeout", value)
@builtins.property
@jsii.member(jsii_name="lifecycleHookName")
def lifecycle_hook_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-autoscaling-lifecyclehook-lifecyclehookname
"""
return jsii.get(self, "lifecycleHookName")
@lifecycle_hook_name.setter
def lifecycle_hook_name(self, value: typing.Optional[str]):
jsii.set(self, "lifecycleHookName", value)
@builtins.property
@jsii.member(jsii_name="notificationMetadata")
def notification_metadata(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.NotificationMetadata``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationmetadata
"""
return jsii.get(self, "notificationMetadata")
@notification_metadata.setter
def notification_metadata(self, value: typing.Optional[str]):
jsii.set(self, "notificationMetadata", value)
@builtins.property
@jsii.member(jsii_name="notificationTargetArn")
def notification_target_arn(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.NotificationTargetARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationtargetarn
"""
return jsii.get(self, "notificationTargetArn")
@notification_target_arn.setter
def notification_target_arn(self, value: typing.Optional[str]):
jsii.set(self, "notificationTargetArn", value)
@builtins.property
@jsii.member(jsii_name="roleArn")
def role_arn(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.RoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-rolearn
"""
return jsii.get(self, "roleArn")
@role_arn.setter
def role_arn(self, value: typing.Optional[str]):
jsii.set(self, "roleArn", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnLifecycleHookProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group_name': 'autoScalingGroupName', 'lifecycle_transition': 'lifecycleTransition', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'lifecycle_hook_name': 'lifecycleHookName', 'notification_metadata': 'notificationMetadata', 'notification_target_arn': 'notificationTargetArn', 'role_arn': 'roleArn'})
class CfnLifecycleHookProps():
def __init__(self, *, auto_scaling_group_name: str, lifecycle_transition: str, default_result: typing.Optional[str]=None, heartbeat_timeout: typing.Optional[jsii.Number]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, notification_target_arn: typing.Optional[str]=None, role_arn: typing.Optional[str]=None):
"""Properties for defining a ``AWS::AutoScaling::LifecycleHook``.
:param auto_scaling_group_name: ``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
:param lifecycle_transition: ``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
:param default_result: ``AWS::AutoScaling::LifecycleHook.DefaultResult``.
:param heartbeat_timeout: ``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
:param lifecycle_hook_name: ``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
:param notification_metadata: ``AWS::AutoScaling::LifecycleHook.NotificationMetadata``.
:param notification_target_arn: ``AWS::AutoScaling::LifecycleHook.NotificationTargetARN``.
:param role_arn: ``AWS::AutoScaling::LifecycleHook.RoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html
"""
self._values = {
'auto_scaling_group_name': auto_scaling_group_name,
'lifecycle_transition': lifecycle_transition,
}
if default_result is not None: self._values["default_result"] = default_result
if heartbeat_timeout is not None: self._values["heartbeat_timeout"] = heartbeat_timeout
if lifecycle_hook_name is not None: self._values["lifecycle_hook_name"] = lifecycle_hook_name
if notification_metadata is not None: self._values["notification_metadata"] = notification_metadata
if notification_target_arn is not None: self._values["notification_target_arn"] = notification_target_arn
if role_arn is not None: self._values["role_arn"] = role_arn
@builtins.property
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::LifecycleHook.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-autoscalinggroupname
"""
return self._values.get('auto_scaling_group_name')
@builtins.property
def lifecycle_transition(self) -> str:
"""``AWS::AutoScaling::LifecycleHook.LifecycleTransition``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-lifecycletransition
"""
return self._values.get('lifecycle_transition')
@builtins.property
def default_result(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.DefaultResult``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-defaultresult
"""
return self._values.get('default_result')
@builtins.property
def heartbeat_timeout(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::LifecycleHook.HeartbeatTimeout``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-heartbeattimeout
"""
return self._values.get('heartbeat_timeout')
@builtins.property
def lifecycle_hook_name(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.LifecycleHookName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-autoscaling-lifecyclehook-lifecyclehookname
"""
return self._values.get('lifecycle_hook_name')
@builtins.property
def notification_metadata(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.NotificationMetadata``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationmetadata
"""
return self._values.get('notification_metadata')
@builtins.property
def notification_target_arn(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.NotificationTargetARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-notificationtargetarn
"""
return self._values.get('notification_target_arn')
@builtins.property
def role_arn(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::LifecycleHook.RoleARN``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-lifecyclehook.html#cfn-as-lifecyclehook-rolearn
"""
return self._values.get('role_arn')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnLifecycleHookProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
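# Illustrative usage sketch (not part of the generated bindings): how the
# required and optional keyword arguments of ``CfnLifecycleHookProps`` combine.
# The group name and timeout are hypothetical placeholders; the transition
# string is one of the two values Auto Scaling accepts for lifecycle hooks.
def _example_lifecycle_hook_props() -> "CfnLifecycleHookProps":
    return CfnLifecycleHookProps(
        auto_scaling_group_name="my-asg",  # hypothetical ASG name
        lifecycle_transition="autoscaling:EC2_INSTANCE_TERMINATING",
        default_result="CONTINUE",  # the other accepted value is "ABANDON"
        heartbeat_timeout=300,  # seconds; arbitrary placeholder
    )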
@jsii.implements(aws_cdk.core.IInspectable)
class CfnScalingPolicy(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy"):
"""A CloudFormation ``AWS::AutoScaling::ScalingPolicy``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::ScalingPolicy
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group_name: str, adjustment_type: typing.Optional[str]=None, cooldown: typing.Optional[str]=None, estimated_instance_warmup: typing.Optional[jsii.Number]=None, metric_aggregation_type: typing.Optional[str]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None, policy_type: typing.Optional[str]=None, scaling_adjustment: typing.Optional[jsii.Number]=None, step_adjustments: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "StepAdjustmentProperty"]]]]]=None, target_tracking_configuration: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["TargetTrackingConfigurationProperty"]]]=None) -> None:
"""Create a new ``AWS::AutoScaling::ScalingPolicy``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param auto_scaling_group_name: ``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.
:param adjustment_type: ``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.
:param cooldown: ``AWS::AutoScaling::ScalingPolicy.Cooldown``.
:param estimated_instance_warmup: ``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.
:param metric_aggregation_type: ``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.
:param min_adjustment_magnitude: ``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.
:param policy_type: ``AWS::AutoScaling::ScalingPolicy.PolicyType``.
:param scaling_adjustment: ``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.
:param step_adjustments: ``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.
:param target_tracking_configuration: ``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.
"""
props = CfnScalingPolicyProps(auto_scaling_group_name=auto_scaling_group_name, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, metric_aggregation_type=metric_aggregation_type, min_adjustment_magnitude=min_adjustment_magnitude, policy_type=policy_type, scaling_adjustment=scaling_adjustment, step_adjustments=step_adjustments, target_tracking_configuration=target_tracking_configuration)
jsii.create(CfnScalingPolicy, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-autoscalinggroupname
"""
return jsii.get(self, "autoScalingGroupName")
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, value: str):
jsii.set(self, "autoScalingGroupName", value)
@builtins.property
@jsii.member(jsii_name="adjustmentType")
def adjustment_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-adjustmenttype
"""
return jsii.get(self, "adjustmentType")
@adjustment_type.setter
def adjustment_type(self, value: typing.Optional[str]):
jsii.set(self, "adjustmentType", value)
@builtins.property
@jsii.member(jsii_name="cooldown")
def cooldown(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.Cooldown``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-cooldown
"""
return jsii.get(self, "cooldown")
@cooldown.setter
def cooldown(self, value: typing.Optional[str]):
jsii.set(self, "cooldown", value)
@builtins.property
@jsii.member(jsii_name="estimatedInstanceWarmup")
def estimated_instance_warmup(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-estimatedinstancewarmup
"""
return jsii.get(self, "estimatedInstanceWarmup")
@estimated_instance_warmup.setter
def estimated_instance_warmup(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "estimatedInstanceWarmup", value)
@builtins.property
@jsii.member(jsii_name="metricAggregationType")
def metric_aggregation_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-metricaggregationtype
"""
return jsii.get(self, "metricAggregationType")
@metric_aggregation_type.setter
def metric_aggregation_type(self, value: typing.Optional[str]):
jsii.set(self, "metricAggregationType", value)
@builtins.property
@jsii.member(jsii_name="minAdjustmentMagnitude")
def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-minadjustmentmagnitude
"""
return jsii.get(self, "minAdjustmentMagnitude")
@min_adjustment_magnitude.setter
def min_adjustment_magnitude(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "minAdjustmentMagnitude", value)
@builtins.property
@jsii.member(jsii_name="policyType")
def policy_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.PolicyType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-policytype
"""
return jsii.get(self, "policyType")
@policy_type.setter
def policy_type(self, value: typing.Optional[str]):
jsii.set(self, "policyType", value)
@builtins.property
@jsii.member(jsii_name="scalingAdjustment")
def scaling_adjustment(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-scalingadjustment
"""
return jsii.get(self, "scalingAdjustment")
@scaling_adjustment.setter
def scaling_adjustment(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "scalingAdjustment", value)
@builtins.property
@jsii.member(jsii_name="stepAdjustments")
def step_adjustments(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "StepAdjustmentProperty"]]]]]:
"""``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-stepadjustments
"""
return jsii.get(self, "stepAdjustments")
@step_adjustments.setter
def step_adjustments(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "StepAdjustmentProperty"]]]]]):
jsii.set(self, "stepAdjustments", value)
@builtins.property
@jsii.member(jsii_name="targetTrackingConfiguration")
def target_tracking_configuration(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["TargetTrackingConfigurationProperty"]]]:
"""``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration
"""
return jsii.get(self, "targetTrackingConfiguration")
@target_tracking_configuration.setter
def target_tracking_configuration(self, value: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["TargetTrackingConfigurationProperty"]]]):
jsii.set(self, "targetTrackingConfiguration", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.CustomizedMetricSpecificationProperty", jsii_struct_bases=[], name_mapping={'metric_name': 'metricName', 'namespace': 'namespace', 'statistic': 'statistic', 'dimensions': 'dimensions', 'unit': 'unit'})
class CustomizedMetricSpecificationProperty():
def __init__(self, *, metric_name: str, namespace: str, statistic: str, dimensions: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.MetricDimensionProperty"]]]]]=None, unit: typing.Optional[str]=None):
"""
:param metric_name: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.MetricName``.
:param namespace: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Namespace``.
:param statistic: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Statistic``.
:param dimensions: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Dimensions``.
:param unit: ``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Unit``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html
"""
self._values = {
'metric_name': metric_name,
'namespace': namespace,
'statistic': statistic,
}
if dimensions is not None: self._values["dimensions"] = dimensions
if unit is not None: self._values["unit"] = unit
@builtins.property
def metric_name(self) -> str:
"""``CfnScalingPolicy.CustomizedMetricSpecificationProperty.MetricName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-metricname
"""
return self._values.get('metric_name')
@builtins.property
def namespace(self) -> str:
"""``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Namespace``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-namespace
"""
return self._values.get('namespace')
@builtins.property
def statistic(self) -> str:
"""``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Statistic``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-statistic
"""
return self._values.get('statistic')
@builtins.property
def dimensions(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.MetricDimensionProperty"]]]]]:
"""``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Dimensions``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-dimensions
"""
return self._values.get('dimensions')
@builtins.property
def unit(self) -> typing.Optional[str]:
"""``CfnScalingPolicy.CustomizedMetricSpecificationProperty.Unit``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-customizedmetricspecification.html#cfn-autoscaling-scalingpolicy-customizedmetricspecification-unit
"""
return self._values.get('unit')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CustomizedMetricSpecificationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.MetricDimensionProperty", jsii_struct_bases=[], name_mapping={'name': 'name', 'value': 'value'})
class MetricDimensionProperty():
def __init__(self, *, name: str, value: str):
"""
:param name: ``CfnScalingPolicy.MetricDimensionProperty.Name``.
:param value: ``CfnScalingPolicy.MetricDimensionProperty.Value``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html
"""
self._values = {
'name': name,
'value': value,
}
@builtins.property
def name(self) -> str:
"""``CfnScalingPolicy.MetricDimensionProperty.Name``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html#cfn-autoscaling-scalingpolicy-metricdimension-name
"""
return self._values.get('name')
@builtins.property
def value(self) -> str:
"""``CfnScalingPolicy.MetricDimensionProperty.Value``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-metricdimension.html#cfn-autoscaling-scalingpolicy-metricdimension-value
"""
return self._values.get('value')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MetricDimensionProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.PredefinedMetricSpecificationProperty", jsii_struct_bases=[], name_mapping={'predefined_metric_type': 'predefinedMetricType', 'resource_label': 'resourceLabel'})
class PredefinedMetricSpecificationProperty():
def __init__(self, *, predefined_metric_type: str, resource_label: typing.Optional[str]=None):
"""
:param predefined_metric_type: ``CfnScalingPolicy.PredefinedMetricSpecificationProperty.PredefinedMetricType``.
:param resource_label: ``CfnScalingPolicy.PredefinedMetricSpecificationProperty.ResourceLabel``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html
"""
self._values = {
'predefined_metric_type': predefined_metric_type,
}
if resource_label is not None: self._values["resource_label"] = resource_label
@builtins.property
def predefined_metric_type(self) -> str:
"""``CfnScalingPolicy.PredefinedMetricSpecificationProperty.PredefinedMetricType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html#cfn-autoscaling-scalingpolicy-predefinedmetricspecification-predefinedmetrictype
"""
return self._values.get('predefined_metric_type')
@builtins.property
def resource_label(self) -> typing.Optional[str]:
"""``CfnScalingPolicy.PredefinedMetricSpecificationProperty.ResourceLabel``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-predefinedmetricspecification.html#cfn-autoscaling-scalingpolicy-predefinedmetricspecification-resourcelabel
"""
return self._values.get('resource_label')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'PredefinedMetricSpecificationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.StepAdjustmentProperty", jsii_struct_bases=[], name_mapping={'scaling_adjustment': 'scalingAdjustment', 'metric_interval_lower_bound': 'metricIntervalLowerBound', 'metric_interval_upper_bound': 'metricIntervalUpperBound'})
class StepAdjustmentProperty():
def __init__(self, *, scaling_adjustment: jsii.Number, metric_interval_lower_bound: typing.Optional[jsii.Number]=None, metric_interval_upper_bound: typing.Optional[jsii.Number]=None):
"""
:param scaling_adjustment: ``CfnScalingPolicy.StepAdjustmentProperty.ScalingAdjustment``.
:param metric_interval_lower_bound: ``CfnScalingPolicy.StepAdjustmentProperty.MetricIntervalLowerBound``.
:param metric_interval_upper_bound: ``CfnScalingPolicy.StepAdjustmentProperty.MetricIntervalUpperBound``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html
"""
self._values = {
'scaling_adjustment': scaling_adjustment,
}
if metric_interval_lower_bound is not None: self._values["metric_interval_lower_bound"] = metric_interval_lower_bound
if metric_interval_upper_bound is not None: self._values["metric_interval_upper_bound"] = metric_interval_upper_bound
@builtins.property
def scaling_adjustment(self) -> jsii.Number:
"""``CfnScalingPolicy.StepAdjustmentProperty.ScalingAdjustment``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html#cfn-autoscaling-scalingpolicy-stepadjustment-scalingadjustment
"""
return self._values.get('scaling_adjustment')
@builtins.property
def metric_interval_lower_bound(self) -> typing.Optional[jsii.Number]:
"""``CfnScalingPolicy.StepAdjustmentProperty.MetricIntervalLowerBound``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html#cfn-autoscaling-scalingpolicy-stepadjustment-metricintervallowerbound
"""
return self._values.get('metric_interval_lower_bound')
@builtins.property
def metric_interval_upper_bound(self) -> typing.Optional[jsii.Number]:
"""``CfnScalingPolicy.StepAdjustmentProperty.MetricIntervalUpperBound``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-stepadjustments.html#cfn-autoscaling-scalingpolicy-stepadjustment-metricintervalupperbound
"""
return self._values.get('metric_interval_upper_bound')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'StepAdjustmentProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicy.TargetTrackingConfigurationProperty", jsii_struct_bases=[], name_mapping={'target_value': 'targetValue', 'customized_metric_specification': 'customizedMetricSpecification', 'disable_scale_in': 'disableScaleIn', 'predefined_metric_specification': 'predefinedMetricSpecification'})
class TargetTrackingConfigurationProperty():
def __init__(self, *, target_value: jsii.Number, customized_metric_specification: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.CustomizedMetricSpecificationProperty"]]]=None, disable_scale_in: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, predefined_metric_specification: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.PredefinedMetricSpecificationProperty"]]]=None):
"""
:param target_value: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.TargetValue``.
:param customized_metric_specification: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.CustomizedMetricSpecification``.
:param disable_scale_in: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.DisableScaleIn``.
:param predefined_metric_specification: ``CfnScalingPolicy.TargetTrackingConfigurationProperty.PredefinedMetricSpecification``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html
"""
self._values = {
'target_value': target_value,
}
if customized_metric_specification is not None: self._values["customized_metric_specification"] = customized_metric_specification
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if predefined_metric_specification is not None: self._values["predefined_metric_specification"] = predefined_metric_specification
@builtins.property
def target_value(self) -> jsii.Number:
"""``CfnScalingPolicy.TargetTrackingConfigurationProperty.TargetValue``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-targetvalue
"""
return self._values.get('target_value')
@builtins.property
def customized_metric_specification(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.CustomizedMetricSpecificationProperty"]]]:
"""``CfnScalingPolicy.TargetTrackingConfigurationProperty.CustomizedMetricSpecification``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-customizedmetricspecification
"""
return self._values.get('customized_metric_specification')
@builtins.property
def disable_scale_in(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]:
"""``CfnScalingPolicy.TargetTrackingConfigurationProperty.DisableScaleIn``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-disablescalein
"""
return self._values.get('disable_scale_in')
@builtins.property
def predefined_metric_specification(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.PredefinedMetricSpecificationProperty"]]]:
"""``CfnScalingPolicy.TargetTrackingConfigurationProperty.PredefinedMetricSpecification``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-scalingpolicy-targettrackingconfiguration.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration-predefinedmetricspecification
"""
return self._values.get('predefined_metric_specification')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'TargetTrackingConfigurationProperty(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
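# Illustrative sketch (an assumption, not generated code): a target tracking
# configuration that keeps the group's average CPU at 50%.
# "ASGAverageCPUUtilization" is one of the predefined metric types Auto
# Scaling accepts; the target value is an arbitrary placeholder. A
# ``CustomizedMetricSpecificationProperty`` could be supplied via
# ``customized_metric_specification`` instead of the predefined metric.
def _example_target_tracking_configuration() -> "CfnScalingPolicy.TargetTrackingConfigurationProperty":
    return CfnScalingPolicy.TargetTrackingConfigurationProperty(
        target_value=50.0,
        predefined_metric_specification=CfnScalingPolicy.PredefinedMetricSpecificationProperty(
            predefined_metric_type="ASGAverageCPUUtilization",
        ),
        disable_scale_in=False,  # let the policy scale in as well as out
    )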
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScalingPolicyProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group_name': 'autoScalingGroupName', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'metric_aggregation_type': 'metricAggregationType', 'min_adjustment_magnitude': 'minAdjustmentMagnitude', 'policy_type': 'policyType', 'scaling_adjustment': 'scalingAdjustment', 'step_adjustments': 'stepAdjustments', 'target_tracking_configuration': 'targetTrackingConfiguration'})
class CfnScalingPolicyProps():
def __init__(self, *, auto_scaling_group_name: str, adjustment_type: typing.Optional[str]=None, cooldown: typing.Optional[str]=None, estimated_instance_warmup: typing.Optional[jsii.Number]=None, metric_aggregation_type: typing.Optional[str]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None, policy_type: typing.Optional[str]=None, scaling_adjustment: typing.Optional[jsii.Number]=None, step_adjustments: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.StepAdjustmentProperty"]]]]]=None, target_tracking_configuration: typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.TargetTrackingConfigurationProperty"]]]=None):
"""Properties for defining a ``AWS::AutoScaling::ScalingPolicy``.
:param auto_scaling_group_name: ``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.
:param adjustment_type: ``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.
:param cooldown: ``AWS::AutoScaling::ScalingPolicy.Cooldown``.
:param estimated_instance_warmup: ``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.
:param metric_aggregation_type: ``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.
:param min_adjustment_magnitude: ``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.
:param policy_type: ``AWS::AutoScaling::ScalingPolicy.PolicyType``.
:param scaling_adjustment: ``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.
:param step_adjustments: ``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.
:param target_tracking_configuration: ``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html
"""
self._values = {
'auto_scaling_group_name': auto_scaling_group_name,
}
if adjustment_type is not None: self._values["adjustment_type"] = adjustment_type
if cooldown is not None: self._values["cooldown"] = cooldown
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if metric_aggregation_type is not None: self._values["metric_aggregation_type"] = metric_aggregation_type
if min_adjustment_magnitude is not None: self._values["min_adjustment_magnitude"] = min_adjustment_magnitude
if policy_type is not None: self._values["policy_type"] = policy_type
if scaling_adjustment is not None: self._values["scaling_adjustment"] = scaling_adjustment
if step_adjustments is not None: self._values["step_adjustments"] = step_adjustments
if target_tracking_configuration is not None: self._values["target_tracking_configuration"] = target_tracking_configuration
@builtins.property
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::ScalingPolicy.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-autoscalinggroupname
"""
return self._values.get('auto_scaling_group_name')
@builtins.property
def adjustment_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.AdjustmentType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-adjustmenttype
"""
return self._values.get('adjustment_type')
@builtins.property
def cooldown(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.Cooldown``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-cooldown
"""
return self._values.get('cooldown')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScalingPolicy.EstimatedInstanceWarmup``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-estimatedinstancewarmup
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def metric_aggregation_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.MetricAggregationType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-metricaggregationtype
"""
return self._values.get('metric_aggregation_type')
@builtins.property
def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScalingPolicy.MinAdjustmentMagnitude``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-minadjustmentmagnitude
"""
return self._values.get('min_adjustment_magnitude')
@builtins.property
def policy_type(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScalingPolicy.PolicyType``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-policytype
"""
return self._values.get('policy_type')
@builtins.property
def scaling_adjustment(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScalingPolicy.ScalingAdjustment``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-scalingadjustment
"""
return self._values.get('scaling_adjustment')
@builtins.property
def step_adjustments(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional[typing.List[typing.Union[aws_cdk.core.IResolvable, "CfnScalingPolicy.StepAdjustmentProperty"]]]]]:
"""``AWS::AutoScaling::ScalingPolicy.StepAdjustments``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-as-scalingpolicy-stepadjustments
"""
return self._values.get('step_adjustments')
@builtins.property
def target_tracking_configuration(self) -> typing.Optional[typing.Union[typing.Optional[aws_cdk.core.IResolvable], typing.Optional["CfnScalingPolicy.TargetTrackingConfigurationProperty"]]]:
"""``AWS::AutoScaling::ScalingPolicy.TargetTrackingConfiguration``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html#cfn-autoscaling-scalingpolicy-targettrackingconfiguration
"""
return self._values.get('target_tracking_configuration')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
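# Illustrative sketch: step-scaling policy props assembled from the classes
# above. The interval bounds are offsets from the CloudWatch alarm threshold;
# with "ChangeInCapacity" each adjustment adds that many instances. The ASG
# name and all numbers are hypothetical placeholders.
def _example_step_scaling_policy_props() -> "CfnScalingPolicyProps":
    return CfnScalingPolicyProps(
        auto_scaling_group_name="my-asg",  # hypothetical ASG name
        policy_type="StepScaling",
        adjustment_type="ChangeInCapacity",
        metric_aggregation_type="Average",
        step_adjustments=[
            # metric between threshold and threshold + 10: add one instance
            CfnScalingPolicy.StepAdjustmentProperty(
                scaling_adjustment=1,
                metric_interval_lower_bound=0,
                metric_interval_upper_bound=10,
            ),
            # metric above threshold + 10: add two instances
            CfnScalingPolicy.StepAdjustmentProperty(
                scaling_adjustment=2,
                metric_interval_lower_bound=10,
            ),
        ],
    )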
@jsii.implements(aws_cdk.core.IInspectable)
class CfnScheduledAction(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.CfnScheduledAction"):
"""A CloudFormation ``AWS::AutoScaling::ScheduledAction``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html
cloudformationResource:
:cloudformationResource:: AWS::AutoScaling::ScheduledAction
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group_name: str, desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[str]=None, max_size: typing.Optional[jsii.Number]=None, min_size: typing.Optional[jsii.Number]=None, recurrence: typing.Optional[str]=None, start_time: typing.Optional[str]=None) -> None:
"""Create a new ``AWS::AutoScaling::ScheduledAction``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param auto_scaling_group_name: ``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.
:param desired_capacity: ``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.
:param end_time: ``AWS::AutoScaling::ScheduledAction.EndTime``.
:param max_size: ``AWS::AutoScaling::ScheduledAction.MaxSize``.
:param min_size: ``AWS::AutoScaling::ScheduledAction.MinSize``.
:param recurrence: ``AWS::AutoScaling::ScheduledAction.Recurrence``.
:param start_time: ``AWS::AutoScaling::ScheduledAction.StartTime``.
"""
props = CfnScheduledActionProps(auto_scaling_group_name=auto_scaling_group_name, desired_capacity=desired_capacity, end_time=end_time, max_size=max_size, min_size=min_size, recurrence=recurrence, start_time=start_time)
jsii.create(CfnScheduledAction, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
stability
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[str,typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-asgname
"""
return jsii.get(self, "autoScalingGroupName")
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, value: str):
jsii.set(self, "autoScalingGroupName", value)
@builtins.property
@jsii.member(jsii_name="desiredCapacity")
def desired_capacity(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-desiredcapacity
"""
return jsii.get(self, "desiredCapacity")
@desired_capacity.setter
def desired_capacity(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "desiredCapacity", value)
@builtins.property
@jsii.member(jsii_name="endTime")
def end_time(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScheduledAction.EndTime``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-endtime
"""
return jsii.get(self, "endTime")
@end_time.setter
def end_time(self, value: typing.Optional[str]):
jsii.set(self, "endTime", value)
@builtins.property
@jsii.member(jsii_name="maxSize")
def max_size(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScheduledAction.MaxSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-maxsize
"""
return jsii.get(self, "maxSize")
@max_size.setter
def max_size(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "maxSize", value)
@builtins.property
@jsii.member(jsii_name="minSize")
def min_size(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScheduledAction.MinSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-minsize
"""
return jsii.get(self, "minSize")
@min_size.setter
def min_size(self, value: typing.Optional[jsii.Number]):
jsii.set(self, "minSize", value)
@builtins.property
@jsii.member(jsii_name="recurrence")
def recurrence(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScheduledAction.Recurrence``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-recurrence
"""
return jsii.get(self, "recurrence")
@recurrence.setter
def recurrence(self, value: typing.Optional[str]):
jsii.set(self, "recurrence", value)
@builtins.property
@jsii.member(jsii_name="startTime")
def start_time(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScheduledAction.StartTime``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-starttime
"""
return jsii.get(self, "startTime")
@start_time.setter
def start_time(self, value: typing.Optional[str]):
jsii.set(self, "startTime", value)
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CfnScheduledActionProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group_name': 'autoScalingGroupName', 'desired_capacity': 'desiredCapacity', 'end_time': 'endTime', 'max_size': 'maxSize', 'min_size': 'minSize', 'recurrence': 'recurrence', 'start_time': 'startTime'})
class CfnScheduledActionProps():
def __init__(self, *, auto_scaling_group_name: str, desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[str]=None, max_size: typing.Optional[jsii.Number]=None, min_size: typing.Optional[jsii.Number]=None, recurrence: typing.Optional[str]=None, start_time: typing.Optional[str]=None):
"""Properties for defining a ``AWS::AutoScaling::ScheduledAction``.
:param auto_scaling_group_name: ``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.
:param desired_capacity: ``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.
:param end_time: ``AWS::AutoScaling::ScheduledAction.EndTime``.
:param max_size: ``AWS::AutoScaling::ScheduledAction.MaxSize``.
:param min_size: ``AWS::AutoScaling::ScheduledAction.MinSize``.
:param recurrence: ``AWS::AutoScaling::ScheduledAction.Recurrence``.
:param start_time: ``AWS::AutoScaling::ScheduledAction.StartTime``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html
"""
self._values = {
'auto_scaling_group_name': auto_scaling_group_name,
}
if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
if end_time is not None: self._values["end_time"] = end_time
if max_size is not None: self._values["max_size"] = max_size
if min_size is not None: self._values["min_size"] = min_size
if recurrence is not None: self._values["recurrence"] = recurrence
if start_time is not None: self._values["start_time"] = start_time
@builtins.property
def auto_scaling_group_name(self) -> str:
"""``AWS::AutoScaling::ScheduledAction.AutoScalingGroupName``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-asgname
"""
return self._values.get('auto_scaling_group_name')
@builtins.property
def desired_capacity(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScheduledAction.DesiredCapacity``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-desiredcapacity
"""
return self._values.get('desired_capacity')
@builtins.property
def end_time(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScheduledAction.EndTime``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-endtime
"""
return self._values.get('end_time')
@builtins.property
def max_size(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScheduledAction.MaxSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-maxsize
"""
return self._values.get('max_size')
@builtins.property
def min_size(self) -> typing.Optional[jsii.Number]:
"""``AWS::AutoScaling::ScheduledAction.MinSize``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-minsize
"""
return self._values.get('min_size')
@builtins.property
def recurrence(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScheduledAction.Recurrence``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-recurrence
"""
return self._values.get('recurrence')
@builtins.property
def start_time(self) -> typing.Optional[str]:
"""``AWS::AutoScaling::ScheduledAction.StartTime``.
see
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-as-scheduledaction.html#cfn-as-scheduledaction-starttime
"""
return self._values.get('start_time')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CfnScheduledActionProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
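# Illustrative sketch: scheduled-action props that resize the group every
# morning. ``recurrence`` is a cron expression evaluated in UTC, and
# ``start_time``/``end_time`` (not used here) take UTC timestamps of the form
# "YYYY-MM-DDThh:mm:ssZ". The group name and sizes are placeholders.
def _example_scheduled_action_props() -> "CfnScheduledActionProps":
    return CfnScheduledActionProps(
        auto_scaling_group_name="my-asg",  # hypothetical ASG name
        min_size=5,
        max_size=10,
        recurrence="0 8 * * *",  # 08:00 UTC, every day
    )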
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CommonAutoScalingGroupProps", jsii_struct_bases=[], name_mapping={'allow_all_outbound': 'allowAllOutbound', 'associate_public_ip_address': 'associatePublicIpAddress', 'cooldown': 'cooldown', 'desired_capacity': 'desiredCapacity', 'health_check': 'healthCheck', 'ignore_unmodified_size_properties': 'ignoreUnmodifiedSizeProperties', 'key_name': 'keyName', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'notifications_topic': 'notificationsTopic', 'replacing_update_min_successful_instances_percent': 'replacingUpdateMinSuccessfulInstancesPercent', 'resource_signal_count': 'resourceSignalCount', 'resource_signal_timeout': 'resourceSignalTimeout', 'rolling_update_configuration': 'rollingUpdateConfiguration', 'spot_price': 'spotPrice', 'update_type': 'updateType', 'vpc_subnets': 'vpcSubnets'})
class CommonAutoScalingGroupProps():
def __init__(self, *, allow_all_outbound: typing.Optional[bool]=None, associate_public_ip_address: typing.Optional[bool]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, desired_capacity: typing.Optional[jsii.Number]=None, health_check: typing.Optional["HealthCheck"]=None, ignore_unmodified_size_properties: typing.Optional[bool]=None, key_name: typing.Optional[str]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, notifications_topic: typing.Optional[aws_cdk.aws_sns.ITopic]=None, replacing_update_min_successful_instances_percent: typing.Optional[jsii.Number]=None, resource_signal_count: typing.Optional[jsii.Number]=None, resource_signal_timeout: typing.Optional[aws_cdk.core.Duration]=None, rolling_update_configuration: typing.Optional["RollingUpdateConfiguration"]=None, spot_price: typing.Optional[str]=None, update_type: typing.Optional["UpdateType"]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None):
"""Basic properties of an AutoScalingGroup, except the exact machines to run and where they should run.
Constructs that want to create AutoScalingGroups can inherit
this interface and specialize the essential parts in various ways.
:param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
:param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. Default: - Use subnet setting.
:param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
:param desired_capacity: Initial number of instances in the fleet. If this is set to a number, every deployment will reset the number of instances to this number. It is recommended to leave this value blank. Default: minCapacity, and leave unchanged during deployment
:param health_check: Configuration for health checks. Default: - HealthCheck.ec2 with no grace period
:param ignore_unmodified_size_properties: If the ASG has scheduled actions, don't reset unchanged group sizes. Only used if the ASG has scheduled actions (which may scale your ASG up or down regardless of cdk deployments). If true, the size of the group will only be reset if it has been changed in the CDK app. If false, the sizes will always be changed back to what they were in the CDK app on deployment. Default: true
:param key_name: Name of SSH keypair to grant access to instances. Default: - No SSH access will be possible.
:param max_capacity: Maximum number of instances in the fleet. Default: desiredCapacity
:param min_capacity: Minimum number of instances in the fleet. Default: 1
:param notifications_topic: SNS topic to send notifications about fleet changes. Default: - No fleet change notifications will be sent.
:param replacing_update_min_successful_instances_percent: Configuration for replacing updates. Only used if updateType == UpdateType.ReplacingUpdate. Specifies how many instances must signal success for the update to succeed. Default: minSuccessfulInstancesPercent
:param resource_signal_count: How many ResourceSignal calls CloudFormation expects before the resource is considered created. Default: 1
:param resource_signal_timeout: The length of time to wait for the resourceSignalCount. The maximum value is 43200 seconds (12 hours). Default: Duration.minutes(5)
:param rolling_update_configuration: Configuration for rolling updates. Only used if updateType == UpdateType.RollingUpdate. Default: - RollingUpdateConfiguration with defaults.
:param spot_price: The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. Default: none
:param update_type: What to do when an AutoScalingGroup's instance configuration is changed. This is applied when any of the settings on the ASG are changed that affect how the instances should be created (VPC, instance type, startup scripts, etc.). It indicates how the existing instances should be replaced with new instances matching the new config. By default, nothing is done and only new instances are launched with the new config. Default: UpdateType.None
:param vpc_subnets: Where to place instances within the VPC. Default: - All Private subnets.
"""
if isinstance(rolling_update_configuration, dict): rolling_update_configuration = RollingUpdateConfiguration(**rolling_update_configuration)
if isinstance(vpc_subnets, dict): vpc_subnets = aws_cdk.aws_ec2.SubnetSelection(**vpc_subnets)
self._values = {
}
if allow_all_outbound is not None: self._values["allow_all_outbound"] = allow_all_outbound
if associate_public_ip_address is not None: self._values["associate_public_ip_address"] = associate_public_ip_address
if cooldown is not None: self._values["cooldown"] = cooldown
if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
if health_check is not None: self._values["health_check"] = health_check
if ignore_unmodified_size_properties is not None: self._values["ignore_unmodified_size_properties"] = ignore_unmodified_size_properties
if key_name is not None: self._values["key_name"] = key_name
if max_capacity is not None: self._values["max_capacity"] = max_capacity
if min_capacity is not None: self._values["min_capacity"] = min_capacity
if notifications_topic is not None: self._values["notifications_topic"] = notifications_topic
if replacing_update_min_successful_instances_percent is not None: self._values["replacing_update_min_successful_instances_percent"] = replacing_update_min_successful_instances_percent
if resource_signal_count is not None: self._values["resource_signal_count"] = resource_signal_count
if resource_signal_timeout is not None: self._values["resource_signal_timeout"] = resource_signal_timeout
if rolling_update_configuration is not None: self._values["rolling_update_configuration"] = rolling_update_configuration
if spot_price is not None: self._values["spot_price"] = spot_price
if update_type is not None: self._values["update_type"] = update_type
if vpc_subnets is not None: self._values["vpc_subnets"] = vpc_subnets
@builtins.property
def allow_all_outbound(self) -> typing.Optional[bool]:
"""Whether the instances can initiate connections to anywhere by default.
default
:default: true
"""
return self._values.get('allow_all_outbound')
@builtins.property
def associate_public_ip_address(self) -> typing.Optional[bool]:
"""Whether instances in the Auto Scaling Group should have public IP addresses associated with them.
default
:default: - Use subnet setting.
"""
return self._values.get('associate_public_ip_address')
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Default scaling cooldown for this AutoScalingGroup.
default
:default: Duration.minutes(5)
"""
return self._values.get('cooldown')
@builtins.property
def desired_capacity(self) -> typing.Optional[jsii.Number]:
"""Initial amount of instances in the fleet.
If this is set to a number, every deployment will reset the amount of
instances to this number. It is recommended to leave this value blank.
default
:default: minCapacity, and leave unchanged during deployment
see
:see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
"""
return self._values.get('desired_capacity')
@builtins.property
def health_check(self) -> typing.Optional["HealthCheck"]:
"""Configuration for health checks.
default
:default: - HealthCheck.ec2 with no grace period
"""
return self._values.get('health_check')
@builtins.property
def ignore_unmodified_size_properties(self) -> typing.Optional[bool]:
"""If the ASG has scheduled actions, don't reset unchanged group sizes.
Only used if the ASG has scheduled actions (which may scale your ASG up
or down regardless of cdk deployments). If true, the size of the group
will only be reset if it has been changed in the CDK app. If false, the
sizes will always be changed back to what they were in the CDK app
on deployment.
default
:default: true
"""
return self._values.get('ignore_unmodified_size_properties')
@builtins.property
def key_name(self) -> typing.Optional[str]:
"""Name of SSH keypair to grant access to instances.
default
:default: - No SSH access will be possible.
"""
return self._values.get('key_name')
@builtins.property
def max_capacity(self) -> typing.Optional[jsii.Number]:
"""Maximum number of instances in the fleet.
default
:default: desiredCapacity
"""
return self._values.get('max_capacity')
@builtins.property
def min_capacity(self) -> typing.Optional[jsii.Number]:
"""Minimum number of instances in the fleet.
default
:default: 1
"""
return self._values.get('min_capacity')
@builtins.property
def notifications_topic(self) -> typing.Optional[aws_cdk.aws_sns.ITopic]:
"""SNS topic to send notifications about fleet changes.
default
:default: - No fleet change notifications will be sent.
"""
return self._values.get('notifications_topic')
@builtins.property
def replacing_update_min_successful_instances_percent(self) -> typing.Optional[jsii.Number]:
"""Configuration for replacing updates.
Only used if updateType == UpdateType.ReplacingUpdate. Specifies how
many instances must signal success for the update to succeed.
default
:default: minSuccessfulInstancesPercent
"""
return self._values.get('replacing_update_min_successful_instances_percent')
@builtins.property
def resource_signal_count(self) -> typing.Optional[jsii.Number]:
"""How many ResourceSignal calls CloudFormation expects before the resource is considered created.
default
:default: 1
"""
return self._values.get('resource_signal_count')
@builtins.property
def resource_signal_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
"""The length of time to wait for the resourceSignalCount.
The maximum value is 43200 seconds (12 hours).
default
:default: Duration.minutes(5)
"""
return self._values.get('resource_signal_timeout')
@builtins.property
def rolling_update_configuration(self) -> typing.Optional["RollingUpdateConfiguration"]:
"""Configuration for rolling updates.
Only used if updateType == UpdateType.RollingUpdate.
default
:default: - RollingUpdateConfiguration with defaults.
"""
return self._values.get('rolling_update_configuration')
@builtins.property
def spot_price(self) -> typing.Optional[str]:
"""The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request.
Spot Instances are
launched when the price you specify exceeds the current Spot market price.
default
:default: none
"""
return self._values.get('spot_price')
@builtins.property
def update_type(self) -> typing.Optional["UpdateType"]:
"""What to do when an AutoScalingGroup's instance configuration is changed.
This is applied when any of the settings on the ASG are changed that
affect how the instances should be created (VPC, instance type, startup
scripts, etc.). It indicates how the existing instances should be
replaced with new instances matching the new config. By default, nothing
is done and only new instances are launched with the new config.
default
:default: UpdateType.None
"""
return self._values.get('update_type')
@builtins.property
def vpc_subnets(self) -> typing.Optional[aws_cdk.aws_ec2.SubnetSelection]:
"""Where to place instances within the VPC.
default
:default: - All Private subnets.
"""
return self._values.get('vpc_subnets')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CommonAutoScalingGroupProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
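# Illustrative sketch of the capacity- and update-related subset of
# ``CommonAutoScalingGroupProps``. It assumes ``UpdateType.ROLLING_UPDATE`` is
# the rolling-update member of the ``UpdateType`` enum defined elsewhere in
# this module; the capacities and cooldown are arbitrary placeholders.
def _example_common_asg_props() -> "CommonAutoScalingGroupProps":
    return CommonAutoScalingGroupProps(
        min_capacity=1,
        max_capacity=4,
        cooldown=aws_cdk.core.Duration.minutes(5),  # default scaling cooldown
        update_type=UpdateType.ROLLING_UPDATE,  # replace instances in batches
    )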
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.AutoScalingGroupProps", jsii_struct_bases=[CommonAutoScalingGroupProps], name_mapping={'allow_all_outbound': 'allowAllOutbound', 'associate_public_ip_address': 'associatePublicIpAddress', 'cooldown': 'cooldown', 'desired_capacity': 'desiredCapacity', 'health_check': 'healthCheck', 'ignore_unmodified_size_properties': 'ignoreUnmodifiedSizeProperties', 'key_name': 'keyName', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'notifications_topic': 'notificationsTopic', 'replacing_update_min_successful_instances_percent': 'replacingUpdateMinSuccessfulInstancesPercent', 'resource_signal_count': 'resourceSignalCount', 'resource_signal_timeout': 'resourceSignalTimeout', 'rolling_update_configuration': 'rollingUpdateConfiguration', 'spot_price': 'spotPrice', 'update_type': 'updateType', 'vpc_subnets': 'vpcSubnets', 'instance_type': 'instanceType', 'machine_image': 'machineImage', 'vpc': 'vpc', 'block_devices': 'blockDevices', 'role': 'role', 'user_data': 'userData'})
class AutoScalingGroupProps(CommonAutoScalingGroupProps):
def __init__(self, *, allow_all_outbound: typing.Optional[bool]=None, associate_public_ip_address: typing.Optional[bool]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, desired_capacity: typing.Optional[jsii.Number]=None, health_check: typing.Optional["HealthCheck"]=None, ignore_unmodified_size_properties: typing.Optional[bool]=None, key_name: typing.Optional[str]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, notifications_topic: typing.Optional[aws_cdk.aws_sns.ITopic]=None, replacing_update_min_successful_instances_percent: typing.Optional[jsii.Number]=None, resource_signal_count: typing.Optional[jsii.Number]=None, resource_signal_timeout: typing.Optional[aws_cdk.core.Duration]=None, rolling_update_configuration: typing.Optional["RollingUpdateConfiguration"]=None, spot_price: typing.Optional[str]=None, update_type: typing.Optional["UpdateType"]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None, instance_type: aws_cdk.aws_ec2.InstanceType, machine_image: aws_cdk.aws_ec2.IMachineImage, vpc: aws_cdk.aws_ec2.IVpc, block_devices: typing.Optional[typing.List["BlockDevice"]]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, user_data: typing.Optional[aws_cdk.aws_ec2.UserData]=None):
"""Properties of a Fleet.
:param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
:param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. Default: - Use subnet setting.
:param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
:param desired_capacity: Initial number of instances in the fleet. If this is set to a number, every deployment will reset the number of instances to this number. It is recommended to leave this value blank. Default: minCapacity, and leave unchanged during deployment
:param health_check: Configuration for health checks. Default: - HealthCheck.ec2 with no grace period
:param ignore_unmodified_size_properties: If the ASG has scheduled actions, don't reset unchanged group sizes. Only used if the ASG has scheduled actions (which may scale your ASG up or down regardless of cdk deployments). If true, the size of the group will only be reset if it has been changed in the CDK app. If false, the sizes will always be changed back to what they were in the CDK app on deployment. Default: true
:param key_name: Name of SSH keypair to grant access to instances. Default: - No SSH access will be possible.
:param max_capacity: Maximum number of instances in the fleet. Default: desiredCapacity
:param min_capacity: Minimum number of instances in the fleet. Default: 1
:param notifications_topic: SNS topic to send notifications about fleet changes. Default: - No fleet change notifications will be sent.
:param replacing_update_min_successful_instances_percent: Configuration for replacing updates. Only used if updateType == UpdateType.ReplacingUpdate. Specifies how many instances must signal success for the update to succeed. Default: minSuccessfulInstancesPercent
:param resource_signal_count: How many ResourceSignal calls CloudFormation expects before the resource is considered created. Default: 1
:param resource_signal_timeout: The length of time to wait for the resourceSignalCount. The maximum value is 43200 (12 hours). Default: Duration.minutes(5)
:param rolling_update_configuration: Configuration for rolling updates. Only used if updateType == UpdateType.RollingUpdate. Default: - RollingUpdateConfiguration with defaults.
:param spot_price: The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. Default: none
:param update_type: What to do when an AutoScalingGroup's instance configuration is changed. This is applied when any of the settings on the ASG are changed that affect how the instances should be created (VPC, instance type, startup scripts, etc.). It indicates how the existing instances should be replaced with new instances matching the new config. By default, nothing is done and only new instances are launched with the new config. Default: UpdateType.None
:param vpc_subnets: Where to place instances within the VPC. Default: - All Private subnets.
:param instance_type: Type of instance to launch.
:param machine_image: AMI to launch.
:param vpc: VPC to launch these instances in.
:param block_devices: Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes. Each instance that is launched has an associated root device volume, either an Amazon EBS volume or an instance store volume. You can use block device mappings to specify additional EBS volumes or instance store volumes to attach to an instance when it is launched. Default: - Uses the block device mapping of the AMI
:param role: An IAM role to associate with the instance profile assigned to this Auto Scaling Group. The role must be assumable by the service principal ``ec2.amazonaws.com``: Default: A role will automatically be created, it can be accessed via the ``role`` property
:param user_data: Specific UserData to use. The UserData may still be mutated after creation. Default: - A UserData object appropriate for the MachineImage's Operating System is created.
"""
if isinstance(rolling_update_configuration, dict): rolling_update_configuration = RollingUpdateConfiguration(**rolling_update_configuration)
if isinstance(vpc_subnets, dict): vpc_subnets = aws_cdk.aws_ec2.SubnetSelection(**vpc_subnets)
self._values = {
'instance_type': instance_type,
'machine_image': machine_image,
'vpc': vpc,
}
if allow_all_outbound is not None: self._values["allow_all_outbound"] = allow_all_outbound
if associate_public_ip_address is not None: self._values["associate_public_ip_address"] = associate_public_ip_address
if cooldown is not None: self._values["cooldown"] = cooldown
if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
if health_check is not None: self._values["health_check"] = health_check
if ignore_unmodified_size_properties is not None: self._values["ignore_unmodified_size_properties"] = ignore_unmodified_size_properties
if key_name is not None: self._values["key_name"] = key_name
if max_capacity is not None: self._values["max_capacity"] = max_capacity
if min_capacity is not None: self._values["min_capacity"] = min_capacity
if notifications_topic is not None: self._values["notifications_topic"] = notifications_topic
if replacing_update_min_successful_instances_percent is not None: self._values["replacing_update_min_successful_instances_percent"] = replacing_update_min_successful_instances_percent
if resource_signal_count is not None: self._values["resource_signal_count"] = resource_signal_count
if resource_signal_timeout is not None: self._values["resource_signal_timeout"] = resource_signal_timeout
if rolling_update_configuration is not None: self._values["rolling_update_configuration"] = rolling_update_configuration
if spot_price is not None: self._values["spot_price"] = spot_price
if update_type is not None: self._values["update_type"] = update_type
if vpc_subnets is not None: self._values["vpc_subnets"] = vpc_subnets
if block_devices is not None: self._values["block_devices"] = block_devices
if role is not None: self._values["role"] = role
if user_data is not None: self._values["user_data"] = user_data
@builtins.property
def allow_all_outbound(self) -> typing.Optional[bool]:
"""Whether the instances can initiate connections to anywhere by default.
default
:default: true
"""
return self._values.get('allow_all_outbound')
@builtins.property
def associate_public_ip_address(self) -> typing.Optional[bool]:
"""Whether instances in the Auto Scaling Group should have public IP addresses associated with them.
default
:default: - Use subnet setting.
"""
return self._values.get('associate_public_ip_address')
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Default scaling cooldown for this AutoScalingGroup.
default
:default: Duration.minutes(5)
"""
return self._values.get('cooldown')
@builtins.property
def desired_capacity(self) -> typing.Optional[jsii.Number]:
"""Initial amount of instances in the fleet.
If this is set to a number, every deployment will reset the amount of
instances to this number. It is recommended to leave this value blank.
default
:default: minCapacity, and leave unchanged during deployment
see
:see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-desiredcapacity
"""
return self._values.get('desired_capacity')
@builtins.property
def health_check(self) -> typing.Optional["HealthCheck"]:
"""Configuration for health checks.
default
:default: - HealthCheck.ec2 with no grace period
"""
return self._values.get('health_check')
@builtins.property
def ignore_unmodified_size_properties(self) -> typing.Optional[bool]:
"""If the ASG has scheduled actions, don't reset unchanged group sizes.
Only used if the ASG has scheduled actions (which may scale your ASG up
or down regardless of cdk deployments). If true, the size of the group
will only be reset if it has been changed in the CDK app. If false, the
sizes will always be changed back to what they were in the CDK app
on deployment.
default
:default: true
"""
return self._values.get('ignore_unmodified_size_properties')
@builtins.property
def key_name(self) -> typing.Optional[str]:
"""Name of SSH keypair to grant access to instances.
default
:default: - No SSH access will be possible.
"""
return self._values.get('key_name')
@builtins.property
def max_capacity(self) -> typing.Optional[jsii.Number]:
"""Maximum number of instances in the fleet.
default
:default: desiredCapacity
"""
return self._values.get('max_capacity')
@builtins.property
def min_capacity(self) -> typing.Optional[jsii.Number]:
"""Minimum number of instances in the fleet.
default
:default: 1
"""
return self._values.get('min_capacity')
@builtins.property
def notifications_topic(self) -> typing.Optional[aws_cdk.aws_sns.ITopic]:
"""SNS topic to send notifications about fleet changes.
default
:default: - No fleet change notifications will be sent.
"""
return self._values.get('notifications_topic')
@builtins.property
def replacing_update_min_successful_instances_percent(self) -> typing.Optional[jsii.Number]:
"""Configuration for replacing updates.
Only used if updateType == UpdateType.ReplacingUpdate. Specifies how
many instances must signal success for the update to succeed.
default
:default: minSuccessfulInstancesPercent
"""
return self._values.get('replacing_update_min_successful_instances_percent')
@builtins.property
def resource_signal_count(self) -> typing.Optional[jsii.Number]:
"""How many ResourceSignal calls CloudFormation expects before the resource is considered created.
default
:default: 1
"""
return self._values.get('resource_signal_count')
@builtins.property
def resource_signal_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
"""The length of time to wait for the resourceSignalCount.
The maximum value is 43200 (12 hours).
default
:default: Duration.minutes(5)
"""
return self._values.get('resource_signal_timeout')
@builtins.property
def rolling_update_configuration(self) -> typing.Optional["RollingUpdateConfiguration"]:
"""Configuration for rolling updates.
Only used if updateType == UpdateType.RollingUpdate.
default
:default: - RollingUpdateConfiguration with defaults.
"""
return self._values.get('rolling_update_configuration')
@builtins.property
def spot_price(self) -> typing.Optional[str]:
"""The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request.
Spot Instances are
launched when the price you specify exceeds the current Spot market price.
default
:default: none
"""
return self._values.get('spot_price')
@builtins.property
def update_type(self) -> typing.Optional["UpdateType"]:
"""What to do when an AutoScalingGroup's instance configuration is changed.
This is applied when any of the settings on the ASG are changed that
affect how the instances should be created (VPC, instance type, startup
scripts, etc.). It indicates how the existing instances should be
replaced with new instances matching the new config. By default, nothing
is done and only new instances are launched with the new config.
default
:default: UpdateType.None
"""
return self._values.get('update_type')
@builtins.property
def vpc_subnets(self) -> typing.Optional[aws_cdk.aws_ec2.SubnetSelection]:
"""Where to place instances within the VPC.
default
:default: - All Private subnets.
"""
return self._values.get('vpc_subnets')
@builtins.property
def instance_type(self) -> aws_cdk.aws_ec2.InstanceType:
"""Type of instance to launch."""
return self._values.get('instance_type')
@builtins.property
def machine_image(self) -> aws_cdk.aws_ec2.IMachineImage:
"""AMI to launch."""
return self._values.get('machine_image')
@builtins.property
def vpc(self) -> aws_cdk.aws_ec2.IVpc:
"""VPC to launch these instances in."""
return self._values.get('vpc')
@builtins.property
def block_devices(self) -> typing.Optional[typing.List["BlockDevice"]]:
"""Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes.
Each instance that is launched has an associated root device volume,
either an Amazon EBS volume or an instance store volume.
You can use block device mappings to specify additional EBS volumes or
instance store volumes to attach to an instance when it is launched.
default
:default: - Uses the block device mapping of the AMI
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
"""
return self._values.get('block_devices')
@builtins.property
def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
"""An IAM role to associate with the instance profile assigned to this Auto Scaling Group.
The role must be assumable by the service principal ``ec2.amazonaws.com``:
default
:default: A role will automatically be created, it can be accessed via the ``role`` property
Example::
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
role = iam.Role(self, "MyRole",
assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")
)
"""
return self._values.get('role')
@builtins.property
def user_data(self) -> typing.Optional[aws_cdk.aws_ec2.UserData]:
"""Specific UserData to use.
The UserData may still be mutated after creation.
default
:default:
- A UserData object appropriate for the MachineImage's
Operating System is created.
"""
return self._values.get('user_data')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'AutoScalingGroupProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
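# A sketch of the three required AutoScalingGroupProps (instance_type,
# machine_image, vpc) plus two common optional knobs. The ``stack`` and ``vpc``
# arguments are assumed to come from the caller, and ``AmazonLinuxImage`` is
# an assumed machine image from aws_cdk.aws_ec2; the helper is never invoked
# by this module:
def _example_auto_scaling_group(stack: aws_cdk.core.Construct,
                                vpc: aws_cdk.aws_ec2.IVpc) -> "AutoScalingGroup":
    """Sketch: a small fleet of t3.micro Amazon Linux instances."""
    return AutoScalingGroup(stack, "Fleet",
        vpc=vpc,
        instance_type=aws_cdk.aws_ec2.InstanceType("t3.micro"),
        machine_image=aws_cdk.aws_ec2.AmazonLinuxImage(),
        min_capacity=1,
        max_capacity=4)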
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CpuUtilizationScalingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_utilization_percent': 'targetUtilizationPercent'})
class CpuUtilizationScalingProps(BaseTargetTrackingProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_utilization_percent: jsii.Number):
"""Properties for enabling scaling based on CPU utilization.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param target_utilization_percent: Target average CPU utilization across the fleet.
"""
self._values = {
'target_utilization_percent': target_utilization_percent,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def target_utilization_percent(self) -> jsii.Number:
"""Target average CPU utilization across the task."""
return self._values.get('target_utilization_percent')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CpuUtilizationScalingProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
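# CpuUtilizationScalingProps maps one-to-one onto the keyword arguments of
# ``scale_on_cpu_utilization`` (see IAutoScalingGroup below). A sketch, assuming
# ``asg`` is an AutoScalingGroup created elsewhere; never invoked by this module:
def _example_cpu_target_tracking(asg: "AutoScalingGroup") -> "TargetTrackingScalingPolicy":
    """Sketch: keep average CPU around 50%, with a 5 minute cooldown."""
    return asg.scale_on_cpu_utilization("CpuScaling",
        target_utilization_percent=50,
        cooldown=aws_cdk.core.Duration.minutes(5))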
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.CronOptions", jsii_struct_bases=[], name_mapping={'day': 'day', 'hour': 'hour', 'minute': 'minute', 'month': 'month', 'week_day': 'weekDay'})
class CronOptions():
def __init__(self, *, day: typing.Optional[str]=None, hour: typing.Optional[str]=None, minute: typing.Optional[str]=None, month: typing.Optional[str]=None, week_day: typing.Optional[str]=None):
"""Options to configure a cron expression.
All fields are strings so you can use complex expressions. Absence of
a field implies '*' or '?', whichever one is appropriate.
:param day: The day of the month to run this rule at. Default: - Every day of the month
:param hour: The hour to run this rule at. Default: - Every hour
:param minute: The minute to run this rule at. Default: - Every minute
:param month: The month to run this rule at. Default: - Every month
:param week_day: The day of the week to run this rule at. Default: - Any day of the week
see
:see: http://crontab.org/
"""
self._values = {
}
if day is not None: self._values["day"] = day
if hour is not None: self._values["hour"] = hour
if minute is not None: self._values["minute"] = minute
if month is not None: self._values["month"] = month
if week_day is not None: self._values["week_day"] = week_day
@builtins.property
def day(self) -> typing.Optional[str]:
"""The day of the month to run this rule at.
default
:default: - Every day of the month
"""
return self._values.get('day')
@builtins.property
def hour(self) -> typing.Optional[str]:
"""The hour to run this rule at.
default
:default: - Every hour
"""
return self._values.get('hour')
@builtins.property
def minute(self) -> typing.Optional[str]:
"""The minute to run this rule at.
default
:default: - Every minute
"""
return self._values.get('minute')
@builtins.property
def month(self) -> typing.Optional[str]:
"""The month to run this rule at.
default
:default: - Every month
"""
return self._values.get('month')
@builtins.property
def week_day(self) -> typing.Optional[str]:
"""The day of the week to run this rule at.
default
:default: - Any day of the week
"""
return self._values.get('week_day')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'CronOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
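# CronOptions is consumed by ``Schedule.cron`` (the Schedule class is defined
# elsewhere in this module). A sketch of an 08:00 weekday expression; field
# names follow CronOptions above, and unset fields default to '*' or '?':
def _example_cron_schedule() -> "Schedule":
    """Sketch: fire at 08:00 on Monday through Friday."""
    return Schedule.cron(hour="8", minute="0", week_day="MON-FRI")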
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.DefaultResult")
class DefaultResult(enum.Enum):
CONTINUE = "CONTINUE"
ABANDON = "ABANDON"
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceOptionsBase", jsii_struct_bases=[], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType'})
class EbsDeviceOptionsBase():
def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None):
"""Base block device options for an EBS volume.
:param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
:param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
:param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
"""
self._values = {
}
if delete_on_termination is not None: self._values["delete_on_termination"] = delete_on_termination
if iops is not None: self._values["iops"] = iops
if volume_type is not None: self._values["volume_type"] = volume_type
@builtins.property
def delete_on_termination(self) -> typing.Optional[bool]:
"""Indicates whether to delete the volume when the instance is terminated.
default
:default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
"""
return self._values.get('delete_on_termination')
@builtins.property
def iops(self) -> typing.Optional[jsii.Number]:
"""The number of I/O operations per second (IOPS) to provision for the volume.
Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}
The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS,
you need at least 100 GiB storage on the volume.
default
:default: - none, required for {@link EbsDeviceVolumeType.IO1}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('iops')
@builtins.property
def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
"""The EBS volume type.
default
:default: {@link EbsDeviceVolumeType.GP2}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('volume_type')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'EbsDeviceOptionsBase(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceOptions", jsii_struct_bases=[EbsDeviceOptionsBase], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType', 'encrypted': 'encrypted'})
class EbsDeviceOptions(EbsDeviceOptionsBase):
def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None, encrypted: typing.Optional[bool]=None):
"""Block device options for an EBS volume.
:param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
:param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
:param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
:param encrypted: Specifies whether the EBS volume is encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption Default: false
"""
self._values = {
}
if delete_on_termination is not None: self._values["delete_on_termination"] = delete_on_termination
if iops is not None: self._values["iops"] = iops
if volume_type is not None: self._values["volume_type"] = volume_type
if encrypted is not None: self._values["encrypted"] = encrypted
@builtins.property
def delete_on_termination(self) -> typing.Optional[bool]:
"""Indicates whether to delete the volume when the instance is terminated.
default
:default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
"""
return self._values.get('delete_on_termination')
@builtins.property
def iops(self) -> typing.Optional[jsii.Number]:
"""The number of I/O operations per second (IOPS) to provision for the volume.
Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}
The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS,
you need at least 100 GiB storage on the volume.
default
:default: - none, required for {@link EbsDeviceVolumeType.IO1}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('iops')
@builtins.property
def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
"""The EBS volume type.
default
:default: {@link EbsDeviceVolumeType.GP2}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('volume_type')
@builtins.property
def encrypted(self) -> typing.Optional[bool]:
"""Specifies whether the EBS volume is encrypted.
Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption
default
:default: false
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances
"""
return self._values.get('encrypted')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'EbsDeviceOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
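# EbsDeviceOptions is typically reached through ``BlockDeviceVolume.ebs``
# (BlockDeviceVolume and BlockDevice are defined elsewhere in this module),
# which adds the volume size. A sketch of an encrypted volume; the device name
# is an assumption for an Amazon Linux root device, and the helper is never
# invoked by this module:
def _example_encrypted_ebs_volume() -> "BlockDevice":
    """Sketch: 30 GiB encrypted volume, deleted when the instance terminates."""
    return BlockDevice(
        device_name="/dev/xvda",
        volume=BlockDeviceVolume.ebs(30,
            encrypted=True,
            delete_on_termination=True))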
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceSnapshotOptions", jsii_struct_bases=[EbsDeviceOptionsBase], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType', 'volume_size': 'volumeSize'})
class EbsDeviceSnapshotOptions(EbsDeviceOptionsBase):
def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None, volume_size: typing.Optional[jsii.Number]=None):
"""Block device options for an EBS volume created from a snapshot.
:param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
:param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
:param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
:param volume_size: The volume size, in gibibytes (GiB). If you specify volumeSize, it must be equal to or greater than the size of the snapshot. Default: - The snapshot size
"""
self._values = {
}
if delete_on_termination is not None: self._values["delete_on_termination"] = delete_on_termination
if iops is not None: self._values["iops"] = iops
if volume_type is not None: self._values["volume_type"] = volume_type
if volume_size is not None: self._values["volume_size"] = volume_size
@builtins.property
def delete_on_termination(self) -> typing.Optional[bool]:
"""Indicates whether to delete the volume when the instance is terminated.
default
:default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
"""
return self._values.get('delete_on_termination')
@builtins.property
def iops(self) -> typing.Optional[jsii.Number]:
"""The number of I/O operations per second (IOPS) to provision for the volume.
Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}
The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS,
you need at least 100 GiB storage on the volume.
default
:default: - none, required for {@link EbsDeviceVolumeType.IO1}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('iops')
@builtins.property
def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
"""The EBS volume type.
default
:default: {@link EbsDeviceVolumeType.GP2}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('volume_type')
@builtins.property
def volume_size(self) -> typing.Optional[jsii.Number]:
"""The volume size, in Gibibytes (GiB).
If you specify volumeSize, it must be equal or greater than the size of the snapshot.
default
:default: - The snapshot size
"""
return self._values.get('volume_size')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'EbsDeviceSnapshotOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceProps", jsii_struct_bases=[EbsDeviceSnapshotOptions], name_mapping={'delete_on_termination': 'deleteOnTermination', 'iops': 'iops', 'volume_type': 'volumeType', 'volume_size': 'volumeSize', 'snapshot_id': 'snapshotId'})
class EbsDeviceProps(EbsDeviceSnapshotOptions):
def __init__(self, *, delete_on_termination: typing.Optional[bool]=None, iops: typing.Optional[jsii.Number]=None, volume_type: typing.Optional["EbsDeviceVolumeType"]=None, volume_size: typing.Optional[jsii.Number]=None, snapshot_id: typing.Optional[str]=None):
"""Properties of an EBS block device.
:param delete_on_termination: Indicates whether to delete the volume when the instance is terminated. Default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
:param iops: The number of I/O operations per second (IOPS) to provision for the volume. Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1} The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS, you need at least 100 GiB storage on the volume. Default: - none, required for {@link EbsDeviceVolumeType.IO1}
:param volume_type: The EBS volume type. Default: {@link EbsDeviceVolumeType.GP2}
:param volume_size: The volume size, in gibibytes (GiB). If you specify volumeSize, it must be equal to or greater than the size of the snapshot. Default: - The snapshot size
:param snapshot_id: The snapshot ID of the volume to use. Default: - No snapshot will be used
"""
self._values = {
}
if delete_on_termination is not None: self._values["delete_on_termination"] = delete_on_termination
if iops is not None: self._values["iops"] = iops
if volume_type is not None: self._values["volume_type"] = volume_type
if volume_size is not None: self._values["volume_size"] = volume_size
if snapshot_id is not None: self._values["snapshot_id"] = snapshot_id
@builtins.property
def delete_on_termination(self) -> typing.Optional[bool]:
"""Indicates whether to delete the volume when the instance is terminated.
default
:default: - true for Amazon EC2 Auto Scaling, false otherwise (e.g. EBS)
"""
return self._values.get('delete_on_termination')
@builtins.property
def iops(self) -> typing.Optional[jsii.Number]:
"""The number of I/O operations per second (IOPS) to provision for the volume.
Must only be set for {@link volumeType}: {@link EbsDeviceVolumeType.IO1}
The maximum ratio of IOPS to volume size (in GiB) is 50:1, so for 5,000 provisioned IOPS,
you need at least 100 GiB storage on the volume.
default
:default: - none, required for {@link EbsDeviceVolumeType.IO1}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('iops')
@builtins.property
def volume_type(self) -> typing.Optional["EbsDeviceVolumeType"]:
"""The EBS volume type.
default
:default: {@link EbsDeviceVolumeType.GP2}
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
"""
return self._values.get('volume_type')
@builtins.property
def volume_size(self) -> typing.Optional[jsii.Number]:
"""The volume size, in Gibibytes (GiB).
If you specify volumeSize, it must be equal or greater than the size of the snapshot.
default
:default: - The snapshot size
"""
return self._values.get('volume_size')
@builtins.property
def snapshot_id(self) -> typing.Optional[str]:
"""The snapshot ID of the volume to use.
default
:default: - No snapshot will be used
"""
return self._values.get('snapshot_id')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'EbsDeviceProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.EbsDeviceVolumeType")
class EbsDeviceVolumeType(enum.Enum):
"""Supported EBS volume types for blockDevices."""
STANDARD = "STANDARD"
"""Magnetic."""
IO1 = "IO1"
"""Provisioned IOPS SSD."""
GP2 = "GP2"
"""General Purpose SSD."""
ST1 = "ST1"
"""Throughput Optimized HDD."""
SC1 = "SC1"
"""Cold HDD."""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.Ec2HealthCheckOptions", jsii_struct_bases=[], name_mapping={'grace': 'grace'})
class Ec2HealthCheckOptions():
def __init__(self, *, grace: typing.Optional[aws_cdk.core.Duration]=None):
"""EC2 Heath check options.
:param grace: Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. Default: Duration.seconds(0)
"""
self._values = {
}
if grace is not None: self._values["grace"] = grace
@builtins.property
def grace(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service.
default
:default: Duration.seconds(0)
"""
return self._values.get('grace')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'Ec2HealthCheckOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.ElbHealthCheckOptions", jsii_struct_bases=[], name_mapping={'grace': 'grace'})
class ElbHealthCheckOptions():
def __init__(self, *, grace: aws_cdk.core.Duration):
"""ELB Heath check options.
:param grace: Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. This option is required for ELB health checks.
"""
self._values = {
'grace': grace,
}
@builtins.property
def grace(self) -> aws_cdk.core.Duration:
"""Specified the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service.
This option is required for ELB health checks.
"""
return self._values.get('grace')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'ElbHealthCheckOptions(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class HealthCheck(metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.HealthCheck"):
"""Health check settings."""
@jsii.member(jsii_name="ec2")
@builtins.classmethod
def ec2(cls, *, grace: typing.Optional[aws_cdk.core.Duration]=None) -> "HealthCheck":
"""Use EC2 for health checks.
:param grace: Specifies the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. Default: Duration.seconds(0)
"""
options = Ec2HealthCheckOptions(grace=grace)
return jsii.sinvoke(cls, "ec2", [options])
@jsii.member(jsii_name="elb")
@builtins.classmethod
def elb(cls, *, grace: aws_cdk.core.Duration) -> "HealthCheck":
"""Use ELB for health checks.
It considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.
:param grace: Specifies the time Auto Scaling waits before checking the health status of an EC2 instance that has come into service. This option is required for ELB health checks.
"""
options = ElbHealthCheckOptions(grace=grace)
return jsii.sinvoke(cls, "elb", [options])
@builtins.property
@jsii.member(jsii_name="type")
def type(self) -> str:
return jsii.get(self, "type")
@builtins.property
@jsii.member(jsii_name="gracePeriod")
def grace_period(self) -> typing.Optional[aws_cdk.core.Duration]:
return jsii.get(self, "gracePeriod")
@jsii.interface(jsii_type="@aws-cdk/aws-autoscaling.IAutoScalingGroup")
class IAutoScalingGroup(aws_cdk.core.IResource, jsii.compat.Protocol):
"""An AutoScalingGroup."""
@builtins.staticmethod
def __jsii_proxy_class__():
return _IAutoScalingGroupProxy
@builtins.property
@jsii.member(jsii_name="autoScalingGroupArn")
def auto_scaling_group_arn(self) -> str:
"""The arn of the AutoScalingGroup.
attribute:
:attribute:: true
"""
...
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""The name of the AutoScalingGroup.
attribute:
:attribute:: true
"""
...
@jsii.member(jsii_name="addLifecycleHook")
def add_lifecycle_hook(self, id: str, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> "LifecycleHook":
"""Send a message to either an SQS queue or SNS topic when instances launch or terminate.
:param id: -
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
"""
...
@jsii.member(jsii_name="scaleOnCpuUtilization")
def scale_on_cpu_utilization(self, id: str, *, target_utilization_percent: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target CPU utilization.
:param id: -
:param target_utilization_percent: Target average CPU utilization across the fleet.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
...
@jsii.member(jsii_name="scaleOnIncomingBytes")
def scale_on_incoming_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network ingress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/second on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
...
@jsii.member(jsii_name="scaleOnMetric")
def scale_on_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> "StepScalingPolicy":
"""Scale out or in, in response to a metric.
:param id: -
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
...
@jsii.member(jsii_name="scaleOnOutgoingBytes")
def scale_on_outgoing_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network egress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/second on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
...
@jsii.member(jsii_name="scaleOnSchedule")
def scale_on_schedule(self, id: str, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> "ScheduledAction":
"""Scale out or in based on time.
:param id: -
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
:param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
"""
...
@jsii.member(jsii_name="scaleToTrackMetric")
def scale_to_track_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in in order to keep a metric around a target value.
:param id: -
:param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
:param target_value: Value to keep the metric around.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
...
class _IAutoScalingGroupProxy(jsii.proxy_for(aws_cdk.core.IResource)):
"""An AutoScalingGroup."""
__jsii_type__ = "@aws-cdk/aws-autoscaling.IAutoScalingGroup"
@builtins.property
@jsii.member(jsii_name="autoScalingGroupArn")
def auto_scaling_group_arn(self) -> str:
"""The arn of the AutoScalingGroup.
attribute:
:attribute:: true
"""
return jsii.get(self, "autoScalingGroupArn")
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""The name of the AutoScalingGroup.
attribute:
:attribute:: true
"""
return jsii.get(self, "autoScalingGroupName")
@jsii.member(jsii_name="addLifecycleHook")
def add_lifecycle_hook(self, id: str, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> "LifecycleHook":
"""Send a message to either an SQS queue or SNS topic when instances launch or terminate.
:param id: -
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
"""
props = BasicLifecycleHookProps(lifecycle_transition=lifecycle_transition, notification_target=notification_target, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, role=role)
return jsii.invoke(self, "addLifecycleHook", [id, props])
@jsii.member(jsii_name="scaleOnCpuUtilization")
def scale_on_cpu_utilization(self, id: str, *, target_utilization_percent: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target CPU utilization.
:param id: -
:param target_utilization_percent: Target average CPU utilization across the fleet.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = CpuUtilizationScalingProps(target_utilization_percent=target_utilization_percent, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnCpuUtilization", [id, props])
@jsii.member(jsii_name="scaleOnIncomingBytes")
def scale_on_incoming_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network ingress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/second on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnIncomingBytes", [id, props])
@jsii.member(jsii_name="scaleOnMetric")
def scale_on_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> "StepScalingPolicy":
"""Scale out or in, in response to a metric.
:param id: -
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
props = BasicStepScalingPolicyProps(metric=metric, scaling_steps=scaling_steps, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, min_adjustment_magnitude=min_adjustment_magnitude)
return jsii.invoke(self, "scaleOnMetric", [id, props])
@jsii.member(jsii_name="scaleOnOutgoingBytes")
def scale_on_outgoing_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network egress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/second on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnOutgoingBytes", [id, props])
@jsii.member(jsii_name="scaleOnSchedule")
def scale_on_schedule(self, id: str, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> "ScheduledAction":
"""Scale out or in based on time.
:param id: -
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
:param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
"""
props = BasicScheduledActionProps(schedule=schedule, desired_capacity=desired_capacity, end_time=end_time, max_capacity=max_capacity, min_capacity=min_capacity, start_time=start_time)
return jsii.invoke(self, "scaleOnSchedule", [id, props])
@jsii.member(jsii_name="scaleToTrackMetric")
def scale_to_track_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in in order to keep a metric around a target value.
:param id: -
:param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
:param target_value: Value to keep the metric around.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = MetricTargetTrackingProps(metric=metric, target_value=target_value, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleToTrackMetric", [id, props])
@jsii.implements(aws_cdk.aws_elasticloadbalancing.ILoadBalancerTarget, aws_cdk.aws_ec2.IConnectable, aws_cdk.aws_elasticloadbalancingv2.IApplicationLoadBalancerTarget, aws_cdk.aws_elasticloadbalancingv2.INetworkLoadBalancerTarget, aws_cdk.aws_iam.IGrantable, IAutoScalingGroup)
class AutoScalingGroup(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.AutoScalingGroup"):
"""A Fleet represents a managed set of EC2 instances.
The Fleet models a number of AutoScalingGroups, a launch configuration, a
security group and an instance role.
It allows adding arbitrary commands to the startup scripts of the instances
in the fleet.
The ASG spans all availability zones.
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, instance_type: aws_cdk.aws_ec2.InstanceType, machine_image: aws_cdk.aws_ec2.IMachineImage, vpc: aws_cdk.aws_ec2.IVpc, block_devices: typing.Optional[typing.List["BlockDevice"]]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, user_data: typing.Optional[aws_cdk.aws_ec2.UserData]=None, allow_all_outbound: typing.Optional[bool]=None, associate_public_ip_address: typing.Optional[bool]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, desired_capacity: typing.Optional[jsii.Number]=None, health_check: typing.Optional["HealthCheck"]=None, ignore_unmodified_size_properties: typing.Optional[bool]=None, key_name: typing.Optional[str]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, notifications_topic: typing.Optional[aws_cdk.aws_sns.ITopic]=None, replacing_update_min_successful_instances_percent: typing.Optional[jsii.Number]=None, resource_signal_count: typing.Optional[jsii.Number]=None, resource_signal_timeout: typing.Optional[aws_cdk.core.Duration]=None, rolling_update_configuration: typing.Optional["RollingUpdateConfiguration"]=None, spot_price: typing.Optional[str]=None, update_type: typing.Optional["UpdateType"]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None) -> None:
"""
:param scope: -
:param id: -
:param instance_type: Type of instance to launch.
:param machine_image: AMI to launch.
:param vpc: VPC to launch these instances in.
:param block_devices: Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes. Each instance that is launched has an associated root device volume, either an Amazon EBS volume or an instance store volume. You can use block device mappings to specify additional EBS volumes or instance store volumes to attach to an instance when it is launched. Default: - Uses the block device mapping of the AMI
:param role: An IAM role to associate with the instance profile assigned to this Auto Scaling Group. The role must be assumable by the service principal ``ec2.amazonaws.com``: Default: A role will automatically be created, it can be accessed via the ``role`` property
:param user_data: Specific UserData to use. The UserData may still be mutated after creation. Default: - A UserData object appropriate for the MachineImage's Operating System is created.
:param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
:param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. Default: - Use subnet setting.
:param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
:param desired_capacity: Initial amount of instances in the fleet. If this is set to a number, every deployment will reset the amount of instances to this number. It is recommended to leave this value blank. Default: minCapacity, and leave unchanged during deployment
:param health_check: Configuration for health checks. Default: - HealthCheck.ec2 with no grace period
:param ignore_unmodified_size_properties: If the ASG has scheduled actions, don't reset unchanged group sizes. Only used if the ASG has scheduled actions (which may scale your ASG up or down regardless of cdk deployments). If true, the size of the group will only be reset if it has been changed in the CDK app. If false, the sizes will always be changed back to what they were in the CDK app on deployment. Default: true
:param key_name: Name of SSH keypair to grant access to instances. Default: - No SSH access will be possible.
:param max_capacity: Maximum number of instances in the fleet. Default: desiredCapacity
:param min_capacity: Minimum number of instances in the fleet. Default: 1
:param notifications_topic: SNS topic to send notifications about fleet changes. Default: - No fleet change notifications will be sent.
:param replacing_update_min_successful_instances_percent: Configuration for replacing updates. Only used if updateType == UpdateType.ReplacingUpdate. Specifies how many instances must signal success for the update to succeed. Default: minSuccessfulInstancesPercent
:param resource_signal_count: How many ResourceSignal calls CloudFormation expects before the resource is considered created. Default: 1
:param resource_signal_timeout: The length of time to wait for the resourceSignalCount. The maximum value is 43200 (12 hours). Default: Duration.minutes(5)
:param rolling_update_configuration: Configuration for rolling updates. Only used if updateType == UpdateType.RollingUpdate. Default: - RollingUpdateConfiguration with defaults.
:param spot_price: The maximum hourly price (in USD) to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. Default: none
:param update_type: What to do when an AutoScalingGroup's instance configuration is changed. This is applied when any of the settings on the ASG are changed that affect how the instances should be created (VPC, instance type, startup scripts, etc.). It indicates how the existing instances should be replaced with new instances matching the new config. By default, nothing is done and only new instances are launched with the new config. Default: UpdateType.None
:param vpc_subnets: Where to place instances within the VPC. Default: - All Private subnets.
"""
props = AutoScalingGroupProps(instance_type=instance_type, machine_image=machine_image, vpc=vpc, block_devices=block_devices, role=role, user_data=user_data, allow_all_outbound=allow_all_outbound, associate_public_ip_address=associate_public_ip_address, cooldown=cooldown, desired_capacity=desired_capacity, health_check=health_check, ignore_unmodified_size_properties=ignore_unmodified_size_properties, key_name=key_name, max_capacity=max_capacity, min_capacity=min_capacity, notifications_topic=notifications_topic, replacing_update_min_successful_instances_percent=replacing_update_min_successful_instances_percent, resource_signal_count=resource_signal_count, resource_signal_timeout=resource_signal_timeout, rolling_update_configuration=rolling_update_configuration, spot_price=spot_price, update_type=update_type, vpc_subnets=vpc_subnets)
jsii.create(AutoScalingGroup, self, [scope, id, props])
@jsii.member(jsii_name="fromAutoScalingGroupName")
@builtins.classmethod
def from_auto_scaling_group_name(cls, scope: aws_cdk.core.Construct, id: str, auto_scaling_group_name: str) -> "IAutoScalingGroup":
"""
:param scope: -
:param id: -
:param auto_scaling_group_name: -
"""
return jsii.sinvoke(cls, "fromAutoScalingGroupName", [scope, id, auto_scaling_group_name])
@jsii.member(jsii_name="addLifecycleHook")
def add_lifecycle_hook(self, id: str, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> "LifecycleHook":
"""Send a message to either an SQS queue or SNS topic when instances launch or terminate.
:param id: -
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
"""
props = BasicLifecycleHookProps(lifecycle_transition=lifecycle_transition, notification_target=notification_target, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, role=role)
return jsii.invoke(self, "addLifecycleHook", [id, props])
@jsii.member(jsii_name="addSecurityGroup")
def add_security_group(self, security_group: aws_cdk.aws_ec2.ISecurityGroup) -> None:
"""Add the security group to all instances via the launch configuration security groups array.
:param security_group: The security group to add.
"""
return jsii.invoke(self, "addSecurityGroup", [security_group])
@jsii.member(jsii_name="addToRolePolicy")
def add_to_role_policy(self, statement: aws_cdk.aws_iam.PolicyStatement) -> None:
"""Adds a statement to the IAM role assumed by instances of this fleet.
:param statement: -
"""
return jsii.invoke(self, "addToRolePolicy", [statement])
@jsii.member(jsii_name="addUserData")
def add_user_data(self, *commands: str) -> None:
"""Add command to the startup script of fleet instances.
The command must be in the scripting language supported by the fleet's OS (i.e. Linux/Windows).
:param commands: -
"""
return jsii.invoke(self, "addUserData", [*commands])
@jsii.member(jsii_name="attachToApplicationTargetGroup")
def attach_to_application_target_group(self, target_group: aws_cdk.aws_elasticloadbalancingv2.IApplicationTargetGroup) -> aws_cdk.aws_elasticloadbalancingv2.LoadBalancerTargetProps:
"""Attach to ELBv2 Application Target Group.
:param target_group: -
"""
return jsii.invoke(self, "attachToApplicationTargetGroup", [target_group])
@jsii.member(jsii_name="attachToClassicLB")
def attach_to_classic_lb(self, load_balancer: aws_cdk.aws_elasticloadbalancing.LoadBalancer) -> None:
"""Attach to a classic load balancer.
:param load_balancer: -
"""
return jsii.invoke(self, "attachToClassicLB", [load_balancer])
@jsii.member(jsii_name="attachToNetworkTargetGroup")
def attach_to_network_target_group(self, target_group: aws_cdk.aws_elasticloadbalancingv2.INetworkTargetGroup) -> aws_cdk.aws_elasticloadbalancingv2.LoadBalancerTargetProps:
"""Attach to ELBv2 Application Target Group.
:param target_group: -
"""
return jsii.invoke(self, "attachToNetworkTargetGroup", [target_group])
@jsii.member(jsii_name="scaleOnCpuUtilization")
def scale_on_cpu_utilization(self, id: str, *, target_utilization_percent: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target CPU utilization.
:param id: -
:param target_utilization_percent: Target average CPU utilization across the task.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = CpuUtilizationScalingProps(target_utilization_percent=target_utilization_percent, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnCpuUtilization", [id, props])
@jsii.member(jsii_name="scaleOnIncomingBytes")
def scale_on_incoming_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network ingress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/seconds on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnIncomingBytes", [id, props])
@jsii.member(jsii_name="scaleOnMetric")
def scale_on_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> "StepScalingPolicy":
"""Scale out or in, in response to a metric.
:param id: -
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
props = BasicStepScalingPolicyProps(metric=metric, scaling_steps=scaling_steps, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, min_adjustment_magnitude=min_adjustment_magnitude)
return jsii.invoke(self, "scaleOnMetric", [id, props])
@jsii.member(jsii_name="scaleOnOutgoingBytes")
def scale_on_outgoing_bytes(self, id: str, *, target_bytes_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target network egress rate.
:param id: -
:param target_bytes_per_second: Target average bytes/seconds on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = NetworkUtilizationScalingProps(target_bytes_per_second=target_bytes_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnOutgoingBytes", [id, props])
@jsii.member(jsii_name="scaleOnRequestCount")
def scale_on_request_count(self, id: str, *, target_requests_per_second: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in to achieve a target request handling rate.
The AutoScalingGroup must have been attached to an Application Load Balancer
in order to be able to call this.
:param id: -
:param target_requests_per_second: Target average requests/seconds on each instance.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = RequestCountScalingProps(target_requests_per_second=target_requests_per_second, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleOnRequestCount", [id, props])
@jsii.member(jsii_name="scaleOnSchedule")
def scale_on_schedule(self, id: str, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> "ScheduledAction":
"""Scale out or in based on time.
:param id: -
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
:param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
"""
props = BasicScheduledActionProps(schedule=schedule, desired_capacity=desired_capacity, end_time=end_time, max_capacity=max_capacity, min_capacity=min_capacity, start_time=start_time)
return jsii.invoke(self, "scaleOnSchedule", [id, props])
@jsii.member(jsii_name="scaleToTrackMetric")
def scale_to_track_metric(self, id: str, *, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> "TargetTrackingScalingPolicy":
"""Scale out or in in order to keep a metric around a target value.
:param id: -
:param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
:param target_value: Value to keep the metric around.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = MetricTargetTrackingProps(metric=metric, target_value=target_value, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
return jsii.invoke(self, "scaleToTrackMetric", [id, props])
@builtins.property
@jsii.member(jsii_name="autoScalingGroupArn")
def auto_scaling_group_arn(self) -> str:
"""Arn of the AutoScalingGroup."""
return jsii.get(self, "autoScalingGroupArn")
@builtins.property
@jsii.member(jsii_name="autoScalingGroupName")
def auto_scaling_group_name(self) -> str:
"""Name of the AutoScalingGroup."""
return jsii.get(self, "autoScalingGroupName")
@builtins.property
@jsii.member(jsii_name="connections")
def connections(self) -> aws_cdk.aws_ec2.Connections:
"""Allows specify security group connections for instances of this fleet."""
return jsii.get(self, "connections")
@builtins.property
@jsii.member(jsii_name="grantPrincipal")
def grant_principal(self) -> aws_cdk.aws_iam.IPrincipal:
"""The principal to grant permissions to."""
return jsii.get(self, "grantPrincipal")
@builtins.property
@jsii.member(jsii_name="osType")
def os_type(self) -> aws_cdk.aws_ec2.OperatingSystemType:
"""The type of OS instances of this fleet are running."""
return jsii.get(self, "osType")
@builtins.property
@jsii.member(jsii_name="role")
def role(self) -> aws_cdk.aws_iam.IRole:
"""The IAM role assumed by instances of this fleet."""
return jsii.get(self, "role")
@builtins.property
@jsii.member(jsii_name="userData")
def user_data(self) -> aws_cdk.aws_ec2.UserData:
"""UserData for the instances."""
return jsii.get(self, "userData")
@builtins.property
@jsii.member(jsii_name="spotPrice")
def spot_price(self) -> typing.Optional[str]:
"""The maximum spot price configured for thie autoscaling group.
``undefined``
indicates that this group uses on-demand capacity.
"""
return jsii.get(self, "spotPrice")
@builtins.property
@jsii.member(jsii_name="albTargetGroup")
def _alb_target_group(self) -> typing.Optional[aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup]:
return jsii.get(self, "albTargetGroup")
@_alb_target_group.setter
def _alb_target_group(self, value: typing.Optional[aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup]):
jsii.set(self, "albTargetGroup", value)
@jsii.interface(jsii_type="@aws-cdk/aws-autoscaling.ILifecycleHook")
class ILifecycleHook(aws_cdk.core.IResource, jsii.compat.Protocol):
"""A basic lifecycle hook object."""
@builtins.staticmethod
def __jsii_proxy_class__():
return _ILifecycleHookProxy
@builtins.property
@jsii.member(jsii_name="role")
def role(self) -> aws_cdk.aws_iam.IRole:
"""The role for the lifecycle hook to execute."""
...
class _ILifecycleHookProxy(jsii.proxy_for(aws_cdk.core.IResource)):
"""A basic lifecycle hook object."""
__jsii_type__ = "@aws-cdk/aws-autoscaling.ILifecycleHook"
@builtins.property
@jsii.member(jsii_name="role")
def role(self) -> aws_cdk.aws_iam.IRole:
"""The role for the lifecycle hook to execute."""
return jsii.get(self, "role")
@jsii.interface(jsii_type="@aws-cdk/aws-autoscaling.ILifecycleHookTarget")
class ILifecycleHookTarget(jsii.compat.Protocol):
"""Interface for autoscaling lifecycle hook targets."""
@builtins.staticmethod
def __jsii_proxy_class__():
return _ILifecycleHookTargetProxy
@jsii.member(jsii_name="bind")
def bind(self, scope: aws_cdk.core.Construct, lifecycle_hook: "ILifecycleHook") -> "LifecycleHookTargetConfig":
"""Called when this object is used as the target of a lifecycle hook.
:param scope: -
:param lifecycle_hook: -
"""
...
class _ILifecycleHookTargetProxy():
"""Interface for autoscaling lifecycle hook targets."""
__jsii_type__ = "@aws-cdk/aws-autoscaling.ILifecycleHookTarget"
@jsii.member(jsii_name="bind")
def bind(self, scope: aws_cdk.core.Construct, lifecycle_hook: "ILifecycleHook") -> "LifecycleHookTargetConfig":
"""Called when this object is used as the target of a lifecycle hook.
:param scope: -
:param lifecycle_hook: -
"""
return jsii.invoke(self, "bind", [scope, lifecycle_hook])
@jsii.implements(ILifecycleHook)
class LifecycleHook(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.LifecycleHook"):
"""Define a life cycle hook."""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group: The AutoScalingGroup to add the lifecycle hook to.
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
"""
props = LifecycleHookProps(auto_scaling_group=auto_scaling_group, lifecycle_transition=lifecycle_transition, notification_target=notification_target, default_result=default_result, heartbeat_timeout=heartbeat_timeout, lifecycle_hook_name=lifecycle_hook_name, notification_metadata=notification_metadata, role=role)
jsii.create(LifecycleHook, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="lifecycleHookName")
def lifecycle_hook_name(self) -> str:
"""The name of this lifecycle hook.
attribute:
:attribute:: true
"""
return jsii.get(self, "lifecycleHookName")
@builtins.property
@jsii.member(jsii_name="role")
def role(self) -> aws_cdk.aws_iam.IRole:
"""The role that allows the ASG to publish to the notification target."""
return jsii.get(self, "role")
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.LifecycleHookProps", jsii_struct_bases=[BasicLifecycleHookProps], name_mapping={'lifecycle_transition': 'lifecycleTransition', 'notification_target': 'notificationTarget', 'default_result': 'defaultResult', 'heartbeat_timeout': 'heartbeatTimeout', 'lifecycle_hook_name': 'lifecycleHookName', 'notification_metadata': 'notificationMetadata', 'role': 'role', 'auto_scaling_group': 'autoScalingGroup'})
class LifecycleHookProps(BasicLifecycleHookProps):
def __init__(self, *, lifecycle_transition: "LifecycleTransition", notification_target: "ILifecycleHookTarget", default_result: typing.Optional["DefaultResult"]=None, heartbeat_timeout: typing.Optional[aws_cdk.core.Duration]=None, lifecycle_hook_name: typing.Optional[str]=None, notification_metadata: typing.Optional[str]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, auto_scaling_group: "IAutoScalingGroup"):
"""Properties for a Lifecycle hook.
:param lifecycle_transition: The state of the Amazon EC2 instance to which you want to attach the lifecycle hook.
:param notification_target: The target of the lifecycle hook.
:param default_result: The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs. Default: Continue
:param heartbeat_timeout: Maximum time between calls to RecordLifecycleActionHeartbeat for the hook. If the lifecycle hook times out, perform the action in DefaultResult. Default: - No heartbeat timeout.
:param lifecycle_hook_name: Name of the lifecycle hook. Default: - Automatically generated name.
:param notification_metadata: Additional data to pass to the lifecycle hook target. Default: - No metadata.
:param role: The role that allows publishing to the notification target. Default: - A role is automatically created.
:param auto_scaling_group: The AutoScalingGroup to add the lifecycle hook to.
"""
self._values = {
'lifecycle_transition': lifecycle_transition,
'notification_target': notification_target,
'auto_scaling_group': auto_scaling_group,
}
if default_result is not None: self._values["default_result"] = default_result
if heartbeat_timeout is not None: self._values["heartbeat_timeout"] = heartbeat_timeout
if lifecycle_hook_name is not None: self._values["lifecycle_hook_name"] = lifecycle_hook_name
if notification_metadata is not None: self._values["notification_metadata"] = notification_metadata
if role is not None: self._values["role"] = role
@builtins.property
def lifecycle_transition(self) -> "LifecycleTransition":
"""The state of the Amazon EC2 instance to which you want to attach the lifecycle hook."""
return self._values.get('lifecycle_transition')
@builtins.property
def notification_target(self) -> "ILifecycleHookTarget":
"""The target of the lifecycle hook."""
return self._values.get('notification_target')
@builtins.property
def default_result(self) -> typing.Optional["DefaultResult"]:
"""The action the Auto Scaling group takes when the lifecycle hook timeout elapses or if an unexpected failure occurs.
default
:default: Continue
"""
return self._values.get('default_result')
@builtins.property
def heartbeat_timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Maximum time between calls to RecordLifecycleActionHeartbeat for the hook.
If the lifecycle hook times out, perform the action in DefaultResult.
default
:default: - No heartbeat timeout.
"""
return self._values.get('heartbeat_timeout')
@builtins.property
def lifecycle_hook_name(self) -> typing.Optional[str]:
"""Name of the lifecycle hook.
default
:default: - Automatically generated name.
"""
return self._values.get('lifecycle_hook_name')
@builtins.property
def notification_metadata(self) -> typing.Optional[str]:
"""Additional data to pass to the lifecycle hook target.
default
:default: - No metadata.
"""
return self._values.get('notification_metadata')
@builtins.property
def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
"""The role that allows publishing to the notification target.
default
:default: - A role is automatically created.
"""
return self._values.get('role')
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
"""The AutoScalingGroup to add the lifecycle hook to."""
return self._values.get('auto_scaling_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LifecycleHookProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.LifecycleHookTargetConfig", jsii_struct_bases=[], name_mapping={'notification_target_arn': 'notificationTargetArn'})
class LifecycleHookTargetConfig():
def __init__(self, *, notification_target_arn: str):
"""Properties to add the target to a lifecycle hook.
:param notification_target_arn: The ARN to use as the notification target.
"""
self._values = {
'notification_target_arn': notification_target_arn,
}
@builtins.property
def notification_target_arn(self) -> str:
"""The ARN to use as the notification target."""
return self._values.get('notification_target_arn')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'LifecycleHookTargetConfig(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.LifecycleTransition")
class LifecycleTransition(enum.Enum):
"""What instance transition to attach the hook to."""
INSTANCE_LAUNCHING = "INSTANCE_LAUNCHING"
"""Execute the hook when an instance is about to be added."""
INSTANCE_TERMINATING = "INSTANCE_TERMINATING"
"""Execute the hook when an instance is about to be terminated."""
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.MetricAggregationType")
class MetricAggregationType(enum.Enum):
"""How the scaling metric is going to be aggregated."""
AVERAGE = "AVERAGE"
"""Average."""
MINIMUM = "MINIMUM"
"""Minimum."""
MAXIMUM = "MAXIMUM"
"""Maximum."""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.MetricTargetTrackingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'metric': 'metric', 'target_value': 'targetValue'})
class MetricTargetTrackingProps(BaseTargetTrackingProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, metric: aws_cdk.aws_cloudwatch.IMetric, target_value: jsii.Number):
"""Properties for enabling tracking of an arbitrary metric.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param metric: Metric to track. The metric must represent a utilization, so that if it's higher than the target value, your ASG should scale out, and if it's lower it should scale in.
:param target_value: Value to keep the metric around.
"""
self._values = {
'metric': metric,
'target_value': target_value,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def metric(self) -> aws_cdk.aws_cloudwatch.IMetric:
"""Metric to track.
The metric must represent a utilization, so that if it's higher than the
target value, your ASG should scale out, and if it's lower it should
scale in.
"""
return self._values.get('metric')
@builtins.property
def target_value(self) -> jsii.Number:
"""Value to keep the metric around."""
return self._values.get('target_value')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MetricTargetTrackingProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.NetworkUtilizationScalingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_bytes_per_second': 'targetBytesPerSecond'})
class NetworkUtilizationScalingProps(BaseTargetTrackingProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_bytes_per_second: jsii.Number):
"""Properties for enabling scaling based on network utilization.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param target_bytes_per_second: Target average bytes/seconds on each instance.
"""
self._values = {
'target_bytes_per_second': target_bytes_per_second,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def target_bytes_per_second(self) -> jsii.Number:
"""Target average bytes/seconds on each instance."""
return self._values.get('target_bytes_per_second')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'NetworkUtilizationScalingProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
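# --- Usage sketch (illustrative only; not part of the generated bindings) ---
# Target-track network ingress at roughly 10 MiB/s per instance; egress works
# the same way via scaleOnOutgoingBytes. The id and rate are hypothetical.
def _example_network_scaling(asg: "AutoScalingGroup") -> None:
    asg.scale_on_incoming_bytes("IngressScaling", target_bytes_per_second=10 * 1024 * 1024)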
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.PredefinedMetric")
class PredefinedMetric(enum.Enum):
"""One of the predefined autoscaling metrics."""
ASG_AVERAGE_CPU_UTILIZATION = "ASG_AVERAGE_CPU_UTILIZATION"
"""Average CPU utilization of the Auto Scaling group."""
ASG_AVERAGE_NETWORK_IN = "ASG_AVERAGE_NETWORK_IN"
"""Average number of bytes received on all network interfaces by the Auto Scaling group."""
ASG_AVERAGE_NETWORK_OUT = "ASG_AVERAGE_NETWORK_OUT"
"""Average number of bytes sent out on all network interfaces by the Auto Scaling group."""
ALB_REQUEST_COUNT_PER_TARGET = "ALB_REQUEST_COUNT_PER_TARGET"
"""Number of requests completed per target in an Application Load Balancer target group.
Specify the ALB to look at in the ``resourceLabel`` field.
"""
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.RequestCountScalingProps", jsii_struct_bases=[BaseTargetTrackingProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_requests_per_second': 'targetRequestsPerSecond'})
class RequestCountScalingProps(BaseTargetTrackingProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_requests_per_second: jsii.Number):
"""Properties for enabling scaling based on request/second.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param target_requests_per_second: Target average requests/seconds on each instance.
"""
self._values = {
'target_requests_per_second': target_requests_per_second,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def target_requests_per_second(self) -> jsii.Number:
"""Target average requests/seconds on each instance."""
return self._values.get('target_requests_per_second')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'RequestCountScalingProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
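# --- Usage sketch (illustrative only; not part of the generated bindings) ---
# Request-count scaling only works once the group has been attached to an
# Application Load Balancer (see scaleOnRequestCount above); ``asg`` is assumed
# to already be registered as a target. The id and rate are hypothetical.
def _example_request_scaling(asg: "AutoScalingGroup") -> None:
    asg.scale_on_request_count("ReqScaling", target_requests_per_second=20)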
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.RollingUpdateConfiguration", jsii_struct_bases=[], name_mapping={'max_batch_size': 'maxBatchSize', 'min_instances_in_service': 'minInstancesInService', 'min_successful_instances_percent': 'minSuccessfulInstancesPercent', 'pause_time': 'pauseTime', 'suspend_processes': 'suspendProcesses', 'wait_on_resource_signals': 'waitOnResourceSignals'})
class RollingUpdateConfiguration():
def __init__(self, *, max_batch_size: typing.Optional[jsii.Number]=None, min_instances_in_service: typing.Optional[jsii.Number]=None, min_successful_instances_percent: typing.Optional[jsii.Number]=None, pause_time: typing.Optional[aws_cdk.core.Duration]=None, suspend_processes: typing.Optional[typing.List["ScalingProcess"]]=None, wait_on_resource_signals: typing.Optional[bool]=None):
"""Additional settings when a rolling update is selected.
:param max_batch_size: The maximum number of instances that AWS CloudFormation updates at once. Default: 1
:param min_instances_in_service: The minimum number of instances that must be in service before more instances are replaced. This number affects the speed of the replacement. Default: 0
:param min_successful_instances_percent: The percentage of instances that must signal success for an update to succeed. If an instance doesn't send a signal within the time specified in the pauseTime property, AWS CloudFormation assumes that the instance wasn't updated. This number affects the success of the replacement. If you specify this property, you must also enable the waitOnResourceSignals and pauseTime properties. Default: 100
:param pause_time: The pause time after making a change to a batch of instances. This is intended to give those instances time to start software applications. Specify PauseTime in the ISO8601 duration format (in the format PT#H#M#S, where each # is the number of hours, minutes, and seconds, respectively). The maximum PauseTime is one hour (PT1H). Default: Duration.minutes(5) if the waitOnResourceSignals property is true, otherwise 0
:param suspend_processes: Specifies the Auto Scaling processes to suspend during a stack update. Suspending processes prevents Auto Scaling from interfering with a stack update. Default: HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions.
:param wait_on_resource_signals: Specifies whether the Auto Scaling group waits on signals from new instances during an update. AWS CloudFormation must receive a signal from each new instance within the specified PauseTime before continuing the update. To have instances wait for an Elastic Load Balancing health check before they signal success, add a health-check verification by using the cfn-init helper script. For an example, see the verify_instance_health command in the Auto Scaling rolling updates sample template. Default: true if you specified the minSuccessfulInstancesPercent property, false otherwise
"""
self._values = {
}
if max_batch_size is not None: self._values["max_batch_size"] = max_batch_size
if min_instances_in_service is not None: self._values["min_instances_in_service"] = min_instances_in_service
if min_successful_instances_percent is not None: self._values["min_successful_instances_percent"] = min_successful_instances_percent
if pause_time is not None: self._values["pause_time"] = pause_time
if suspend_processes is not None: self._values["suspend_processes"] = suspend_processes
if wait_on_resource_signals is not None: self._values["wait_on_resource_signals"] = wait_on_resource_signals
@builtins.property
def max_batch_size(self) -> typing.Optional[jsii.Number]:
"""The maximum number of instances that AWS CloudFormation updates at once.
default
:default: 1
"""
return self._values.get('max_batch_size')
@builtins.property
def min_instances_in_service(self) -> typing.Optional[jsii.Number]:
"""The minimum number of instances that must be in service before more instances are replaced.
This number affects the speed of the replacement.
default
:default: 0
"""
return self._values.get('min_instances_in_service')
@builtins.property
def min_successful_instances_percent(self) -> typing.Optional[jsii.Number]:
"""The percentage of instances that must signal success for an update to succeed.
If an instance doesn't send a signal within the time specified in the
pauseTime property, AWS CloudFormation assumes that the instance wasn't
updated.
This number affects the success of the replacement.
If you specify this property, you must also enable the
waitOnResourceSignals and pauseTime properties.
default
:default: 100
"""
return self._values.get('min_successful_instances_percent')
@builtins.property
def pause_time(self) -> typing.Optional[aws_cdk.core.Duration]:
"""The pause time after making a change to a batch of instances.
This is intended to give those instances time to start software applications.
Specify PauseTime in the ISO8601 duration format (in the format
PT#H#M#S, where each # is the number of hours, minutes, and seconds,
respectively). The maximum PauseTime is one hour (PT1H).
default
:default: Duration.minutes(5) if the waitOnResourceSignals property is true, otherwise 0
"""
return self._values.get('pause_time')
@builtins.property
def suspend_processes(self) -> typing.Optional[typing.List["ScalingProcess"]]:
"""Specifies the Auto Scaling processes to suspend during a stack update.
Suspending processes prevents Auto Scaling from interfering with a stack
update.
default
:default: HealthCheck, ReplaceUnhealthy, AZRebalance, AlarmNotification, ScheduledActions.
"""
return self._values.get('suspend_processes')
@builtins.property
def wait_on_resource_signals(self) -> typing.Optional[bool]:
"""Specifies whether the Auto Scaling group waits on signals from new instances during an update.
AWS CloudFormation must receive a signal from each new instance within
the specified PauseTime before continuing the update.
To have instances wait for an Elastic Load Balancing health check before
they signal success, add a health-check verification by using the
cfn-init helper script. For an example, see the verify_instance_health
command in the Auto Scaling rolling updates sample template.
default
:default: true if you specified the minSuccessfulInstancesPercent property, false otherwise
"""
return self._values.get('wait_on_resource_signals')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'RollingUpdateConfiguration(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
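# --- Usage sketch (illustrative only; not part of the generated bindings) ---
# A rolling update that replaces at most two instances at a time while keeping
# one in service and suspending AZ rebalancing and scheduled actions during the
# update. Pass the result as ``rolling_update_configuration`` together with a
# rolling UpdateType on the AutoScalingGroup; all values are hypothetical.
def _example_rolling_update() -> "RollingUpdateConfiguration":
    return RollingUpdateConfiguration(
        max_batch_size=2,
        min_instances_in_service=1,
        pause_time=aws_cdk.core.Duration.minutes(5),
        suspend_processes=[ScalingProcess.AZ_REBALANCE, ScalingProcess.SCHEDULED_ACTIONS],
    )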
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.ScalingInterval", jsii_struct_bases=[], name_mapping={'change': 'change', 'lower': 'lower', 'upper': 'upper'})
class ScalingInterval():
def __init__(self, *, change: jsii.Number, lower: typing.Optional[jsii.Number]=None, upper: typing.Optional[jsii.Number]=None):
"""A range of metric values in which to apply a certain scaling operation.
:param change: The capacity adjustment to apply in this interval. The number is interpreted differently based on AdjustmentType: - ChangeInCapacity: add the adjustment to the current capacity. The number can be positive or negative. - PercentChangeInCapacity: add or remove the given percentage of the current capacity to itself. The number can be in the range [-100..100]. - ExactCapacity: set the capacity to this number. The number must be positive.
:param lower: The lower bound of the interval. The scaling adjustment will be applied if the metric is higher than this value. Default: Threshold automatically derived from neighbouring intervals
:param upper: The upper bound of the interval. The scaling adjustment will be applied if the metric is lower than this value. Default: Threshold automatically derived from neighbouring intervals
"""
self._values = {
'change': change,
}
if lower is not None: self._values["lower"] = lower
if upper is not None: self._values["upper"] = upper
@builtins.property
def change(self) -> jsii.Number:
"""The capacity adjustment to apply in this interval.
The number is interpreted differently based on AdjustmentType:
- ChangeInCapacity: add the adjustment to the current capacity.
The number can be positive or negative.
- PercentChangeInCapacity: add or remove the given percentage of the current
capacity to itself. The number can be in the range [-100..100].
- ExactCapacity: set the capacity to this number. The number must
be positive.
"""
return self._values.get('change')
@builtins.property
def lower(self) -> typing.Optional[jsii.Number]:
"""The lower bound of the interval.
The scaling adjustment will be applied if the metric is higher than this value.
default
:default: Threshold automatically derived from neighbouring intervals
"""
return self._values.get('lower')
@builtins.property
def upper(self) -> typing.Optional[jsii.Number]:
"""The upper bound of the interval.
The scaling adjustment will be applied if the metric is lower than this value.
default
:default: Threshold automatically derived from neighbouring intervals
"""
return self._values.get('upper')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'ScalingInterval(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
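# --- Usage sketch (illustrative only; not part of the generated bindings) ---
# Step scaling over a custom metric using ScalingInterval ranges: scale in when
# the backlog is low, out when it is high. Metric names and thresholds are
# hypothetical; unspecified bounds are derived from neighbouring intervals.
def _example_step_scaling(asg: "AutoScalingGroup") -> None:
    metric = aws_cdk.aws_cloudwatch.Metric(namespace="MyApp", metric_name="Backlog")
    asg.scale_on_metric(
        "BacklogScaling",
        metric=metric,
        scaling_steps=[
            ScalingInterval(change=-1, upper=10),    # remove one instance when backlog < 10
            ScalingInterval(change=+1, lower=50),    # add one instance when backlog > 50
            ScalingInterval(change=+3, lower=100),   # add three when backlog > 100
        ],
    )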
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.ScalingProcess")
class ScalingProcess(enum.Enum):
LAUNCH = "LAUNCH"
TERMINATE = "TERMINATE"
HEALTH_CHECK = "HEALTH_CHECK"
REPLACE_UNHEALTHY = "REPLACE_UNHEALTHY"
AZ_REBALANCE = "AZ_REBALANCE"
ALARM_NOTIFICATION = "ALARM_NOTIFICATION"
SCHEDULED_ACTIONS = "SCHEDULED_ACTIONS"
ADD_TO_LOAD_BALANCER = "ADD_TO_LOAD_BALANCER"
class Schedule(metaclass=jsii.JSIIAbstractClass, jsii_type="@aws-cdk/aws-autoscaling.Schedule"):
"""Schedule for scheduled scaling actions."""
@builtins.staticmethod
def __jsii_proxy_class__():
return _ScheduleProxy
def __init__(self) -> None:
jsii.create(Schedule, self, [])
@jsii.member(jsii_name="cron")
@builtins.classmethod
def cron(cls, *, day: typing.Optional[str]=None, hour: typing.Optional[str]=None, minute: typing.Optional[str]=None, month: typing.Optional[str]=None, week_day: typing.Optional[str]=None) -> "Schedule":
"""Create a schedule from a set of cron fields.
:param day: The day of the month to run this rule at. Default: - Every day of the month
:param hour: The hour to run this rule at. Default: - Every hour
:param minute: The minute to run this rule at. Default: - Every minute
:param month: The month to run this rule at. Default: - Every month
:param week_day: The day of the week to run this rule at. Default: - Any day of the week
"""
options = CronOptions(day=day, hour=hour, minute=minute, month=month, week_day=week_day)
return jsii.sinvoke(cls, "cron", [options])
@jsii.member(jsii_name="expression")
@builtins.classmethod
def expression(cls, expression: str) -> "Schedule":
"""Construct a schedule from a literal schedule expression.
:param expression: The expression to use. Must be in a format that AutoScaling will recognize.
see
:see: http://crontab.org/
"""
return jsii.sinvoke(cls, "expression", [expression])
@builtins.property
@jsii.member(jsii_name="expressionString")
@abc.abstractmethod
def expression_string(self) -> str:
"""Retrieve the expression for this schedule."""
...
class _ScheduleProxy(Schedule):
@builtins.property
@jsii.member(jsii_name="expressionString")
def expression_string(self) -> str:
"""Retrieve the expression for this schedule."""
return jsii.get(self, "expressionString")
class ScheduledAction(aws_cdk.core.Resource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.ScheduledAction"):
"""Define a scheduled scaling action."""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group: The AutoScalingGroup to apply the scheduled actions to.
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
"""
props = ScheduledActionProps(auto_scaling_group=auto_scaling_group, schedule=schedule, desired_capacity=desired_capacity, end_time=end_time, max_capacity=max_capacity, min_capacity=min_capacity, start_time=start_time)
jsii.create(ScheduledAction, self, [scope, id, props])
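# A minimal usage sketch (illustrative; ``stack`` and ``my_asg`` are assumed
# to exist in the caller's scope). Scale up to at least 10 instances on
# weekday mornings:
#
#   ScheduledAction(stack, "ScaleUpMornings",
#       auto_scaling_group=my_asg,
#       schedule=Schedule.cron(hour="8", minute="0", week_day="MON-FRI"),
#       min_capacity=10)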
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.ScheduledActionProps", jsii_struct_bases=[BasicScheduledActionProps], name_mapping={'schedule': 'schedule', 'desired_capacity': 'desiredCapacity', 'end_time': 'endTime', 'max_capacity': 'maxCapacity', 'min_capacity': 'minCapacity', 'start_time': 'startTime', 'auto_scaling_group': 'autoScalingGroup'})
class ScheduledActionProps(BasicScheduledActionProps):
def __init__(self, *, schedule: "Schedule", desired_capacity: typing.Optional[jsii.Number]=None, end_time: typing.Optional[datetime.datetime]=None, max_capacity: typing.Optional[jsii.Number]=None, min_capacity: typing.Optional[jsii.Number]=None, start_time: typing.Optional[datetime.datetime]=None, auto_scaling_group: "IAutoScalingGroup"):
"""Properties for a scheduled action on an AutoScalingGroup.
:param schedule: When to perform this action. Supports cron expressions. For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
:param desired_capacity: The new desired capacity. At the scheduled time, set the desired capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new desired capacity.
:param end_time: When this scheduled action expires. Default: - The rule never expires.
:param max_capacity: The new maximum capacity. At the scheduled time, set the maximum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new maximum capacity.
:param min_capacity: The new minimum capacity. At the scheduled time, set the minimum capacity to the given capacity. At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied. Default: - No new minimum capacity.
        :param start_time: When this scheduled action becomes active. Default: - The rule is activated immediately.
:param auto_scaling_group: The AutoScalingGroup to apply the scheduled actions to.
"""
self._values = {
'schedule': schedule,
'auto_scaling_group': auto_scaling_group,
}
if desired_capacity is not None: self._values["desired_capacity"] = desired_capacity
if end_time is not None: self._values["end_time"] = end_time
if max_capacity is not None: self._values["max_capacity"] = max_capacity
if min_capacity is not None: self._values["min_capacity"] = min_capacity
if start_time is not None: self._values["start_time"] = start_time
@builtins.property
def schedule(self) -> "Schedule":
"""When to perform this action.
Supports cron expressions.
For more information about cron expressions, see https://en.wikipedia.org/wiki/Cron.
Example::
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
            0 8 * * ?
"""
return self._values.get('schedule')
@builtins.property
def desired_capacity(self) -> typing.Optional[jsii.Number]:
"""The new desired capacity.
At the scheduled time, set the desired capacity to the given capacity.
At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.
default
:default: - No new desired capacity.
"""
return self._values.get('desired_capacity')
@builtins.property
def end_time(self) -> typing.Optional[datetime.datetime]:
"""When this scheduled action expires.
default
:default: - The rule never expires.
"""
return self._values.get('end_time')
@builtins.property
def max_capacity(self) -> typing.Optional[jsii.Number]:
"""The new maximum capacity.
At the scheduled time, set the maximum capacity to the given capacity.
At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.
default
:default: - No new maximum capacity.
"""
return self._values.get('max_capacity')
@builtins.property
def min_capacity(self) -> typing.Optional[jsii.Number]:
"""The new minimum capacity.
At the scheduled time, set the minimum capacity to the given capacity.
At least one of maxCapacity, minCapacity, or desiredCapacity must be supplied.
default
:default: - No new minimum capacity.
"""
return self._values.get('min_capacity')
@builtins.property
def start_time(self) -> typing.Optional[datetime.datetime]:
"""When this scheduled action becomes active.
default
        :default: - The rule is activated immediately.
"""
return self._values.get('start_time')
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
"""The AutoScalingGroup to apply the scheduled actions to."""
return self._values.get('auto_scaling_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'ScheduledActionProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class StepScalingAction(aws_cdk.core.Construct, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.StepScalingAction"):
"""Define a step scaling action.
This kind of scaling policy adjusts the target capacity in configurable
steps. The size of the step is configurable based on the metric's distance
to its alarm threshold.
This Action must be used as the target of a CloudWatch alarm to take effect.
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, metric_aggregation_type: typing.Optional["MetricAggregationType"]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group: The auto scaling group.
:param adjustment_type: How the adjustment numbers are interpreted. Default: ChangeInCapacity
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: The default cooldown configured on the AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param metric_aggregation_type: The aggregation type for the CloudWatch metrics. Default: Average
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
props = StepScalingActionProps(auto_scaling_group=auto_scaling_group, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, metric_aggregation_type=metric_aggregation_type, min_adjustment_magnitude=min_adjustment_magnitude)
jsii.create(StepScalingAction, self, [scope, id, props])
@jsii.member(jsii_name="addAdjustment")
def add_adjustment(self, *, adjustment: jsii.Number, lower_bound: typing.Optional[jsii.Number]=None, upper_bound: typing.Optional[jsii.Number]=None) -> None:
"""Add an adjusment interval to the ScalingAction.
:param adjustment: What number to adjust the capacity with. The number is interpeted as an added capacity, a new fixed capacity or an added percentage depending on the AdjustmentType value of the StepScalingPolicy. Can be positive or negative.
:param lower_bound: Lower bound where this scaling tier applies. The scaling tier applies if the difference between the metric value and its alarm threshold is higher than this value. Default: -Infinity if this is the first tier, otherwise the upperBound of the previous tier
:param upper_bound: Upper bound where this scaling tier applies. The scaling tier applies if the difference between the metric value and its alarm threshold is lower than this value. Default: +Infinity
"""
adjustment_ = AdjustmentTier(adjustment=adjustment, lower_bound=lower_bound, upper_bound=upper_bound)
return jsii.invoke(self, "addAdjustment", [adjustment_])
@builtins.property
@jsii.member(jsii_name="scalingPolicyArn")
def scaling_policy_arn(self) -> str:
"""ARN of the scaling policy."""
return jsii.get(self, "scalingPolicyArn")
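# A minimal usage sketch (illustrative; ``stack`` and ``my_asg`` are
# assumptions). Each add_adjustment call defines one tier of the step
# policy, bounded relative to the alarm threshold:
#
#   action = StepScalingAction(stack, "StepAction", auto_scaling_group=my_asg)
#   action.add_adjustment(adjustment=1, lower_bound=0, upper_bound=10)
#   action.add_adjustment(adjustment=3, lower_bound=10)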
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.StepScalingActionProps", jsii_struct_bases=[], name_mapping={'auto_scaling_group': 'autoScalingGroup', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'metric_aggregation_type': 'metricAggregationType', 'min_adjustment_magnitude': 'minAdjustmentMagnitude'})
class StepScalingActionProps():
def __init__(self, *, auto_scaling_group: "IAutoScalingGroup", adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, metric_aggregation_type: typing.Optional["MetricAggregationType"]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None):
"""Properties for a scaling policy.
:param auto_scaling_group: The auto scaling group.
:param adjustment_type: How the adjustment numbers are interpreted. Default: ChangeInCapacity
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: The default cooldown configured on the AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param metric_aggregation_type: The aggregation type for the CloudWatch metrics. Default: Average
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
self._values = {
'auto_scaling_group': auto_scaling_group,
}
if adjustment_type is not None: self._values["adjustment_type"] = adjustment_type
if cooldown is not None: self._values["cooldown"] = cooldown
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if metric_aggregation_type is not None: self._values["metric_aggregation_type"] = metric_aggregation_type
if min_adjustment_magnitude is not None: self._values["min_adjustment_magnitude"] = min_adjustment_magnitude
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
"""The auto scaling group."""
return self._values.get('auto_scaling_group')
@builtins.property
def adjustment_type(self) -> typing.Optional["AdjustmentType"]:
"""How the adjustment numbers are interpreted.
default
:default: ChangeInCapacity
"""
return self._values.get('adjustment_type')
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: The default cooldown configured on the AutoScalingGroup
"""
return self._values.get('cooldown')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: Same as the cooldown
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def metric_aggregation_type(self) -> typing.Optional["MetricAggregationType"]:
"""The aggregation type for the CloudWatch metrics.
default
:default: Average
"""
return self._values.get('metric_aggregation_type')
@builtins.property
def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
"""Minimum absolute number to adjust capacity with as result of percentage scaling.
Only when using AdjustmentType = PercentChangeInCapacity, this number controls
the minimum absolute effect size.
default
:default: No minimum scaling effect
"""
return self._values.get('min_adjustment_magnitude')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'StepScalingActionProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class StepScalingPolicy(aws_cdk.core.Construct, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.StepScalingPolicy"):
"""Define a acaling strategy which scales depending on absolute values of some metric.
You can specify the scaling behavior for various values of the metric.
Implemented using one or more CloudWatch alarms and Step Scaling Policies.
"""
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group: The auto scaling group.
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
"""
props = StepScalingPolicyProps(auto_scaling_group=auto_scaling_group, metric=metric, scaling_steps=scaling_steps, adjustment_type=adjustment_type, cooldown=cooldown, estimated_instance_warmup=estimated_instance_warmup, min_adjustment_magnitude=min_adjustment_magnitude)
jsii.create(StepScalingPolicy, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="lowerAction")
def lower_action(self) -> typing.Optional["StepScalingAction"]:
return jsii.get(self, "lowerAction")
@builtins.property
@jsii.member(jsii_name="lowerAlarm")
def lower_alarm(self) -> typing.Optional[aws_cdk.aws_cloudwatch.Alarm]:
return jsii.get(self, "lowerAlarm")
@builtins.property
@jsii.member(jsii_name="upperAction")
def upper_action(self) -> typing.Optional["StepScalingAction"]:
return jsii.get(self, "upperAction")
@builtins.property
@jsii.member(jsii_name="upperAlarm")
def upper_alarm(self) -> typing.Optional[aws_cdk.aws_cloudwatch.Alarm]:
return jsii.get(self, "upperAlarm")
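# A minimal usage sketch (illustrative; ``stack``, ``my_asg`` and
# ``cpu_metric`` are assumptions, and the ScalingInterval field names follow
# the struct defined earlier in this module). Map metric ranges to capacity
# changes in one declarative policy; the alarms and step actions are created
# internally:
#
#   StepScalingPolicy(stack, "CpuSteps",
#       auto_scaling_group=my_asg,
#       metric=cpu_metric,
#       scaling_steps=[
#           ScalingInterval(change=-1, upper=30),
#           ScalingInterval(change=2, lower=70),
#       ])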
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.StepScalingPolicyProps", jsii_struct_bases=[BasicStepScalingPolicyProps], name_mapping={'metric': 'metric', 'scaling_steps': 'scalingSteps', 'adjustment_type': 'adjustmentType', 'cooldown': 'cooldown', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'min_adjustment_magnitude': 'minAdjustmentMagnitude', 'auto_scaling_group': 'autoScalingGroup'})
class StepScalingPolicyProps(BasicStepScalingPolicyProps):
def __init__(self, *, metric: aws_cdk.aws_cloudwatch.IMetric, scaling_steps: typing.List["ScalingInterval"], adjustment_type: typing.Optional["AdjustmentType"]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, min_adjustment_magnitude: typing.Optional[jsii.Number]=None, auto_scaling_group: "IAutoScalingGroup"):
"""
:param metric: Metric to scale on.
:param scaling_steps: The intervals for scaling. Maps a range of metric values to a particular scaling behavior.
:param adjustment_type: How the adjustment numbers inside 'intervals' are interpreted. Default: ChangeInCapacity
:param cooldown: Grace period after scaling activity. Default: Default cooldown period on your AutoScalingGroup
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: Same as the cooldown
:param min_adjustment_magnitude: Minimum absolute number to adjust capacity with as result of percentage scaling. Only when using AdjustmentType = PercentChangeInCapacity, this number controls the minimum absolute effect size. Default: No minimum scaling effect
:param auto_scaling_group: The auto scaling group.
"""
self._values = {
'metric': metric,
'scaling_steps': scaling_steps,
'auto_scaling_group': auto_scaling_group,
}
if adjustment_type is not None: self._values["adjustment_type"] = adjustment_type
if cooldown is not None: self._values["cooldown"] = cooldown
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if min_adjustment_magnitude is not None: self._values["min_adjustment_magnitude"] = min_adjustment_magnitude
@builtins.property
def metric(self) -> aws_cdk.aws_cloudwatch.IMetric:
"""Metric to scale on."""
return self._values.get('metric')
@builtins.property
def scaling_steps(self) -> typing.List["ScalingInterval"]:
"""The intervals for scaling.
Maps a range of metric values to a particular scaling behavior.
"""
return self._values.get('scaling_steps')
@builtins.property
def adjustment_type(self) -> typing.Optional["AdjustmentType"]:
"""How the adjustment numbers inside 'intervals' are interpreted.
default
:default: ChangeInCapacity
"""
return self._values.get('adjustment_type')
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Grace period after scaling activity.
default
:default: Default cooldown period on your AutoScalingGroup
"""
return self._values.get('cooldown')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: Same as the cooldown
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def min_adjustment_magnitude(self) -> typing.Optional[jsii.Number]:
"""Minimum absolute number to adjust capacity with as result of percentage scaling.
Only when using AdjustmentType = PercentChangeInCapacity, this number controls
the minimum absolute effect size.
default
:default: No minimum scaling effect
"""
return self._values.get('min_adjustment_magnitude')
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
"""The auto scaling group."""
return self._values.get('auto_scaling_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'StepScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
class TargetTrackingScalingPolicy(aws_cdk.core.Construct, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-autoscaling.TargetTrackingScalingPolicy"):
def __init__(self, scope: aws_cdk.core.Construct, id: str, *, auto_scaling_group: "IAutoScalingGroup", target_value: jsii.Number, custom_metric: typing.Optional[aws_cdk.aws_cloudwatch.IMetric]=None, predefined_metric: typing.Optional["PredefinedMetric"]=None, resource_label: typing.Optional[str]=None, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None) -> None:
"""
:param scope: -
:param id: -
:param auto_scaling_group:
:param target_value: The target value for the metric.
        :param custom_metric: A custom metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No custom metric.
        :param predefined_metric: A predefined metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No predefined metric.
:param resource_label: The resource label associated with the predefined metric. Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the format should be: app///targetgroup// Default: - No resource label.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
"""
props = TargetTrackingScalingPolicyProps(auto_scaling_group=auto_scaling_group, target_value=target_value, custom_metric=custom_metric, predefined_metric=predefined_metric, resource_label=resource_label, cooldown=cooldown, disable_scale_in=disable_scale_in, estimated_instance_warmup=estimated_instance_warmup)
jsii.create(TargetTrackingScalingPolicy, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="scalingPolicyArn")
def scaling_policy_arn(self) -> str:
"""ARN of the scaling policy."""
return jsii.get(self, "scalingPolicyArn")
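# A minimal usage sketch (illustrative; ``stack`` and ``my_asg`` are
# assumptions). Track a predefined utilization metric toward a fixed target:
#
#   TargetTrackingScalingPolicy(stack, "KeepCpuAt50",
#       auto_scaling_group=my_asg,
#       predefined_metric=PredefinedMetric.ASG_AVERAGE_CPU_UTILIZATION,
#       target_value=50)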
@jsii.data_type(jsii_type="@aws-cdk/aws-autoscaling.TargetTrackingScalingPolicyProps", jsii_struct_bases=[BasicTargetTrackingScalingPolicyProps], name_mapping={'cooldown': 'cooldown', 'disable_scale_in': 'disableScaleIn', 'estimated_instance_warmup': 'estimatedInstanceWarmup', 'target_value': 'targetValue', 'custom_metric': 'customMetric', 'predefined_metric': 'predefinedMetric', 'resource_label': 'resourceLabel', 'auto_scaling_group': 'autoScalingGroup'})
class TargetTrackingScalingPolicyProps(BasicTargetTrackingScalingPolicyProps):
def __init__(self, *, cooldown: typing.Optional[aws_cdk.core.Duration]=None, disable_scale_in: typing.Optional[bool]=None, estimated_instance_warmup: typing.Optional[aws_cdk.core.Duration]=None, target_value: jsii.Number, custom_metric: typing.Optional[aws_cdk.aws_cloudwatch.IMetric]=None, predefined_metric: typing.Optional["PredefinedMetric"]=None, resource_label: typing.Optional[str]=None, auto_scaling_group: "IAutoScalingGroup"):
"""Properties for a concrete TargetTrackingPolicy.
Adds the scalingTarget.
:param cooldown: Period after a scaling completes before another scaling activity can start. Default: - The default cooldown configured on the AutoScalingGroup.
:param disable_scale_in: Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the autoscaling group. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the group. Default: false
:param estimated_instance_warmup: Estimated time until a newly launched instance can send metrics to CloudWatch. Default: - Same as the cooldown.
:param target_value: The target value for the metric.
        :param custom_metric: A custom metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No custom metric.
        :param predefined_metric: A predefined metric for application autoscaling. The metric must track utilization. Scaling out will happen if the metric is higher than the target value, scaling in will happen if the metric is lower than the target value. Exactly one of customMetric or predefinedMetric must be specified. Default: - No predefined metric.
:param resource_label: The resource label associated with the predefined metric. Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the format should be: app///targetgroup// Default: - No resource label.
:param auto_scaling_group:
"""
self._values = {
'target_value': target_value,
'auto_scaling_group': auto_scaling_group,
}
if cooldown is not None: self._values["cooldown"] = cooldown
if disable_scale_in is not None: self._values["disable_scale_in"] = disable_scale_in
if estimated_instance_warmup is not None: self._values["estimated_instance_warmup"] = estimated_instance_warmup
if custom_metric is not None: self._values["custom_metric"] = custom_metric
if predefined_metric is not None: self._values["predefined_metric"] = predefined_metric
if resource_label is not None: self._values["resource_label"] = resource_label
@builtins.property
def cooldown(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Period after a scaling completes before another scaling activity can start.
default
:default: - The default cooldown configured on the AutoScalingGroup.
"""
return self._values.get('cooldown')
@builtins.property
def disable_scale_in(self) -> typing.Optional[bool]:
"""Indicates whether scale in by the target tracking policy is disabled.
If the value is true, scale in is disabled and the target tracking policy
won't remove capacity from the autoscaling group. Otherwise, scale in is
enabled and the target tracking policy can remove capacity from the
group.
default
:default: false
"""
return self._values.get('disable_scale_in')
@builtins.property
def estimated_instance_warmup(self) -> typing.Optional[aws_cdk.core.Duration]:
"""Estimated time until a newly launched instance can send metrics to CloudWatch.
default
:default: - Same as the cooldown.
"""
return self._values.get('estimated_instance_warmup')
@builtins.property
def target_value(self) -> jsii.Number:
"""The target value for the metric."""
return self._values.get('target_value')
@builtins.property
def custom_metric(self) -> typing.Optional[aws_cdk.aws_cloudwatch.IMetric]:
"""A custom metric for application autoscaling.
The metric must track utilization. Scaling out will happen if the metric is higher than
        the target value, scaling in will happen if the metric is lower than the target value.
Exactly one of customMetric or predefinedMetric must be specified.
default
:default: - No custom metric.
"""
return self._values.get('custom_metric')
@builtins.property
def predefined_metric(self) -> typing.Optional["PredefinedMetric"]:
"""A predefined metric for application autoscaling.
The metric must track utilization. Scaling out will happen if the metric is higher than
        the target value, scaling in will happen if the metric is lower than the target value.
Exactly one of customMetric or predefinedMetric must be specified.
default
:default: - No predefined metric.
"""
return self._values.get('predefined_metric')
@builtins.property
def resource_label(self) -> typing.Optional[str]:
"""The resource label associated with the predefined metric.
Should be supplied if the predefined metric is ALBRequestCountPerTarget, and the
format should be:
app///targetgroup//
default
:default: - No resource label.
"""
return self._values.get('resource_label')
@builtins.property
def auto_scaling_group(self) -> "IAutoScalingGroup":
return self._values.get('auto_scaling_group')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'TargetTrackingScalingPolicyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/aws-autoscaling.UpdateType")
class UpdateType(enum.Enum):
"""The type of update to perform on instances in this AutoScalingGroup."""
NONE = "NONE"
"""Don't do anything."""
REPLACING_UPDATE = "REPLACING_UPDATE"
"""Replace the entire AutoScalingGroup.
Builds a new AutoScalingGroup first, then delete the old one.
"""
ROLLING_UPDATE = "ROLLING_UPDATE"
"""Replace the instances in the AutoScalingGroup."""
__all__ = ["AdjustmentTier", "AdjustmentType", "AutoScalingGroup", "AutoScalingGroupProps", "BaseTargetTrackingProps", "BasicLifecycleHookProps", "BasicScheduledActionProps", "BasicStepScalingPolicyProps", "BasicTargetTrackingScalingPolicyProps", "BlockDevice", "BlockDeviceVolume", "CfnAutoScalingGroup", "CfnAutoScalingGroupProps", "CfnLaunchConfiguration", "CfnLaunchConfigurationProps", "CfnLifecycleHook", "CfnLifecycleHookProps", "CfnScalingPolicy", "CfnScalingPolicyProps", "CfnScheduledAction", "CfnScheduledActionProps", "CommonAutoScalingGroupProps", "CpuUtilizationScalingProps", "CronOptions", "DefaultResult", "EbsDeviceOptions", "EbsDeviceOptionsBase", "EbsDeviceProps", "EbsDeviceSnapshotOptions", "EbsDeviceVolumeType", "Ec2HealthCheckOptions", "ElbHealthCheckOptions", "HealthCheck", "IAutoScalingGroup", "ILifecycleHook", "ILifecycleHookTarget", "LifecycleHook", "LifecycleHookProps", "LifecycleHookTargetConfig", "LifecycleTransition", "MetricAggregationType", "MetricTargetTrackingProps", "NetworkUtilizationScalingProps", "PredefinedMetric", "RequestCountScalingProps", "RollingUpdateConfiguration", "ScalingInterval", "ScalingProcess", "Schedule", "ScheduledAction", "ScheduledActionProps", "StepScalingAction", "StepScalingActionProps", "StepScalingPolicy", "StepScalingPolicyProps", "TargetTrackingScalingPolicy", "TargetTrackingScalingPolicyProps", "UpdateType", "__jsii_assembly__"]
publication.publish()
|
[
"mmmasat@a483e7dd7ec3.ant.amazon.com"
] |
mmmasat@a483e7dd7ec3.ant.amazon.com
|
c6c6323c6e8149cfa777226d6b774aadd0f6d089
|
aa1e637de90f69f9ae742d42d5b777421617d10c
|
/nitro/resource/config/ntp/ntpparam.py
|
eeeb63f88a5a0e1502be215dfd0c498591acd78f
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
km0420j/nitro-python
|
db7fcb49fcad3e7a1ae0a99e4fc8675665da29ba
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
refs/heads/master
| 2021-10-21T18:12:50.218465
| 2019-03-05T14:00:15
| 2019-03-05T15:35:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,294
|
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class ntpparam(base_resource) :
"""Configuration for NTP parameter resource."""
def __init__(self) :
self._authentication = ""
self._trustedkey = []
self._autokeylogsec = 0
self._revokelogsec = 0
@property
def authentication(self) :
"""Apply NTP authentication, which enables the NTP client (NetScaler) to verify that the server is in fact known and trusted.<br/>Default value: YES<br/>Possible values = YES, NO."""
try :
return self._authentication
except Exception as e:
raise e
@authentication.setter
def authentication(self, authentication) :
"""Apply NTP authentication, which enables the NTP client (NetScaler) to verify that the server is in fact known and trusted.<br/>Default value: YES<br/>Possible values = YES, NO
:param authentication:
"""
try :
self._authentication = authentication
except Exception as e:
raise e
@property
def trustedkey(self) :
"""Key identifiers that are trusted for server authentication with symmetric key cryptography in the keys file.<br/>Minimum length = 1<br/>Maximum length = 65534."""
try :
return self._trustedkey
except Exception as e:
raise e
@trustedkey.setter
def trustedkey(self, trustedkey) :
"""Key identifiers that are trusted for server authentication with symmetric key cryptography in the keys file.<br/>Minimum length = 1<br/>Maximum length = 65534
:param trustedkey:
"""
try :
self._trustedkey = trustedkey
except Exception as e:
raise e
@property
def autokeylogsec(self) :
"""Autokey protocol requires the keys to be refreshed periodically. This parameter specifies the interval between regenerations of new session keys. In seconds, expressed as a power of 2.<br/>Default value: 12<br/>Maximum length = 32."""
try :
return self._autokeylogsec
except Exception as e:
raise e
@autokeylogsec.setter
def autokeylogsec(self, autokeylogsec) :
"""Autokey protocol requires the keys to be refreshed periodically. This parameter specifies the interval between regenerations of new session keys. In seconds, expressed as a power of 2.<br/>Default value: 12<br/>Maximum length = 32
:param autokeylogsec:
"""
try :
self._autokeylogsec = autokeylogsec
except Exception as e:
raise e
@property
def revokelogsec(self) :
"""Interval between re-randomizations of the autokey seeds to prevent brute-force attacks on the autokey algorithms.<br/>Default value: 16<br/>Maximum length = 32."""
try :
return self._revokelogsec
except Exception as e:
raise e
@revokelogsec.setter
def revokelogsec(self, revokelogsec) :
"""Interval between re-randomizations of the autokey seeds to prevent brute-force attacks on the autokey algorithms.<br/>Default value: 16<br/>Maximum length = 32
:param revokelogsec:
"""
try :
self._revokelogsec = revokelogsec
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(ntpparam_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ntpparam
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update ntpparam.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = ntpparam()
updateresource.authentication = resource.authentication
updateresource.trustedkey = resource.trustedkey
updateresource.autokeylogsec = resource.autokeylogsec
updateresource.revokelogsec = resource.revokelogsec
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of ntpparam resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = ntpparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the ntpparam resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = ntpparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Authentication:
""" """
YES = "YES"
NO = "NO"
class ntpparam_response(base_response) :
""" """
def __init__(self, length=1) :
self.ntpparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ntpparam = [ntpparam() for _ in range(length)]
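# A minimal usage sketch (illustrative; assumes ``client`` is an
# authenticated nitro_service session):
#
#   param = ntpparam.get(client)[0]
#   param.authentication = ntpparam.Authentication.NO
#   ntpparam.update(client, param)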
|
[
"lennart.weller@hansemerkur.de"
] |
lennart.weller@hansemerkur.de
|
babcc69b9a28f701d40e2b837b6de7735e6547a2
|
fbb544fda7b5b90162ef7b86ccb38f71e6f07c02
|
/src/vision/scripts/pan_tilt_node.py
|
1fa10d2256a1328e9aafcea3f8dac8099d18127b
|
[] |
no_license
|
RingOfFireOrg/PyroKenHumanoidAI
|
5979ae72f21544273b17c9c6593b24d681ed02e6
|
94feaca1d60fa4eb04098e49f9add76a2fad6188
|
refs/heads/master
| 2020-03-13T17:53:51.103728
| 2018-04-27T01:06:43
| 2018-04-27T01:06:43
| 131,226,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,164
|
py
|
#!/usr/bin/env python
# This ROS Node converts Joystick inputs from the joy node
# into PiPan controls to manipulate a pan-tilt camera
# It publishes joint state messages.
# =======================================================
from __future__ import division
import atexit
from math import pi
import rospy
from sensor_msgs.msg import Joy
from sensor_msgs.msg import JointState
import time
import threading
##### pin assignments
left_tilt_pin = 1
right_tilt_pin = 2
pan_pin = 0
class PanTiltConfig(object):
def __init__(self):
self.pan_pin = 0
self.tilt_pin = 1
self.pan_left_limit = 90
self.pan_right_limit = 210
self.tilt_down_limit = 125
self.tilt_up_limit = 185
self.pan_center = 150
self.tilt_center = 155
class PanTilt(object):
def __init__(self, config=PanTiltConfig()):
try:
self.config = config
self.sb = open('/dev/servoblaster', 'w')
except (IOError):
print "*** ERROR ***"
print "Unable to open the device, check that servod is running"
print "To start servod, run: sudo /etc/init.d/servoblaster.sh start"
exit()
def pwm(self, pin, angle):
self.sb.write(str(pin) + '=' + str(int(angle)) + '\n')
self.sb.flush()
def go(self, pan_angle, tilt_angle):
self.pwm(self.config.tilt_pin, map(tilt_angle, 0, 100, self.config.tilt_down_limit, self.config.tilt_up_limit))
self.pwm(self.config.pan_pin, map(pan_angle, 0, 100, self.config.pan_left_limit, self.config.pan_right_limit))
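# map() below is a module-level linear-rescale helper (note that it shadows
# the builtin map): e.g. map(0.0, -1, 1, 90, 210) == 150.0, the midpoint of
# the servo range.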
def map(value, domainLow, domainHigh, rangeLow, rangeHigh):
return ((value - domainLow) / (domainHigh - domainLow)) * (rangeHigh - rangeLow) + rangeLow
# Receives joystick messages (subscribed to Joy topic)
# then converts the joystick inputs into pipan movement commands
# axis 1 aka left stick vertical controls the tilt servo
# axis 0 aka left stick horizontal controls the pan servo
# servo angle limits
loX = 0
hiX = 100
loY = 0
hiY = 100
speedFactor = 4
speedFactorX = -speedFactor
speedFactorY = speedFactor
# initial position and velocity
x0 = 50
y0 = 50
x = x0
y = y0
dx = 0
dy = 0
goCenter = False
def pan_tilt():
global p, x0, y0, x, y, dx, dy, goCenter
while True:
if (goCenter):
x = x0
y = y0
p.go(x, y)
goCenter = False
publish_joint_state()
elif (dx != 0 or dy != 0):
x += dx
y += dy
if (x < loX): x = loX
if (x > hiX): x = hiX
if (y < loY): y = loY
if (y > hiY): y = hiY
p.go(x, y)
publish_joint_state()
        # the else block below actively holds the commanded position
else:
p.go(x, y)
time.sleep(0.05)
def publish_joint_state():
pan_angle = (x0 - x) * pi / 180.0
tilt_angle = 0.4618 * (y - y0) * pi / 180.0 # TODO correct this for the geometry of the robot
print "x=" + str(x) + ", y=" + str(y) + ", pan=" + str(x-x0) + ", tilt=" + str(y-y0)
#print "pan_angle=" + str(pan_angle) + ", tilt_angle=" + str(tilt_angle)
joint_state = JointState()
joint_state.header.stamp = rospy.Time.now()
joint_state.name.append('upper_neck_head_joint') # pan
joint_state.position.append(pan_angle)
joint_state.velocity.append(0)
joint_state.name.append('torso_neck_joint') # tilt
joint_state.position.append(tilt_angle)
joint_state.velocity.append(0)
publisher.publish(joint_state)
def callback(data):
global dx, dy, speedFactorX, speedFactorY, goCenter
a = data.axes
#x = map(a[0], -1, 1, loX, hiX)
#y = map(a[1], -1, 1, loY, hiY)
    dx = speedFactorX * a[0]
    dy = speedFactorY * a[1]
if (data.buttons[8] == 1): # SELECT button pressed
goCenter = True
class PanTiltNode(object):
def __init__(self):
global p, publisher
rospy.init_node('pan_tilt_node')
config = PanTiltConfig()
config.pan_pin = int(self.get_param("pan_pin", "0"))
config.tilt_pin = int(self.get_param("tilt_pin", "1"))
config.pan_left_limit = int(self.get_param("pan_left_limit", "90"))
config.pan_right_limit = int(self.get_param("pan_right_limit", "210"))
config.tilt_down_limit = int(self.get_param("tilt_down_limit", "125"))
config.tilt_up_limit = int(self.get_param("tilt_up_limit", "185"))
config.pan_center = int(self.get_param("pan_center", "150"))
config.tilt_center = int(self.get_param("tilt_center", "155"))
p = PanTilt(config)
# publish joint states to sync with rviz virtual model
# the topic to publish to is defined in the source_list parameter
# as: rosparam set source_list "['joints']"
publisher = rospy.Publisher("/joints", JointState)
# subscribed to joystick inputs on topic "joy"
rospy.Subscriber("/joy", Joy, callback)
def get_param(self, param_name, param_default):
value = rospy.get_param(param_name, param_default)
rospy.loginfo('Parameter %s has value %s', rospy.resolve_name(param_name), value)
return value
def run(self):
t = threading.Thread(target=pan_tilt)
t.daemon = True
t.start()
rospy.spin()
def on_exit(self):
rospy.loginfo("Exiting.")
if __name__ == '__main__':
try:
node = PanTiltNode()
atexit.register(node.on_exit)
node.run()
except rospy.ROSInterruptException:
pass
|
[
"daniel.w.mcdonald@gmail.com"
] |
daniel.w.mcdonald@gmail.com
|
5c7a26e769958947f026aad486869e5e75bbac75
|
635aac1d7c2bc0b98c93e7c951f71639b79dfef1
|
/cir/phase_control.py
|
386e46aacff66198beb09b09f5a118162a5b6b2d
|
[
"MIT"
] |
permissive
|
wafield/cir
|
b45c61bbd0685ebda21c3da1f2f37c05bbc9cd11
|
123d4bfe3e5bb4b0d605de486a91a0cb7eb34e4c
|
refs/heads/master
| 2022-10-10T09:13:26.870288
| 2018-06-29T06:14:20
| 2018-06-29T06:14:20
| 138,433,868
| 0
| 0
|
MIT
| 2018-06-23T21:11:53
| 2018-06-23T21:11:53
| null |
UTF-8
|
Python
| false
| false
| 4,248
|
py
|
PHASE_CONTROL = {
'paused': {
'short_name': 'paused',
'full_name': 'Paused',
'instructions': 'The deliberation process is temporarily paused.',
'claim_active': 'active',
'vote_btn': 'disabled',
'prioritize_btn': 'disabled',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'not_started': {
'short_name': 'not_started',
'full_name': 'Not started',
'instructions': 'The deliberation is not started yet. Further instructions will be updated here!',
'document_active': 'active',
'vote_btn': 'hidden',
'prioritize_btn': 'hidden',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'tagging': {
'short_name': 'tagging',
'full_name': 'Tagging',
'instructions': 'You\'ll be working on ... Please ... This process will last ...',
'show_highlight_toolbar': True,
'document_active': 'active',
'vote_btn': 'hidden',
'prioritize_btn': 'hidden',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'nugget': {
'short_name': 'nugget',
'full_name': 'Nugget extraction',
        'instructions': 'In this phase you will be working on nugget extraction. Nuggets are informative pieces in the documents, which could serve as the source material for claim construction. To extract a nugget, please ... This process will end on ...',
'show_highlight_toolbar': True,
'document_active': 'active',
'vote_btn': 'hidden',
'prioritize_btn': 'hidden',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'extract': {
'short_name': 'extract',
'full_name': 'Claim construction',
'instructions': 'The goal of this phase is to build claims out of the nuggets we collaboratively extracted. When you compose a claim, please be sure to make it a clear, coherent and meaningful sentence. Avoid any language that would be difficult to understand by an average citizen.',
'show_highlight_toolbar': True,
'document_active': 'active',
'vote_btn': 'hidden',
'prioritize_btn': 'hidden',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'categorize': {
'short_name': 'categorize',
'full_name': 'Claim categorization',
        'instructions': 'Before we proceed with claim improvement, let\'s categorize the claims we just composed. Available categories are Pro, Con and Key Finding. If you don\'t think a claim should proceed to the next round, you can also vote Discard on it. Facilitators will perform the final categorization for each claim.',
'claim_active': 'active',
'prioritize_btn': 'hidden',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'theming': {
'short_name': 'theming',
'full_name': 'Claim theme identification',
'instructions': 'You\'ll be working on ... Please ... This process will last ...',
'claim_active': 'active',
'vote_btn': 'disabled',
'prioritize_btn': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
},
'improve': {
'short_name': 'improve',
'full_name': 'Claim prioritization and improvement',
'instructions': 'This phase involves claim consolidation and improvement in terms of information and language. Please use the rewording and merging functions.',
'claim_active': 'active',
'vote_btn': 'hidden',
'theming_menu': 'hidden',
},
'finished': {
'short_name': 'finished',
'full_name': 'Finished',
'instructions': 'The deliberation event has ended. Please view the Citizens\' Statement.',
'show_statement_link': True,
'claim_active': 'active',
'vote_btn': 'disabled',
'prioritize_btn': 'disabled',
'theming_menu': 'hidden',
'improve_menu': 'hidden',
'addstmt_btn': 'hidden',
}
}
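# A minimal usage sketch (illustrative; how the view/template layer consumes
# these flags is an assumption of this example):
#
#   phase = PHASE_CONTROL['tagging']
#   context = {
#       'instructions': phase['instructions'],
#       'can_vote': phase.get('vote_btn') != 'hidden',
#   }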
|
[
"wafield.pku@gmail.com"
] |
wafield.pku@gmail.com
|
f2b34dd64c3e7b24cac4c5d362dec04c6200c5d4
|
b207df9aa87729e7802eb2cd825c72d3c13cc64b
|
/flipping.py
|
b0d56e670916aecc572b82b2c2e0454830eb9434
|
[] |
no_license
|
chandumanu111/tcsTest
|
b74be23e81226a98c66bc30e901e58de394540ba
|
01c61c5bf7be492fbcdfac3c846cc571a98c11c2
|
refs/heads/main
| 2023-08-14T18:15:24.986064
| 2021-10-15T16:38:07
| 2021-10-15T16:38:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 23:01:37 2019
@author: Manu
"""
#image flipping
'''
from PIL import Image
img=Image.open('textmirror.jpg')
transpose_image=img.transpose(Image.FLIP_LEFT_RIGHT)
transpose_image.save('corrected.jpg')
print("done flipping")
'''
import cv2
img=cv2.imread('bullethole.jpg')
clahe=cv2.createCLAHE()
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
enh_image=clahe.apply(gray_img)
cv2.imwrite('enhanced.jpg',enh_image)
print("enhanced")
|
[
"manasakrishna121@gmail.com"
] |
manasakrishna121@gmail.com
|
c05336076541453c1569b9d8c09221f777d63f7b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/2nciiXZN4HCuNEmAi_6.py
|
0bd7f17fca7fe6993cac5c0ec60e2e367d3dc816
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
def flatten(r, resp=None, a="main"):
    # Recursively flatten an arbitrarily nested list. resp accumulates every
    # element seen; the marker a distinguishes the top-level call ("main")
    # from recursive calls ("rec").
    if resp is None:
        resp = list()
    if type(r) == list:
        for i in r:
            resp.append(flatten(i, resp, "rec"))
    else:
        return r
    # Only the top-level call filters out the intermediate list objects left
    # behind by the recursive appends, keeping just the scalar elements.
    return resp if a == "rec" else [i for i in resp if type(i) != list]
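# e.g. flatten([1, [2, [3, 4]], 5]) returns [1, 2, 3, 4, 5]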
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
967bf98e119cf4945094659fc337d80cf50c826f
|
764591f1160e39898b8a7ffef262839954c15675
|
/hit_project/hit_project/settings.py
|
9935c5042ecf2069014a0e9132c02b6bc5f83f52
|
[] |
no_license
|
aryakipathak/django-project
|
62c01ebc3d4cd5038b5217778c57a54aa40939ad
|
4b42743bc71a3f87354379e1003e47ecb3bb842d
|
refs/heads/master
| 2020-04-13T04:52:09.833424
| 2018-12-24T13:26:08
| 2018-12-24T13:26:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,390
|
py
|
"""
Django settings for hit_project project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uppsj7fm0n&_ro7pb=kun_u0z##67ov_bm^q1yp&bx%x3gv-cb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'crispy_forms',
'users.apps.UsersConfig',
'blog.apps.BlogConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hit_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hit_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
|
[
"noreply@github.com"
] |
aryakipathak.noreply@github.com
|
04de02a0bdc8ba3e56394b0e811691af03f8d834
|
7a4a33bd2181f818494e95bf33cf02117172dca1
|
/src/harmonic_oscillator_classical_limit_energy.py
|
d9e92e85b813e6b3c1fbd32355de44ce9f4cb3bd
|
[
"MIT"
] |
permissive
|
s6beotto/Harmonic-Oscillator
|
f3e804496f2931b9309656ed1cbfe56299d819b7
|
081555ea0120350f400586e67a26076f6f6ee2ad
|
refs/heads/master
| 2021-02-14T14:49:06.253395
| 2020-03-31T18:13:41
| 2020-03-31T18:13:41
| 244,812,959
| 0
| 0
| null | 2020-03-12T20:40:48
| 2020-03-04T05:10:32
|
Python
|
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
#!/usr/bin/env python3
# import modules
from tools import Potential, Kinetic, Energy, deltaEnergy, Metropolis, getRootDirectory, running_mean, block, autoCorrelationNormalized, getIntegratedCorrelationTime
import numpy as np
from multiprocessing import Pool
import csv
from itertools import islice
from configparser import ConfigParser
import argparse
import pathlib
# parse CLI arguments
parser = argparse.ArgumentParser(description='Create samples for the harmonic oscillator, vary hbar')
parser.add_argument('-i', '--iterations', type=int, default=1000,
help='Number of Metropolis iterations')
parser.add_argument('-N', '--number', type=int, default=1000,
help='Number of lattice sites')
parser.add_argument('-m', '--mass', type=float, default=0.01,
help='Mass of the particle')
parser.add_argument('-u', '--mu', type=float, default=10,
help='Depth of the potential')
parser.add_argument('-t', '--tau', type=float, default=0.1,
help='Time step size')
parser.add_argument('-hb', '--hbar', type=str, default='0:2:0.05',
help='Values of the reduced Plancks constant')
parser.add_argument('-init', '--initial', type=float, default=0,
help='Initial values for the path')
parser.add_argument('-ir', '--initial-random', type=float, default=0,
help='Use random distribution around initial value')
parser.add_argument('-rw', '--random-width', type=float, default=1,
help='Width of the gaussian distribution to use to get the next iteration')
parser.add_argument('-s', '--step', action='store_true',
help='Use a step function as initial state')
parser.add_argument('-o', '--output', type=pathlib.Path,
help='Output filename')
args = parser.parse_args()
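# Example invocation (illustrative values):
#   python3 harmonic_oscillator_classical_limit_energy.py -N 500 -i 2000 -hb 0.5:2:0.1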
# extract parameters
iterations = args.iterations
N = args.number
mass = args.mass
mu = args.mu
tau = args.tau
hbar_min, hbar_max, hbar_step = (float(h) for h in args.hbar.split(':'))
initial = args.initial
initial_random = args.initial_random
random_width = args.random_width
step = args.step
output = args.output
parameters = [
'N', 'mass', 'mu', 'tau', 'hbar_min', 'hbar_max', 'hbar_step',
'initial', 'initial_random', 'random_width', 'step',
]
# filesystem stuff
root_path = getRootDirectory()
dir_ = root_path / 'data' / 'harmonic_oscillator_classical_limit_energy'
dir_.mkdir(exist_ok=True)
file_ = dir_ / ('h%0.2f-%0.2f-%0.4f-N%d.csv' % (hbar_min, hbar_max, hbar_step, N))
if output is not None:
file_ = output
# config output
config_filename = file_.with_suffix('.cfg')
config = ConfigParser()
config['DEFAULT'] = {p: eval(p) for p in parameters}
config['DEFAULT']['type'] = 'harmonic_oscillator_classical_limit_energy'
hbars = np.arange(hbar_min + hbar_step, hbar_max + hbar_step, hbar_step)
def calculateEnergy(hbar):
print('calculating for hbar=%0.4f' % hbar)
m = Metropolis(init=initial, valWidth=random_width, initValWidth=initial_random, hbar=hbar, tau=tau, N=N, m=mass, lambda_=0, mu=mu)
k = Kinetic(mass, tau)
p = Potential(mu, 0)
e = Energy(k, p)
accept_ratios = []
energies = []
for _ in range(iterations):
        d, a = next(m)
accept_ratios.append(a)
energies.append(e(d))
energies = np.array(energies)
# calculate mean energy
d = energies[:-1] - energies[1:]
da = running_mean(d, 10)
if da[0] > 0:
start = np.argmax(da < 0) + 10
else:
start = np.argmax(da > 0) + 10
energies_cut = energies[start:]
energies_cut_ac = autoCorrelationNormalized(energies_cut, np.arange(len(energies_cut)))
# calculate integrated autocorrelation time
tint, dtint, w_max = getIntegratedCorrelationTime(energies_cut_ac, factor=10)
step_size = int((tint + dtint) * 2 + 1)
energies_blocked = block(energies_cut, step_size)
energy, denergy = np.mean(energies_blocked), np.std(energies_blocked)
return [energy, denergy], np.mean(accept_ratios)
# use a multiprocessing pool to generate data in a parallel manner
#for hbar in np.linspace(0.1, 2.0, 20):
# print(calculateEnergy(hbar))
p = Pool()
results = p.map(calculateEnergy, hbars)
accept_ratio = np.mean([r[1] for r in results])
# save csv
with file_.open('w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['hbar', 'energy', 'denergy'])
for i, hbar in enumerate(hbars):
writer.writerow([hbar] + results[i][0])
config['DEFAULT']['accept_ratio'] = str(accept_ratio)
with open(config_filename, 'w') as configfile:
config.write(configfile)
|
[
"s6beotto@uni-bonn.de"
] |
s6beotto@uni-bonn.de
|
6c1cc6e54292ce3dc873ecf2c2ab56d103b1057d
|
6ac0aa077de3d234ad19eae7e65813d3b99a89af
|
/138_CopyListWithRandomPointer/solution2.py
|
155c275cf582a3be2b031f93097cf175bc0f79a3
|
[] |
no_license
|
changchingchen/leetcode
|
2c85dd2c433dd3937be3f74a87534a8fccd80b38
|
481e873a0332c7939e3a24281bcea837b383af58
|
refs/heads/master
| 2023-01-12T02:08:44.339658
| 2020-10-25T14:33:44
| 2020-10-25T14:33:44
| 266,359,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
class Solution:
def copyRandomList(self, head: 'Node') -> 'Node':
if not head:
return None
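        # Pass 1: interleave a copy after each original node (A -> A' -> B -> B' ...).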
node = head
while node:
next_node = node.next
node.next = Node(node.val, next_node)
node = next_node
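        # Pass 2: each copy's random is the node right after the original's random.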
node = head
while node:
if node.random:
node.next.random = node.random.next
node = node.next.next
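        # Pass 3: unweave the interleaved list, restoring the original list and
        # extracting the copied one.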
node = head
copied_head = head.next
while node:
next_node = node.next.next
copied_node = node.next
if next_node:
copied_node.next = next_node.next
node.next = next_node
node = next_node
return copied_head
# Time: O(N)
# Space: O(1)
|
[
"tim.changching.chen@gmail.com"
] |
tim.changching.chen@gmail.com
|