| max_stars_repo_path (string, len 3–269) | max_stars_repo_name (string, len 4–119) | max_stars_count (int64, 0–191k) | id (string, len 1–7) | content (string, len 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
a301/landsat/toa_radiance.py
|
Pearl-Ayem/ATSC_Notebook_Data
| 0
|
12784151
|
"""
ported from https://github.com/NASA-DEVELOP/dnppy/tree/master/dnppy/landsat
"""
# standard imports
from .landsat_metadata import landsat_metadata
from . import core
import os
from pathlib import Path
import numpy as np
import rasterio
__all__ = ['toa_radiance_8', # complete
'toa_radiance_457',
'calc_radiance_8',
'calc_radiance_457'
]
def calc_radiance_457(band_num, meta_path):
    """
    Calculate the radiance for a Landsat 4, 5, or 7 band
    Parameters
    ----------
    band_num: str
        landsat band
    meta_path: Path object
        path to MTL.txt file for scene
    Returns
    -------
    TOA_rad: ndarray, 2-d, float32
        radiance for the scene (W/m^2/micron/sr)
    """
    metadata = landsat_metadata(meta_path)
#the presence of a PRODUCT_CREATION_TIME category is used to identify old metadata
#if this is not present, the meta data is considered new.
#Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer
#metadata format was changed August 29, 2012. This tool can process either the new or old format
with open(meta_path) as f:
MText = f.read()
if "PRODUCT_CREATION_TIME" in MText:
Meta = "oldMeta"
Band6length = 2
else:
Meta = "newMeta"
Band6length = 8
    #The tile name is located using the newMeta/oldMeta indices and the date of capture is recorded
if Meta == "newMeta":
TileName = getattr(metadata, "LANDSAT_SCENE_ID")
year = TileName[9:13]
jday = TileName[13:16]
date = getattr(metadata, "DATE_ACQUIRED")
elif Meta == "oldMeta":
TileName = getattr(metadata, "BAND1_FILE_NAME")
year = TileName[13:17]
jday = TileName[17:20]
date = getattr(metadata, "ACQUISITION_DATE")
    #the spacecraft from which the imagery was captured is identified
#this info determines the solar exoatmospheric irradiance (ESun) for each band
spacecraft = getattr(metadata, "SPACECRAFT_ID")
if "7" in spacecraft:
TM_ETM_bands = ['1','2','3','4','5','7','8']
elif "5" in spacecraft:
TM_ETM_bands = ['1','2','3','4','5','7']
elif "4" in spacecraft:
TM_ETM_bands = ['1','2','3','4','5','7']
else:
raise ValueError("Landsat 4, 5, or 7")
#arcpy.AddError("This tool only works for Landsat 4, 5, or 7")
#raise arcpy.ExecuteError()
if band_num not in TM_ETM_bands:
errmsg=f"""Can only perform reflectance conversion on OLI sensor bands")
Skipping band {band_num}
"""
raise ValueError(errmsg)
print(f"Processing radiance for band {band_num}")
str_path = str(meta_path)
band_path = Path(str_path.replace("MTL.txt",f"B{band_num}.TIF"))
with rasterio.open(str(band_path)) as raster:
Qcal = raster.read(1)
hit = (Qcal == 0)
Qcal=Qcal.astype(np.float32)
Qcal[hit]=np.nan
    #using the oldMeta/newMeta indices to pull the min/max for radiance/Digital numbers
if Meta == "newMeta":
LMax = getattr(metadata, "RADIANCE_MAXIMUM_BAND_{0}".format(band_num))
LMin = getattr(metadata, "RADIANCE_MINIMUM_BAND_{0}".format(band_num))
QCalMax = getattr(metadata, "QUANTIZE_CAL_MAX_BAND_{0}".format(band_num))
QCalMin = getattr(metadata, "QUANTIZE_CAL_MIN_BAND_{0}".format(band_num))
elif Meta == "oldMeta":
LMax = getattr(metadata, "LMAX_BAND{0}".format(band_num))
LMin = getattr(metadata, "LMIN_BAND{0}".format(band_num))
QCalMax = getattr(metadata, "QCALMAX_BAND{0}".format(band_num))
QCalMin = getattr(metadata, "QCALMIN_BAND{0}".format(band_num))
TOA_rad = (((LMax - LMin)/(QCalMax-QCalMin)) * (Qcal - QCalMin)) + LMin
return TOA_rad
def calc_radiance_8(Qcal, band_num, meta_path):
    """
    Calculate the radiance for a Landsat 8 band
    Parameters
    ----------
    Qcal: ndarray, 2-D, float32
        landsat scene counts (digital numbers), with fill pixels set to NaN
    band_num: str
        landsat band
    meta_path: Path object
        path to MTL.txt file for scene
    Returns
    -------
    TOA_rad: ndarray, 2-d, float32
        radiance for the scene (W/m^2/micron/sr)
    """
meta = landsat_metadata(meta_path)
#scrape the attribute data
Ml = getattr(meta,"RADIANCE_MULT_BAND_{0}".format(band_num)) # multiplicative scaling factor
Al = getattr(meta,"RADIANCE_ADD_BAND_{0}".format(band_num)) # additive rescaling factor
#calculate Top-of-Atmosphere radiance
TOA_rad = (Qcal * Ml) + Al
return TOA_rad
def toa_radiance_8(band_nums, meta_path):
"""
Top of Atmosphere radiance (in Watts/(square meter x steradians x micrometers))
conversion for landsat 8 data. To be performed on raw Landsat 8
level 1 data. See link below for details:
see here http://landsat.usgs.gov/Landsat8_Using_Product.php
Parameters
----------
band_nums: list
A list of desired band numbers such as [3, 4, 5]
meta_path: str or Path object
The full filepath to the MTL.txt metadata file for those bands
Returns
-------
out_dict: dict
dictionary with band_num as keys and TOA radiance (W/m2/sr/um) as values
"""
meta_path = Path(meta_path).resolve()
#enforce list of band numbers and grab the metadata from the MTL file
band_nums = core.enf_list(band_nums)
band_nums = map(str, band_nums)
OLI_bands = ['1','2','3','4','5','6','7','8','9']
#loop through each band
out_dict=dict()
for band_num in band_nums:
print(f'working on band {band_num}')
if band_num not in OLI_bands:
print("Can only perform reflectance conversion on OLI sensor bands")
print("Skipping band {0}".format(band_num))
continue
#create the band name
str_path = str(meta_path)
band_path = Path(str_path.replace("MTL.txt",f"B{band_num}.TIF"))
with rasterio.open(str(band_path)) as raster:
Qcal = raster.read(1)
hit = (Qcal == 0)
Qcal=Qcal.astype(np.float32)
Qcal[hit]=np.nan
out_dict[int(band_num)]=calc_radiance_8(Qcal,band_num,meta_path)
return out_dict
def toa_radiance_457(band_nums, meta_path, outdir = None):
"""
Top of Atmosphere radiance (in Watts/(square meter x steradians x micrometers))
conversion for Landsat 4, 5, or 7 level 1 data.
See link below for details:
see here http://landsat.usgs.gov/Landsat8_Using_Product.php
Parameters
----------
band_nums: list
A list of desired band numbers such as [3, 4, 5]
meta_path: str or Path object
The full filepath to the MTL.txt metadata file for those bands
Returns
-------
out_dict: dict
dictionary with band_num as keys and TOA radiance (W/m2/sr/um) as values
"""
meta_path = Path(meta_path).resolve()
band_nums = core.enf_list(band_nums)
band_nums = map(str, band_nums)
#Calculating values for each band
out_dict={}
for band_num in band_nums:
        out_dict[int(band_num)] = calc_radiance_457(band_num, meta_path)
return out_dict
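# --- Example usage (a minimal sketch with a hypothetical scene name, not part of the original module) ---
#     from a301.landsat.toa_radiance import toa_radiance_8
#     rad = toa_radiance_8([4, 5], "LC08_L1TP_047026_20150614_01_T1_MTL.txt")
#     red, nir = rad[4], rad[5]   # TOA radiance arrays (W/m^2/micron/sr)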
| 2.28125
| 2
|
fkie_node_manager/src/fkie_node_manager/nmd_client/__init__.py
|
JOiiNT-LAB/multimaster_fkie
| 194
|
12784152
|
<filename>fkie_node_manager/src/fkie_node_manager/nmd_client/__init__.py
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Fraunhofer FKIE/CMS, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QObject, Signal
import fkie_node_manager_daemon.remote as remote
from .file_channel import FileChannel
from .launch_channel import LaunchChannel
from .monitor_channel import MonitorChannel
from .screen_channel import ScreenChannel
from .settings_channel import SettingsChannel
from .version_channel import VersionChannel
class NmdClient(QObject):
error = Signal(str, str, str, Exception)
'''
:ivar str,str,str,Exception error: error is a signal, which is emitted on errors {method, url, path, Exception}.
'''
def __init__(self):
QObject.__init__(self)
self._channels = []
self.file = FileChannel()
self.file.error.connect(self.on_error)
self._channels.append(self.file)
self.launch = LaunchChannel()
self.launch.error.connect(self.on_error)
self._channels.append(self.launch)
self.monitor = MonitorChannel()
self.monitor.error.connect(self.on_error)
self._channels.append(self.monitor)
self.screen = ScreenChannel()
self.screen.error.connect(self.on_error)
self._channels.append(self.screen)
self.settings = SettingsChannel()
self.settings.error.connect(self.on_error)
self._channels.append(self.settings)
self.version = VersionChannel()
self.version.error.connect(self.on_error)
self._channels.append(self.version)
def stop(self):
print("clear grpc channels...")
for channel in self._channels:
channel.stop()
remote.clear_channels()
print("clear grpc channels...ok")
self.clear_cache()
del self._channels[:]
def clear_cache(self, grpc_path=''):
for channel in self._channels:
channel.clear_cache(grpc_path)
def on_error(self, method, url, path, exception):
self.error.emit(method, url, path, exception)
| 1.25
| 1
|
makeadditions/transform/llvm/__init__.py
|
hutoTUM/MakeAdditions
| 0
|
12784153
|
<reponame>hutoTUM/MakeAdditions<filename>makeadditions/transform/llvm/__init__.py
"""
Import all modules from this directory
Needed for automatic transformer registration
"""
from os.path import dirname, basename, isfile, join
from glob import glob
__all__ = [basename(f)[:-3] for f in glob(join(dirname(__file__), "*.py"))
if isfile(f) and not f.endswith("__init__.py")]
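# For example, if this directory contained clang.py and llvm_opt.py (hypothetical module
# names), __all__ would evaluate to ['clang', 'llvm_opt'], so `from . import *` pulls in
# every transformer module in the package automatically.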
| 1.84375
| 2
|
desafio021.py
|
RickChaves29/Desafios-Python
| 0
|
12784154
|
<reponame>RickChaves29/Desafios-Python
import pygame
pygame.init()
pygame.mixer.music.load('mus.wav')
pygame.mixer.music.play()
pygame.event.wait()
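# Note: pygame.event.wait() returns as soon as any event arrives. A more reliable way to
# block until playback finishes (a sketch, not part of the original script) is:
#     while pygame.mixer.music.get_busy():
#         pygame.time.wait(100)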
| 2.21875
| 2
|
Logistic_regression_classifier.py
|
polasha/Logistic-Regression-with-a-Neural-Network-mindset
| 0
|
12784155
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
import h5py  # h5py is a common package to interact with
# a dataset that is stored in an H5 file.
#from lr_utils import load_dataset
#load datasets
#Load lr_utils for loading train and testinng datasets
def load_dataset():
train_dataset = h5py.File('/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 50
example = train_set_x_orig[index]
plt.imshow(train_set_x_orig[index])
plt.show()
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# Reshape the training and test data sets so that images of size (num_px, num_px, 3)
# are flattened into single vectors of shape (num_px * num_px * 3, 1).
# A trick when you want to flatten a matrix X of shape (a, b, c, d) to a matrix X_flatten of shape (b * c * d, a) is to use:
# X_flatten = X.reshape(X.shape[0], -1).T   # X.T is the transpose of X
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], 64*64*3).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
#To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel
# value is actually a vector of three numbers ranging from 0 to 255.
# One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you
# substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation
# of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to
# just divide every row of the dataset by 255 (the maximum value of a pixel channel).Let's standardize our dataset.
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
print('number of train datasets =' + str(train_set_x.shape))
print('number of test datasets =' + str (test_set_x.shape))
#Key steps: -
# 1. Initialize the parameters of the model
# 2. Learn the parameters for the model by minimizing the cost
# 3. Use the learned parameters to make predictions (on the test set)
# 4. Analyse the results and conclude
#algorithm building:
# The main steps for building a Neural Network are:
# Define the model structure (such as number of input features)
# Initialize the model's parameters
# Loop:
# Calculate current loss (forward propagation)
# Calculate current gradient (backward propagation)
# Update parameters (gradient descent)
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1. / (1 + np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###initialize_with_zeros
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
w = np.zeros(shape=(dim, 1), dtype=np.float32)
b = 0
assert (w.shape == (dim, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
#forward and backward propagation
#Implement a function propagate() that computes the cost function and its gradient.
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
A = sigmoid(np.dot(w.T, X) + b) # compute activation
cost = (-1. / m) * np.sum((Y * np.log(A) + (1 - Y) * np.log(1 - A)), axis=1) # compute cost
# BACKWARD PROPAGATION (TO FIND GRAD)
dw = (1. / m) * np.dot(X, ((A - Y).T))
db = (1. / m) * np.sum(A - Y, axis=1)
assert (dw.shape == w.shape)
assert (db.dtype == float)
cost = np.squeeze(cost)
assert (cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
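# Recap of what propagate() computes (standard logistic-regression formulas, added as a reference comment):
#   A    = sigmoid(w.T X + b)                                  (forward pass)
#   cost = -(1/m) * sum( Y*log(A) + (1-Y)*log(1-A) )           (negative log-likelihood)
#   dw   = (1/m) * X (A - Y).T                                 (gradient w.r.t. w)
#   db   = (1/m) * sum(A - Y)                                  (gradient w.r.t. b)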
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
##OPTIMIZATION
# initialized your parameters.
# to compute a cost function and its gradient.
# update the parameters using gradient descent
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
grads, cost = propagate(w=w, b=b, X=X, Y=Y)
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
w = w - learning_rate * dw
b = b - learning_rate * db
# Record the costs
if i % 100 == 0:
costs.append(cost)
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
##PREDICTION PART
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1, m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
A = sigmoid(np.dot(w.T, X) + b)
[print(x) for x in A]
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
if A[0, i] >= 0.5:
Y_prediction[0, i] = 1
else:
Y_prediction[0, i] = 0
assert (Y_prediction.shape == (1, m))
return Y_prediction
print ("predictions = " + str(predict(w, b, X)))
## MERGE ALL FUNCTIONS INTO A MODEL
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train": Y_prediction_train,
"w": w,
"b": b,
"learning_rate": learning_rate,
"num_iterations": num_iterations}
return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y,
num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# Example of a picture that was wrongly classified.
index = 49
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
plt.show()
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \""
+ classes[int(d["Y_prediction_test"][0,index])].decode("utf-8")
+ "\" picture.")
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
| 3
| 3
|
dagbo/models/gp_factory.py
|
hgl71964/dagbo
| 0
|
12784156
|
import botorch
import gpytorch
from torch import Tensor
from gpytorch.kernels.kernel import Kernel
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.priors.torch_priors import GammaPrior
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.models.utils import gpt_posterior_settings
from dagbo.models.dag.node import Node, SingleTaskGP_Node
def make_gps(x: Tensor, y: Tensor, gp_name: str) -> SingleTaskGP:
# noiseless modelling
#likelihood = gpytorch.likelihoods.GaussianLikelihood()
#likelihood.noise = 1e-4
#likelihood.noise_covar.raw_noise.requires_grad_(False)
# get model
#model = SingleTaskGP(x, y, likelihood)
model = SingleTaskGP(x, y)
# equip
#model.likelihood = likelihood
model.covar_module = make_kernels(gp_name)
return model
def make_kernels(name: str) -> Kernel:
if name == "SE":
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
elif name == "RQ":
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RQKernel())
elif name == "MA":
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(
nu=2.5, lengthscale_prior=GammaPrior(3.0, 6.0)),
outputscale_prior=GammaPrior(
2.0, 0.15))
return kernel
def fit_gpr(model: SingleTaskGP) -> None:
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll.train()
fit_gpytorch_model(mll) # by default it fits with scipy, so L-BFGS-B
return None
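# A minimal usage sketch (synthetic data; illustrative only, not part of the original module):
#     import torch
#     train_x = torch.rand(16, 3)                                # 16 points, 3 input dims
#     train_y = torch.sin(train_x.sum(dim=-1, keepdim=True))     # (16, 1) targets
#     gp = make_gps(train_x, train_y, gp_name="MA")              # SingleTaskGP with a Matern kernel
#     fit_gpr(gp)                                                # fit by maximizing the exact MLL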
def make_node(x: Tensor, y: Tensor, gp_name: str):
"""
for test purpose
check if Node in Dag is a sound gp
"""
class gp(Node):
def __init__(self, input_names, output_name, train_inputs,
train_targets):
super().__init__(input_names, output_name, train_inputs,
train_targets)
self.num_outputs = 1
def posterior(self,
X: Tensor,
observation_noise=False,
**kwargs) -> GPyTorchPosterior:
self.eval() # make sure model is in eval mode
with gpt_posterior_settings():
mvn = self(X)
posterior = GPyTorchPosterior(mvn=mvn)
return posterior
if len(x.shape) == 2:
x = x.unsqueeze(0)
if len(y.shape) == 2:
y = y.unsqueeze(0)
y = y.squeeze(-1)
#print("node input:")
#print(x.shape) # [batch_size, q, dim]
#print(y.shape) # [batch_size, q]
#print()
model = gp([f"x{i}" for i in range(20)], "final", x, y)
model.covar = make_kernels(gp_name)
return model
def make_SingleTaskGP_node(x: Tensor, y: Tensor, gp_name: str):
"""
for test purpose
if SingleTaskGP_Node in Dag is a sound gp
"""
if len(x.shape) == 2:
x = x.unsqueeze(0)
if len(y.shape) == 2:
y = y.unsqueeze(0)
y = y.squeeze(-1)
#print("node input:")
#print(x.shape) # [batch_size, q, dim]
#print(y.shape) # [batch_size, q]
#print()
class gp(SingleTaskGP_Node):
def __init__(self, input_names, output_name, train_inputs,
train_targets):
super().__init__(input_names, output_name, train_inputs,
train_targets)
# expose posterior to print shape
def posterior(self,
X: Tensor,
observation_noise=False,
**kwargs) -> GPyTorchPosterior:
self.eval() # make sure model is in eval mode
with gpt_posterior_settings():
mvn = self(X)
#print()
#print("X::: ", X.shape)
#print(X)
#print("mvn:::")
#print(mvn)
#print(mvn.loc)
#print()
#print(mvn.loc) # can verify identical mvn
posterior = GPyTorchPosterior(mvn=mvn)
return posterior
#model = SingleTaskGP_Node([f"x{i}" for i in range(20)], "final", x, y)
model = gp([f"x{i}" for i in range(20)], "final", x, y)
model.covar_module = make_kernels(gp_name)
return model
| 2.078125
| 2
|
poly2.py
|
peter-koufalis/quadratic_equation_solver
| 0
|
12784157
|
import math
def poly2(a, b, c):
    ''' solves quadratic equations of the
    form ax^2 + bx + c = 0 (real roots only) '''
    if a == 0:
        raise ValueError('a must be nonzero for a quadratic equation')
    disc = b**2 - 4*a*c
    if disc < 0:
        raise ValueError('no real roots: the discriminant is negative')
    x1 = (-b + math.sqrt(disc))/(2*a)
    x2 = (-b - math.sqrt(disc))/(2*a)
    return x1, x2
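# Example: x^2 - 3x + 2 = 0 factors as (x - 1)(x - 2), so
# >>> poly2(1, -3, 2)
# (2.0, 1.0)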
| 3.5
| 4
|
lintcode/41.3.py
|
jianershi/algorithm
| 1
|
12784158
|
<reponame>jianershi/algorithm
"""
41. Maximum Subarray
https://www.lintcode.com/problem/maximum-subarray/description?_from=ladder&&fromId=37
continuously check prefix sum
o(n)
"""
import sys
class Solution:
"""
@param nums: A list of integers
    @return: An integer indicating the sum of the max subarray
"""
def maxSubArray(self, nums):
# write your code here
if not nums:
return 0
n = len(nums)
now_sum = 0
min_sum = 0
max_sum = -sys.maxsize
for num in nums:
now_sum += num
max_sum = max(max_sum, now_sum - min_sum)
min_sum = min(min_sum, now_sum)
return max_sum
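# Example: maxSubArray([-2, 11, -4, 13, -5, -2]) == 20, from the subarray [11, -4, 13].
# Invariant: now_sum is the prefix sum up to the current element; max_sum tracks the largest
# value of (current prefix sum - smallest earlier prefix sum), i.e. the best subarray sum so far,
# and min_sum tracks the smallest prefix sum seen so far.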
| 3.609375
| 4
|
Lessons/Lesson2.py
|
MatthewDShen/ComputingInCivil
| 0
|
12784159
|
<reponame>MatthewDShen/ComputingInCivil
def Sets():
var1 = {3,2.1,'red',2.1}
#gets rid of duplicates
print(var1)
def Dictionaries():
var9 = {'name': 'Julia', 'age': 25, 'hobbies': ['ski', 'music', 'blog']}
    var9['name'] #evaluates to 'Julia'
def Index():
var1 = [1,2,3,4,5,6]
    print(var1[2]) #shows the third value in the list (index 2)
print(var1[::2]) #prints every other value
def countVowles():
s = str(input("Insert your sentence here: "))
Vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
NumVowles = 0
for i in s:
if i in Vowels:
NumVowles = NumVowles + 1
print("This sentence has", str(NumVowles), "vowels")
countVowles()
#Index()
| 3.875
| 4
|
reel_miami/models.py
|
deadlyraptor/reel-miami
| 0
|
12784160
|
# models.py
from flask import abort, redirect, request, url_for
from flask_admin import form
from flask_admin.contrib.sqla import ModelView
from flask_security import current_user, RoleMixin, UserMixin
from wtforms import SelectField, TextAreaField
from reel_miami import db
# Database models
class Venue(db.Model):
__tablename__ = 'venues'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
address1 = db.Column(db.String, nullable=False)
address2 = db.Column(db.String, nullable=True)
city = db.Column(db.String, nullable=False)
state = db.Column(db.String, nullable=False)
postal_code = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
venue_photo = db.Column(db.String(20), nullable=False,
default='default.jpg')
web_url = db.Column(db.String, nullable=True)
phone_number = db.Column(db.String, nullable=True)
films = db.relationship('Film', backref='venue', lazy=True)
def __repr__(self):
return f'<Venue(name={self.name})>'
def __str__(self):
return f'{self.name}'
class Film(db.Model):
__tablename__ = 'films'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
running_time = db.Column(db.String, nullable=False)
director = db.Column(db.String, nullable=False)
year = db.Column(db.String, nullable=False)
venue_id = db.Column(db.Integer, db.ForeignKey('venues.id'))
showtimes = db.relationship('Showtime', backref='film', lazy=True)
def __repr__(self):
return f'<Film(name={self.name})>'
def __str__(self):
return f'{self.name}'
class Showtime(db.Model):
__tablename__ = 'showtimes'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.String, nullable=False)
time = db.Column(db.String, nullable=False)
ticketing_link = db.Column(db.String, nullable=False)
film_id = db.Column(db.Integer, db.ForeignKey('films.id'))
def __repr__(self):
return f'<Showtime(date={self.date}, time={self.time})>'
def __str__(self):
        return f'{self.date} {self.time}'
'''
The Flask-Security models that are stored in the database.
'''
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(),
db.ForeignKey('users.id')),
db.Column('role_id', db.Integer(),
db.ForeignKey('roles.id')))
class Role(db.Model, RoleMixin):
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __str__(self):
return f'{self.name}'
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
def __str__(self):
return f'{self.email}'
"""
The following classes modify the primary Flask-Admin ModelView in order to
accomplish various tasks.
"""
class AdminVenueView(ModelView):
column_labels = {'address1': 'Address', 'address2': 'Address 2',
'web_url': 'Website'}
column_default_sort = 'name'
    column_exclude_list = ('description',)
form_columns = ('name', 'address1', 'address2', 'city', 'state',
'postal_code', 'description', 'web_url', 'phone_number',
'venue_photo')
form_excluded_columns = ['films']
form_overrides = {'description': TextAreaField}
form_widget_args = {'address1': {
'placeholder': 'Primary address'},
'address2': {
                        'placeholder': 'Suite/Building/Other'
},
'description': {
'rows': 5,
'style': 'color: black',
'maxlength': 1000,
'placeholder': 'max 1000 characters',
'spellcheck': 'true'
},
'phone_number': {
'placeholder': '123.456.7890'
}
}
form_extra_fields = {
'venue_photo': form.ImageUploadField(
'Venue Photo',
base_path='reel_miami/static/img/venues',
url_relative_path='img/venues/',
),
'state': SelectField(label='State', choices=[('FL', 'Florida')],
default='FL')
}
def is_accessible(self):
return (current_user.is_active
and current_user.is_authenticated
and (current_user.has_role('Superuser')
or current_user.has_role('User')))
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is
not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
class AdminRoleView(ModelView):
def is_accessible(self):
return (current_user.is_active
and current_user.is_authenticated
and current_user.has_role('Superuser'))
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is
not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
class AdminUserView(ModelView):
column_exclude_list = 'password'
def is_accessible(self):
return (current_user.is_active
and current_user.is_authenticated
and current_user.has_role('Superuser'))
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is
not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
# permission denied
abort(403)
else:
# login
return redirect(url_for('security.login', next=request.url))
| 2.3125
| 2
|
tests/test_util.py
|
briandleahy/embryovision
| 0
|
12784161
|
import os
import unittest
import torch
import numpy as np
from PIL import Image
from embryovision import util
from embryovision.tests.common import get_loadable_filenames
class TestReadImage(unittest.TestCase):
def test_read_image_returns_numpy(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertIsInstance(image, np.ndarray)
def test_read_image_returns_correct_shape(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertEqual(image.ndim, 3)
self.assertEqual(image.shape[2], 3)
def test_read_image_returns_float_on_01(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertGreaterEqual(image.min(), 0)
self.assertLessEqual(image.max(), 1)
class TestReadImageForTorch(unittest.TestCase):
def test_read_image_for_torch_returns_torch(self):
filenames = get_loadable_filenames()
as_torch = util.read_images_for_torch(filenames)
self.assertIsInstance(as_torch, torch.Tensor)
def test_read_image_for_torch_returns_correct_shape(self):
        # torch expects images shaped (n_images, channels, height, width)
filenames = get_loadable_filenames()
as_torch = util.read_images_for_torch(filenames)
n_channels = 3
self.assertEqual(as_torch.size()[:2], (len(filenames), n_channels))
class TestLoadAndCropImage(unittest.TestCase):
def test_returns_pil_image(self):
filename = get_loadable_filenames()[0]
box = (1, 1, 2, 2)
image = util.load_and_crop_image(filename, box)
self.assertIsInstance(image, Image.Image)
def test_output_image_is_correct_shape(self):
filename = get_loadable_filenames()[0]
box = (1, 1, 100, 100)
shape = (150, 140)
image = util.load_and_crop_image(filename, box, output_shape=shape)
self.assertEqual(image.size, shape)
def test_crop_box_is_used_with_resize_nearest(self):
# we crop to a 1 px image, and check that all image values
# are the same value
filename = get_loadable_filenames()[0]
box = (1, 1, 2, 2)
image = util.load_and_crop_image(filename, box)
correct_px_value = np.array(Image.open(filename))[box[0], box[1]]
self.assertTrue(np.all(np.array(image) == correct_px_value))
class TestLoadImageIntoRam(unittest.TestCase):
def test_load_image_as_bytes_io(self):
filename = get_loadable_filenames()[0]
loaded_into_ram = util.load_image_into_ram(filename)
image0 = util.read_image(filename)
image1 = util.read_image(loaded_into_ram)
self.assertTrue(np.all(image0 == image1))
class TestTransformingCollection(unittest.TestCase):
def test_getitem_transforms(self):
np.random.seed(400)
data = np.random.randn(20)
transform = lambda x: -2 * x
loader = util.TransformingCollection(data, transform)
index = 0
self.assertEqual(transform(data[index]), loader[index])
def test_len(self):
data = np.random.randn(20)
transform = lambda x: -2 * x
loader = util.TransformingCollection(data, transform)
self.assertEqual(len(loader), data.size)
def test_on_images(self):
filenames = get_loadable_filenames()
images_ram = [util.load_image_into_ram(nm) for nm in filenames]
loader = util.TransformingCollection(images_ram, util.read_image)
index = 0
image_filename = util.read_image(filenames[index])
image_loader = loader[index]
self.assertTrue(np.all(image_filename == image_loader))
class TestMisc(unittest.TestCase):
def test_split_all(self):
dummy_folder = '/some/long/directory/structure/'
filename = 'D2017_05_05_S1477_I313_pdb/WELL06/F0/016.jpg'
fullname = os.path.join(dummy_folder, filename)
fullname_f0_split = util.split_all(fullname)
correct_answer = (
'/', 'some', 'long', 'directory', 'structure',
'D2017_05_05_S1477_I313_pdb', 'WELL06', 'F0', '016.jpg')
self.assertEqual(fullname_f0_split, correct_answer)
def test_augment_focus(self):
filename = get_loadable_filenames()[0]
augmented = util.augment_focus(filename)
for foundname, focus_correct in zip(augmented, ['F-15', 'F0', 'F15']):
*head, focus_found, image_number = util.split_all(foundname)
self.assertTrue(os.path.exists(foundname))
self.assertEqual(focus_found, focus_correct)
def test_augment_focus_raises_error_when_no_filename(self):
unloadable_filename = '/some/wrong/directory/structure/001.jpg'
assert not os.path.exists(unloadable_filename)
self.assertRaises(
FileNotFoundError,
util.augment_focus,
unloadable_filename,)
def make_loader():
filenames = get_loadable_filenames()
return util.ImageTransformingCollection(filenames)
if __name__ == '__main__':
unittest.main()
| 2.546875
| 3
|
bindings/python/idocp/mpc/__init__.py
|
z8674558/idocp
| 43
|
12784162
|
from .mpc_quadrupedal_walking import *
from .mpc_quadrupedal_trotting import *
| 1.101563
| 1
|
wcm_spiders/philosophers/spiders/plato.py
|
cakiki/philosophy-graph
| 3
|
12784163
|
<gh_stars>1-10
import scrapy
class PlatoSpider(scrapy.Spider):
name = 'plato'
start_urls = ['https://plato.stanford.edu/contents.html']
def parse(self, response):
entry_links = response.xpath('//a[contains(@href, "entries")]/@href').getall()
yield from response.follow_all(entry_links, callback=self.parse_entry)
def parse_entry(self, response):
related_entries = response.xpath('//div[@id="related-entries"]//a/@href').getall()
related_entries = [response.urljoin(entry) for entry in related_entries]
title = response.xpath('//meta[@property="citation_title"]/@content').get()
url = response.url
authors = response.xpath('//meta[@property="citation_author"]/@content').getall()
publication_date = response.xpath('//meta[@property="citation_publication_date"]/@content').get()
abstract = response.xpath('string(//div[@id="preamble"])').get().replace('\n',' ').strip()
full_article_with_tags = response.xpath('//div[@id="main-text"]').get()
bibliography_with_tags = response.xpath('//div[@id="bibliography"]').get()
yield {
'title':title,
'url':url,
'related_entries': related_entries,
'abstract': abstract,
'publication_date': publication_date,
'authors':authors,
'full_article_with_tags': full_article_with_tags,
'bibliography': bibliography_with_tags
}
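# To run this spider with the standard Scrapy CLI (assuming the usual project layout):
#     scrapy crawl plato -o plato_entries.json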
| 2.890625
| 3
|
python/venv/lib/python2.7/site-packages/keystoneauth1/loading/opts.py
|
sjsucohort6/openstack
| 0
|
12784164
|
<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
try:
from oslo_config import cfg
except ImportError:
cfg = None
from keystoneauth1 import _utils as utils
__all__ = ['Opt']
class Opt(object):
@utils.positional()
def __init__(self,
name,
type=str,
help=None,
secret=False,
dest=None,
deprecated=None,
default=None,
metavar=None,
required=False):
if not callable(type):
raise TypeError('type must be callable')
if dest is None:
dest = name.replace('-', '_')
self.name = name
self.type = type
self.help = help
self.secret = secret
self.required = required
self.dest = dest
self.deprecated = [] if deprecated is None else deprecated
self.default = default
self.metavar = metavar
def __repr__(self):
return '<Opt: %s>' % self.name
def _to_oslo_opt(self):
if not cfg:
raise ImportError("oslo.config is not an automatic dependency of "
"keystoneauth. If you wish to use oslo.config "
"you need to import it into your application's "
"requirements file. ")
deprecated_opts = [o._to_oslo_opt() for o in self.deprecated]
return cfg.Opt(name=self.name,
type=self.type,
help=self.help,
secret=self.secret,
required=self.required,
dest=self.dest,
deprecated_opts=deprecated_opts,
metavar=self.metavar)
def __eq__(self, other):
return (type(self) == type(other) and
self.name == other.name and
self.type == other.type and
self.help == other.help and
self.secret == other.secret and
self.required == other.required and
self.dest == other.dest and
self.deprecated == other.deprecated and
self.default == other.default and
self.metavar == other.metavar)
@property
def _all_opts(self):
return itertools.chain([self], self.deprecated)
@property
def argparse_args(self):
return ['--os-%s' % o.name for o in self._all_opts]
@property
def argparse_default(self):
# select the first ENV that is not false-y or return None
for o in self._all_opts:
v = os.environ.get('OS_%s' % o.name.replace('-', '_').upper())
if v:
return v
return self.default
| 2.03125
| 2
|
clubhub/main/templatetags/add_class.py
|
geosir/clubhub
| 0
|
12784165
|
<gh_stars>0
# add_class - A template tag to add the given classes to a DOM object's classes.
from django import template
register = template.Library()
@register.filter(name='add_class')
def addclass(value, arg):
return value.as_widget(attrs={'class': arg})
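# Template usage (standard Django filter syntax; field name is illustrative):
#     {% load add_class %}
#     {{ form.my_field|add_class:"form-control" }}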
| 2.375
| 2
|
vis_3d/fusion_3d.py
|
JHZ-2326/3d_detection_kit
| 23
|
12784166
|
<gh_stars>10-100
"""
converting between lidar points and image
"""
import numpy as np
class LidarCamCalibration(object):
"""
3x4 p0-p3 Camera P matrix. Contains extrinsic
and intrinsic parameters.
3x3 r0_rect Rectification matrix, required to transform points
from velodyne to camera coordinate frame.
3x4 tr_velodyne_to_cam Used to transform from velodyne to cam
coordinate frame according to:
Point_Camera = P_cam * R0_rect *
Tr_velo_to_cam *
Point_Velodyne.
"""
def __init__(self):
self.p0 = []
self.p1 = []
self.p2 = []
self.p3 = []
self.r0_rect = []
self.tr_velodyne_to_cam = []
def __str__(self):
return 'p0: {}\np1: {}\np2: {}\np3: {}\nr0_rect: {}\ntr_veodyne_to_cam: {}\n'.format(
self.p0, self.p1, self.p2, self.p3, self.r0_rect, self.tr_velodyne_to_cam
)
def load_from_file(self, f):
"""
load from file with Kitti format calibration
"""
pass
# --------------- Convert 3D points to image --------------------
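# The sketch below illustrates the chain from the class docstring,
# Point_Camera = P_cam * R0_rect * Tr_velo_to_cam * Point_Velodyne, assuming
# KITTI-style matrix shapes (3x4 P, 3x3 R0_rect, 3x4 Tr). It is an illustrative
# addition, not part of the original file.
def _project_velo_to_image_example(pts_velo, p, r0_rect, tr_velo_to_cam):
    """Project (N, 3) lidar points to (N, 2) pixel coordinates."""
    n = pts_velo.shape[0]
    pts_h = np.hstack([pts_velo, np.ones((n, 1))])        # homogeneous lidar points, (N, 4)
    r0_h = np.eye(4)
    r0_h[:3, :3] = r0_rect                                 # pad 3x3 rectification to 4x4
    tr_h = np.vstack([tr_velo_to_cam, [0., 0., 0., 1.]])   # pad 3x4 extrinsics to 4x4
    cam = (p @ r0_h @ tr_h @ pts_h.T).T                    # (N, 3) homogeneous image coords
    return cam[:, :2] / cam[:, 2:3]                        # perspective divide -> pixels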
| 2.765625
| 3
|
tests/test_hpoo.py
|
rubenjf/oo_cli
| 0
|
12784167
|
<reponame>rubenjf/oo_cli
import unittest
from mock import Mock, patch, call
from oo_client.hpoo import OOClient
import oo_client.errors as errors
class TestHPOO(unittest.TestCase):
"""Unittest tool for hpoo"""
def setUp(self):
pass
@patch('oo_client.hpoo.OORestCaller.put')
@patch('oo_client.hpoo.OORestCaller.get')
@patch('__builtin__.open')
def test_deploy_content_pack(self, mock_open, mock_get, mock_put):
client = OOClient("https://blah:1234", "aa", "bb")
mock_cp = mock_open()
mock_response = Mock()
mock_response = {"contentPackResponses": {"dummy.jar":
{"responses": [{"responseCategory": "Success",
"message": "lolol"}]}}}
mock_put.return_value = mock_response
ret = client.deploy_content_pack('/path/to/dummy.jar')
client.rest.put.assert_called_with("content-packs/dummy",
mock_cp.__enter__())
self.assertTrue(ret)
mock_response = {"contentPackResponses": {"dummy.jar":
{"responses": [{"responseCategory": "FAIL",
"message": "lolol"}]}}}
mock_put.return_value = mock_response
ret = client.deploy_content_pack('/path/to/dummy.jar')
self.assertFalse(ret)
@patch('oo_client.hpoo.OORestCaller.post')
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_new_deployment(self, mock_get, mock_post):
client = OOClient("https://blah:1234", "aa", "bb")
mock_response = Mock()
mock_response = {"deploymentProcessId": 1234}
mock_config = {'return_value': mock_response}
mock_post.configure_mock(**mock_config)
ret = client.get_new_deployment()
self.assertEqual(ret, 1234)
@patch('oo_client.hpoo.OOClient.wait_for_deployment_to_complete')
@patch('oo_client.hpoo.OORestCaller.put')
@patch('oo_client.hpoo.OORestCaller.post')
@patch('oo_client.hpoo.OORestCaller.get')
@patch('__builtin__.open')
@patch('oo_client.hpoo.OOClient.get_new_deployment')
def test_deploy_content_packs(self, mock_get_dep, mock_open, mock_get,
mock_post, mock_put, mock_wait):
mock_get_dep.return_value = 1234
mock_cp = mock_open()
client = OOClient("https://blah:1234", "aa", "bb")
mock_response = {"contentPackResponses":
{"dummy.jar":
{"responses": [{"responseCategory": "Success",
"message": "lolol"}]},
"not.jar":
{"responses": [{"responseCategory": "Success",
"message": "lolol"}]}}}
mock_wait.return_value = mock_response
ret = client.deploy_content_packs(['/some/dummy.jar',
'/another/not.jar'])
first_cp = call("deployments/1234/files",
files={'file': ('dummy.jar',
mock_cp.__enter__())})
second_cp = call("deployments/1234/files",
files={'file': ('not.jar',
mock_cp.__enter__())})
client.rest.post.assert_has_calls([first_cp, second_cp])
mock_wait.assert_called_with(1234, 3600, 5)
mock_put.assert_called_with('deployments/1234', None)
self.assertTrue(ret)
mock_response = {"contentPackResponses":
{"dummy.jar":
{"responses": [{"responseCategory": "Success",
"message": "lolol"}]},
"not.jar":
{"responses": [{"responseCategory": "NotASuccess",
"message": "lolol"}]}}}
mock_wait.return_value = mock_response
ret = client.deploy_content_packs(['/some/dummy.jar',
'/another/not.jar'])
self.assertFalse(ret)
@patch('oo_client.hpoo.OORestCaller.get')
def test_is_deployment_complete(self, mock_get):
mock_get.return_value = {'status': 'FINISHED',
'deploymentResultVO': 'OBJECTblah'}
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.is_deployment_complete(5555)
mock_get.assert_called_with('deployments/5555')
self.assertEqual(ret, 'OBJECTblah')
mock_get.return_value = {'status': 'PAUSED',
'deploymentResultVO': 'OBJECTblah'}
ret = client.is_deployment_complete(5555)
self.assertFalse(ret)
@patch('oo_client.hpoo.OORestCaller.post')
@patch('oo_client.hpoo.OORestCaller.get')
def test_run_flow_async(self, mock_get, mock_post):
mock_ret = Mock()
mock_post.return_value = mock_ret
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.run_flow_async('f1739b66-a586-44dc-942a-479caaecec34',
run_name='some-name',
inputs={'an': 'input'})
mock_payload = {'uuid': 'f1739b66-a586-44dc-942a-479caaecec34',
'runName': 'some-name',
'inputs': {'an': 'input'}}
client.rest.post.assert_called_with('executions', data=mock_payload)
self.assertEqual(ret, mock_ret)
@patch('oo_client.hpoo.OOClient.get_flow_uuid_from_path')
@patch('oo_client.hpoo.OORestCaller.post')
@patch('oo_client.hpoo.OORestCaller.get')
def test_run_flow_async_by_path(self, mock_get, mock_post, mock_get_flow):
mock_ret = Mock()
mock_post.return_value = mock_ret
mock_get_flow.return_value = 'the-uuid'
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.run_flow_async('Some/flow/path',
run_name='some-name',
inputs={'an': 'input'})
mock_get_flow.assert_called_with('Some/flow/path')
mock_payload = {'uuid': 'the-uuid',
'runName': 'some-name',
'inputs': {'an': 'input'}}
client.rest.post.assert_called_with('executions', data=mock_payload)
self.assertEqual(ret, mock_ret)
@patch('oo_client.hpoo.OOClient.get_run_result_type')
@patch('oo_client.hpoo.OOClient.get_run_status')
@patch('oo_client.hpoo.OOClient.wait_for_run_to_complete')
@patch('oo_client.hpoo.OOClient.run_flow_async')
@patch('oo_client.hpoo.OORestCaller.get')
def test_run_flow(self, mock_get, mock_run, mock_wait, mock_get_s,
mock_get_r):
mock_run.return_value = {'executionId': 345}
mock_get_s.return_value = 'COMPLETED'
mock_get_r.return_value = 'result'
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.run_flow('Some/flow/path',
run_name='some-name',
inputs={'an': 'input'})
mock_wait.assert_called_with(345, 300, 5)
mock_get_s.assert_called_with(345)
mock_get_r.assert_called_with(345)
self.assertEqual(ret, 'result')
mock_get_s.return_value = 'FAIL'
ret = client.run_flow('Some/flow/path',
run_name='some-name',
inputs={'an': 'input'})
self.assertIsNone(ret)
@patch('oo_client.hpoo.OOClient.run_flow')
@patch('oo_client.hpoo.OORestCaller.get')
def test_run_flows(self, mock_get, mock_run_flow):
mock_run_flow.side_effect = ['RESOLVED', 'RESOLVED']
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.run_flows(['Some/flow/path', 'Other/flow/path'])
self.assertTrue(ret)
mock_run_flow.side_effect = ['RESOLVED', 'ERROR']
ret = client.run_flows(['Some/flow/path', 'Other/flow/path'])
self.assertFalse(ret)
@patch('oo_client.hpoo.OOClient.get_run_status')
@patch('oo_client.hpoo.OORestCaller.get')
def test_is_run_complete(self, mock_get, mock_get_run_status):
mock_get_run_status.return_value = 'COMPLETED'
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.is_run_complete(12345)
mock_get_run_status.assert_called_with(12345)
self.assertTrue(ret)
mock_get_run_status.return_value = 'PENDING'
ret = client.is_run_complete(12345)
self.assertFalse(ret)
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_flow_uuid_from_path(self, mock_get):
client = OOClient("https://blah:1234", "aa", "bb")
mock_get.return_value = [{'id': 123, 'name': 'a_flow'},
{'id': 456, 'name': 'a_nother_flow'}]
ret = client.get_flow_uuid_from_path('path/to/a_flow')
mock_get.assert_called_with('flows/tree/level', path='path/to')
self.assertEqual(ret, 123)
with self.assertRaises(errors.NotFound):
client.get_flow_uuid_from_path('path/to/not_a_flow')
@patch('oo_client.hpoo.OOClient.get_run_summary')
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_run_status(self, mock_get, mock_get_run_s):
mock_get_run_s.return_value = {'status': 'spam'}
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.get_run_status(666)
mock_get_run_s.assert_called_with(666)
self.assertEqual(ret, 'spam')
@patch('oo_client.hpoo.OOClient.get_run_summary')
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_run_result_type(self, mock_get, mock_get_run_s):
mock_get_run_s.return_value = {'resultStatusType': 'eggs'}
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.get_run_result_type(666)
mock_get_run_s.assert_called_with(666)
self.assertEqual(ret, 'eggs')
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_run_summary(self, mock_get):
client = OOClient("https://blah:1234", "aa", "bb")
mock_get.return_value = ['some']
ret = client.get_run_summary(123)
mock_get.assert_called_with('executions/123/summary')
self.assertEqual(ret, 'some')
mock_get.return_value = []
with self.assertRaises(errors.NotFound):
client.get_flow_uuid_from_path('path/to/not_a_flow')
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_content_pack_id(self, mock_get):
client = OOClient("https://blah:1234", "aa", "bb")
mock_get.return_value = [{'id': 123, 'name': 'a_cp'},
{'id': 456, 'name': 'a_nother_cp'}]
ret = client.get_content_pack_id('a_cp')
mock_get.assert_called_with('content-packs')
self.assertEqual(ret, 123)
with self.assertRaises(errors.NotFound):
client.get_flow_uuid_from_path('not_a_cp')
@patch('oo_client.hpoo.OOClient.get_flow_uuid_from_path')
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_content_pack_from_flow(self, mock_get, mock_get_flow):
client = OOClient("https://blah:1234", "aa", "bb")
mock_get.return_value = {'cpName': 'my-cp'}
mock_get_flow.return_value = 'uuidLOL'
ret = client.get_content_pack_from_flow('Some/path/to/flow')
mock_get.assert_called_with('flows/uuidLOL')
self.assertEqual(ret, 'my-cp')
@patch('oo_client.hpoo.OORestCaller.get')
@patch('oo_client.hpoo.OOClient.get_content_pack_id')
def test_get_all_flows_in_cp(self, mock_get_content, mock_get):
client = OOClient("https://blah:1234", "aa", "bb")
mock_get_content.return_value = 1234
mock_get.return_value = [{'id': 123, 'path': 'some/path/to/flow',
'type': 'FLOW'},
{'id': 456, 'path': 'another/flow',
'type': 'FLOW'},
{'id': 666, 'path': 'another/op',
'type': 'OPERATION'}]
ret = client.get_all_flows_in_cp('my-cp')
mock_get_content.assert_called_with('my-cp')
mock_get.assert_called_with('content-packs/1234/content-tree')
expected = {123: 'some/path/to/flow', 456: 'another/flow'}
self.assertEqual(ret, expected)
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_name_value_pair(self, mock_get):
mock_get.return_value = Mock()
client = OOClient("https://blah:1234", "aa", "bb")
mock_input = {"name": "sa1",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}",
"path": "sa1",
"type": "system-accounts",
"uuid": "ebe1e757-46de-4c68-abd6-d41141ed76c2"}
expected = {"name": "sa1",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}"}
ret = client.get_name_value_pair(mock_input)
self.assertEqual(ret, expected)
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_a_configuration_item(self, mock_get):
# system-accounts
mock_input = {"name": "sa1",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}",
"path": "sa1",
"type": "system-accounts",
"uuid": "ebe1e757-46de-4c68-abd6-d41141ed76c2"}
mock_get.return_value = mock_input
mock_ret = {"name": "sa1",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}"}
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.get_a_configuration_item('system-accounts', 'some/path')
mock_get.assert_called_with('config-items/system-accounts/some/path')
self.assertEqual(ret, mock_ret)
# system-properties
mock_input = {"name": "sp1",
"value": "blah",
"path": "sp1",
"type": "system-properties",
"uuid": "ebe1e757-46de-4c68-abd6-d41141ed76c2"}
mock_get.return_value = mock_input
mock_ret = {"name": "sp1",
"value": "blah"}
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.get_a_configuration_item('system-properties', 'sp1')
mock_get.assert_called_with('config-items/system-properties/sp1')
self.assertEqual(ret, mock_ret)
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_all_configuration_item(self, mock_get):
mock_input = [{"type": "selection-lists",
"path": "some/path",
"name": "Yes No",
"value": "Yes|No",
"uuid": "somesequence"},
{"type": "selection-lists",
"name": "Yes No - No Default",
"path": "some/path",
"value": "No|Yes",
"uuid": "somesequence"},
{"type": "group-aliases",
"name": "RAS_Operator_Path",
"path": "some/path",
"value": "RAS_Operator_Path",
"uuid": "somesequence"},
{"type": "system-accounts",
"name": "sa1",
"path": "some/path",
"uuid": "somesequence",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}"}]
mock_get.return_value = mock_input
mock_ret = [{"type": "selection-lists",
"name": "Yes No",
"value": "Yes|No"},
{"type": "selection-lists",
"name": "Yes No - No Default",
"value": "No|Yes"},
{"type": "group-aliases",
"name": "RAS_Operator_Path",
"value": "RAS_Operator_Path"},
{"type": "system-accounts",
"name": "sa1",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}"}]
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.get_all_configuration_items()
mock_get.assert_called_with('config-items')
self.assertEqual(ret, mock_ret)
@patch('oo_client.hpoo.OORestCaller.get')
def test_get_configuration_items_by_type(self, mock_get):
mock_input = [{"type": "selection-lists",
"path": "some/path",
"name": "Yes No",
"value": "Yes|No",
"uuid": "somesequence"},
{"type": "selection-lists",
"name": "Yes No - No Default",
"path": "some/path",
"value": "No|Yes",
"uuid": "somesequence"}]
mock_get.return_value = mock_input
mock_ret = [{"name": "Yes No",
"value": "Yes|No"},
{"name": "Yes No - No Default",
"value": "No|Yes"}]
client = OOClient("https://blah:1234", "aa", "bb")
ret = client.get_configuration_items_by_type("selection-lists")
mock_get.assert_called_with('config-items/selection-lists')
self.assertEqual(ret, mock_ret)
# system-accounts
mock_input = [{"type": "system-accounts",
"name": "sa1",
"path": "some/path",
"uuid": "somesequence",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}"},
{"type": "system-accounts",
"name": "sa2",
"path": "some/other/path",
"uuid": "somesequence",
"value": "{\"username\":\"yf6\",\"password\":\"************\"}"}]
mock_get.return_value = mock_input
mock_ret = [{"name": "sa1",
"value": "{\"username\":\"yf5\",\"password\":\"************\"}"},
{"name": "sa2",
"value": "{\"username\":\"yf6\",\"password\":\"************\"}"}]
ret = client.get_configuration_items_by_type("system-accounts")
mock_get.assert_called_with('config-items/system-accounts')
self.assertEqual(ret, mock_ret)
@patch('oo_client.hpoo.OORestCaller.put')
@patch('oo_client.hpoo.OORestCaller.get')
def test_set_a_configuration_item(self, mock_get, mock_put):
mock_get.return_value = ['some']
client = OOClient("https://blah:1234", "aa", "bb")
# system-accounts
mock_data = '"{\\"username\\": \\"blah\\", \\"password\\": \\"password\\"}"'
mock_put_ret = {u'customValue': u'{"username":"blah","password":"************"}', u'name': u'sa1', u'defaultValue': u'{"username":"username","password":null}', u'value': u'{"username":"blah","password":"************"}', u'path': u'sa1', u'fullPath': u'Configuration/System Accounts/sa1.xml', u'type': u'system-accounts', u'uuid': u'ebe1e757-46de-4c68-abd6-d41141ed76c2'}
mock_ret = {'name': u'sa1', 'value': u'{"username":"blah","password":"************"}'}
mock_put.return_value = mock_put_ret
ret = client.set_a_configuration_item('system-accounts', 'sa1', 'blah:password')
mock_put.assert_called_with('config-items/system-accounts/sa1', mock_data)
self.assertEqual(ret, mock_ret)
# system-properties
mock_data = '"blah"'
mock_put_ret = {u'customValue': u'blah', u'name': u'sp1', u'defaultValue': u'', u'value': u'blah', u'path': u'sp1', u'fullPath': u'Configuration/System Properties/sp1.xml', u'type': u'system-properties', u'uuid': u'0c755cf0-67b6-4264-bfd9-7a880e280a73'}
mock_ret = {'name': u'sp1', 'value': u'blah'}
mock_put.return_value = mock_put_ret
ret = client.set_a_configuration_item('system-properties', 'sp1', 'blah')
mock_put.assert_called_with('config-items/system-properties/sp1', mock_data)
self.assertEqual(ret, mock_ret)
def tearDown(self):
pass
| 2.484375
| 2
|
Q-learning-control/sumo_speed_limit.py
|
KaguraTart/SUMO-RL-ramp-control
| 6
|
12784168
|
<reponame>KaguraTart/SUMO-RL-ramp-control
from tqdm import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import math
from dateutil.relativedelta import relativedelta
from datetime import datetime, date
import os, sys
import time  # plotting style settings follow
plt.rcParams['figure.figsize']=(30,10)
# plt.style.use('ggplot')
from sumolib import checkBinary
# import xml2csv
'''
traci provides a real-time interaction interface
'''
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
sys.path.append(r"F:/software two/sumo-1.8.0/tools/xml")
import traci
gui = True
if gui == True:
sumoBinary = r"F:\software two\sumo-1.8.0\bin/sumo-gui"
else:
sumoBinary = r"F:\software two\sumo-1.8.0\bin/sumo"
sumoCmd = [sumoBinary, "-c",
r"F:\software two\sumo-1.8.0/file1/test1.sumo.cfg",'--tripinfo-output',
r'F:\software two\sumo-1.8.0/file1/tripinfo2.xml','--duration-log.statistics']
# global variables
simulation_time =1200
H_0_meanspeed_list =[]
H_1_meanspeed_list =[]
H_2_meanspeed_list =[]
H_3_meanspeed_list =[]
H_4_meanspeed_list =[]
get_OOC0_list = []
get_OOC1_list = []
get_OOC2_list = []
get_OOC3_list = []
get_OOC4_list = []
get_OOCall_list = []
H_0_car_speed = 0
H_1_car_speed = 0
H_2_car_speed = 0
H_3_car_speed = 0
H_4_car_speed = 0
Actions_move =['r','G']
def build_qq_table(step, actions):
table = pd.DataFrame(
np.zeros((step, len(actions))), # q_table initial values
columns=actions, # actions's name
)
return table
qq_table = build_qq_table(simulation_time,Actions_move)
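# Illustrative sketch (added, not in the original source): with simulation_time=1200 and
# Actions_move=['r', 'G'], qq_table is a 1200x2 DataFrame of zeros, e.g.
#        r    G
# 0    0.0  0.0
# 1    0.0  0.0
# ...
# trafficlight_control2 below writes a 1 into the column of the action chosen at each step.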
def RedYellowGreen_control():
pass
def trafficlight_control(time):
if time % 20 > 0 and time % 20 < 10:
ramp = 'r' #r means red light
else:
ramp = 'G' #G means green light
return ramp
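# Clarifying note (added): the controller above runs a fixed 20-step cycle --
# the ramp light is red while step % 20 is 1..9 and green otherwise, e.g.
#   trafficlight_control(5)  -> 'r'
#   trafficlight_control(15) -> 'G'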
def trafficlight_control2(time):
H_12_car_mean_speed = (H_1_meanspeed_list[time]+H_2_meanspeed_list[time])/2
if H_12_car_mean_speed < 33:
ramp = 'r'
qq_table.iloc[time,0] = 1
else:
ramp = 'G'
qq_table.iloc[time,1] = 1
return ramp
T1 = 'Lane car mean speed for limit speed '
T2 = 'car mean speed of H_1 and H_2 for limit speed '
T3 = 'Lane car OCC for limit speed'
T4 = 'Lane car OCC_mean for limit speed'
control_way = 'g'
def print_lane_speed():  # print the mean speed of lanes 0 and 1
pass
def output_lane_speed():  # plot the mean speed of lanes 0-4
car_simple1 = []
car_simple2 = []
car_simple3 = []
car_simple4 = []
car_simple5 = []
car_simple_1_2_mean = []
car_index = []
car_speed = {'H_0':H_0_meanspeed_list,
'H_1':H_1_meanspeed_list,'H_2':H_2_meanspeed_list,
'H_3':H_3_meanspeed_list,'H_4':H_4_meanspeed_list}
car_speed = pd.DataFrame(data=car_speed)
for i in range(0,car_speed.count()[0]):
if i % 20 == 0:
car_simple1.append(car_speed['H_0'][i])
car_simple2.append(car_speed['H_1'][i])
car_simple3.append(car_speed['H_2'][i])
car_simple4.append(car_speed['H_3'][i])
car_simple5.append(car_speed['H_4'][i])
car_simple_1_2_mean.append((car_speed['H_1'][i]+car_speed['H_2'][i])/2)
car_index.append(i/60)
car_simple_speed = {'H_0':car_simple1,'H_1':car_simple2,'H_2':car_simple3,'H_3':car_simple4,'H_4':car_simple5 }
car_simple_speed = pd.DataFrame(data = car_simple_speed,index = car_index)
car_simple_speed.to_csv(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/csv/'+T1+'.csv')
ax = car_simple_speed[['H_0', 'H_1','H_2','H_3','H_4']].plot(fontsize =30)
plt.title(T1,fontsize = 30)
fig = ax.get_figure()
plt.xlabel('time /min',fontsize = 30)
plt.ylabel('speed km/h',fontsize = 30)
plt.legend(fontsize = 20)
plt.show()
fig.savefig(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/img/' + T1+'.png')
bplot1 = car_simple_speed[['H_0', 'H_1','H_2','H_3','H_4']].boxplot(fontsize=30)
# colors = ['pink', 'lightblue', 'lightgreen','red']
# for patch, color in zip(bplot1['boxes'], colors):
# patch.set_facecolor(color)
plt.grid(True)
plt.xlabel('Lane name',fontsize = 30)
plt.ylabel('speed distribution(km/h)',fontsize = 30)
plt.title('the Lanes speed box plots for limit speed',fontsize = 35)
plt.show()
fig.savefig(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/img/' + T1+'_box.png')
car_mean_speed = {'H_1_2_mean':car_simple_1_2_mean}
car_mean_speed = pd.DataFrame(data = car_mean_speed,index = car_index)
ax= car_mean_speed[['H_1_2_mean']].plot(fontsize =30)
plt.title(T2,fontsize = 30)
fig = ax.get_figure()
plt.xlabel('time /min',fontsize = 30)
plt.ylabel('speed km/h',fontsize = 30)
plt.legend(fontsize = 20)
plt.show()
fig.savefig(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/img/' + T2+'.png')
def output_lane_OOC():  # plot lane occupancy
get_OOC = {'H_0':get_OOC0_list,
'H_1':get_OOC1_list,'H_2':get_OOC2_list,
'H_3':get_OOC3_list,'H_4':get_OOC4_list,'H_all':get_OOCall_list}
get_OOC = pd.DataFrame(data=get_OOC)
car_OOC_simple1 =[]
car_OOC_simple2 =[]
car_OOC_simple3 =[]
car_OOC_simple4 =[]
car_OOC_simple5 =[]
car_OOC_simpleall =[]
car_OOC_index =[]
for i in range(0,get_OOC.count()[0]):
if i % 20 == 0:
car_OOC_simple1.append(get_OOC['H_0'][i])
car_OOC_simple2.append(get_OOC['H_1'][i])
car_OOC_simple3.append(get_OOC['H_2'][i])
car_OOC_simple4.append(get_OOC['H_3'][i])
car_OOC_simple5.append(get_OOC['H_4'][i])
car_OOC_simpleall.append((get_OOC['H_0'][i]+get_OOC['H_1'][i]+get_OOC['H_2'][i])/3)
car_OOC_index.append(i/60)
car_simple_OOC = {'H_0':car_OOC_simple1,'H_1':car_OOC_simple2,'H_2':car_OOC_simple3,'H_3':car_OOC_simple4,'H_4':car_OOC_simple5 ,'H_all':car_OOC_simpleall}
car_simple_OOC = pd.DataFrame(data = car_simple_OOC,index = car_OOC_index)
car_simple_OOC.to_csv(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/csv/'+T3+'.csv')
ax = car_simple_OOC[['H_0', 'H_1','H_2','H_3','H_4']].plot(fontsize =30)
car_simple_OOC[['H_0', 'H_1','H_2','H_3','H_4']].to_csv()
plt.title(T3,fontsize = 30)
fig = ax.get_figure()
plt.xlabel('time /min',fontsize = 30)
plt.ylabel('%',fontsize = 30)
plt.legend(fontsize = 20)
plt.show()
fig.savefig(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/img/' +T3+ '.png')
boxpl2= car_simple_OOC[['H_0', 'H_1','H_2','H_3','H_4']].boxplot()
plt.grid(True)
plt.xlabel('Lane name',fontsize = 30)
    plt.ylabel('occupancy distribution (%)',fontsize = 30)
plt.title('the Lanes OCC box plots for limit speed',fontsize = 35)
plt.show()
fig.savefig(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/img/' + T3+'_box.png')
ax = car_simple_OOC[['H_all']].plot(fontsize =30)
plt.title(T4,fontsize = 30)
fig = ax.get_figure()
plt.xlabel('time /min',fontsize = 30)
plt.ylabel('%',fontsize = 30)
plt.legend(fontsize = 20)
plt.show()
fig.savefig(r'F:/大学/MTFwiki\SUMO-Q_learning\SUMO-RL-ramp-control/Q-learning-control/img/' + T4+'.png')
# traci control loop
def traci_control(step_time):
for i in range(0,5):
traci.lane.setMaxSpeed('H_'+str(i),27.78)
traci.lane.setMaxSpeed('C1_0',8)
traci.lane.setMaxSpeed('C1_1',8)
# traci.lane.setMaxSpeed('H_2',15)
# traci.lane.setMaxSpeed('H_3',15)
# traci.lane.setMaxSpeed('H_4',15)
for step in range(0,step_time):
H_0_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_0')*3.6)
H_1_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_1')*3.6)
H_2_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_2')*3.6)
H_3_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_3')*3.6)
H_4_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_4')*3.6)
get_OOC0_list.append(traci.lane.getLastStepOccupancy('H_0')*100)
get_OOC1_list.append(traci.lane.getLastStepOccupancy('H_1')*100)
get_OOC2_list.append(traci.lane.getLastStepOccupancy('H_2')*100)
get_OOC3_list.append(traci.lane.getLastStepOccupancy('H_3')*100)
get_OOC4_list.append(traci.lane.getLastStepOccupancy('H_4')*100)
        # average occupancy across the five mainline lanes, as a percentage
        get_OOCall_list.append((traci.lane.getLastStepOccupancy('H_0')+traci.lane.getLastStepOccupancy('H_1')+
                                traci.lane.getLastStepOccupancy('H_2')+traci.lane.getLastStepOccupancy('H_3')+traci.lane.getLastStepOccupancy('H_4'))/5*100)
        # simulation delay
        # time.sleep(0.1)
        # traffic light control
        traci.trafficlight.setRedYellowGreenState(traci.trafficlight.getIDList()[0], 'g'+'G') #trafficlight_control(step) trafficlight_control2(step)
        # step control
        traci.simulationStep(step + 1)
# simulation_current_time = traci.simulation.getTime()
        # current time
        # print('simulation time is:',simulation_current_time)
        # get vehicle IDs
        all_vehicle_id = traci.vehicle.getIDList()
        # get vehicle positions
        # all_vehicle_position = traci.vehicle.getPosition(step)
        # check whether vehicles crossed the detector lane
        try:  # screenshot helper
            pass
            # take a screenshot
# traci.gui.screenshot('View #0',r'F:\software two\sumo-1.8.0/file1/img/img{}.jpg'.format(step),-1,-1)
# try:
# if traci.inductionloop.getLastStepVehicleNumber() > 0:
# traci.trafficlight.setRedYellowGreenState("0", "GGGGG")
# except:
# traci.close()
# break
except :
pass
# print(H_0_meanspeed)
traci.close(wait=True)
'''
trafficlight_ID_list = traci.trafficlight.getIDList()
RedYellowGreenState = traci.trafficlight.getRedYellowGreenState(trafficlight_ID_list[0])
# print(trafficlight_ID_list[0],RedYellowGreenState)
# Lane_car_ID = traci.lanearea.getIDList()
# print(Lane_car_ID)
lane_ID = traci.lane.getIDList()
'''
'''
main function
'''
if __name__ == "__main__":
    # run sumo
    traci.start(sumoCmd)
    # traci.gui.setSchema('View #0','cus')  # switch the GUI to render realistic vehicles
traci_control(simulation_time)
# output_lane_speed()
# output_lane_OOC()
print(qq_table)
qq_table.to_excel(r'F:\software two\sumo-1.8.0/file1/img/'+'qqtable.xlsx',index=False)
# ax= qq_table[['r']].plot(fontsize =30)
# plt.title('qq_table ',fontsize = 30)
# fig = ax.get_figure()
# plt.xlabel('time',fontsize = 30)
# plt.ylabel(' ',fontsize = 30)
# plt.show()
# fig.savefig(r'F:\software two\sumo-1.8.0/file1/img/' + 'qqtable.png')
| 2.0625
| 2
|
airflow/providers/amazon/aws/example_dags/example_quicksight.py
|
holly-evans/airflow
| 3
|
12784169
|
<reponame>holly-evans/airflow
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from datetime import datetime
from airflow import DAG
from airflow.models.baseoperator import chain
from airflow.providers.amazon.aws.operators.quicksight import QuickSightCreateIngestionOperator
from airflow.providers.amazon.aws.sensors.quicksight import QuickSightSensor
DATA_SET_ID = os.getenv("DATA_SET_ID", "data-set-id")
INGESTION_ID = os.getenv("INGESTION_ID", "ingestion-id")
with DAG(
dag_id="example_quicksight",
schedule_interval=None,
start_date=datetime(2021, 1, 1),
tags=["example"],
catchup=False,
) as dag:
    # Create and start the QuickSight SPICE data ingestion
    # without waiting for its completion
# [START howto_operator_quicksight_create_ingestion]
quicksight_create_ingestion_no_waiting = QuickSightCreateIngestionOperator(
task_id="quicksight_create_ingestion_no_waiting",
data_set_id=DATA_SET_ID,
ingestion_id=INGESTION_ID,
wait_for_completion=False,
)
# [END howto_operator_quicksight_create_ingestion]
# The following task checks the status of the QuickSight SPICE ingestion
# job until it succeeds.
# [START howto_sensor_quicksight]
quicksight_job_status = QuickSightSensor(
task_id="quicksight_job_status",
data_set_id=DATA_SET_ID,
ingestion_id=INGESTION_ID,
)
# [END howto_sensor_quicksight]
chain(quicksight_create_ingestion_no_waiting, quicksight_job_status)
| 1.84375
| 2
|
lambda/ppe_detection_lambda.py
|
mfakbar/meraki-ppe-detection
| 0
|
12784170
|
import boto3
import json
import requests
from webex_lambda import *
import io
from PIL import Image, ImageDraw
from pymongo import MongoClient
# AWS access key and secret key
ACCESS_KEY = ""
SECRET_KEY = ""
# MongoDB connection settings
Database = "mongodb+srv://youraccount:<EMAIL>@<EMAIL>.xxxx.mongodb.net/xxxx" # connection string
Cluster = "Tables" # database name
Events_collection_name = "Events" # collection name used to store the events
# What PPE policy to implement
ppe_requirement = ['FACE_COVER', 'HAND_COVER', 'HEAD_COVER']
# Employee email domain
email_domain = "<EMAIL>"
def detect_labels(photo, bucket_name):
# Connect to AWS Rekognition
rekog = boto3.client('rekognition', region_name='ap-southeast-1',
aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
response = rekog.detect_protective_equipment(
Image={'S3Object': {'Bucket': bucket_name, 'Name': photo}},
SummarizationAttributes={
'MinConfidence': 80,
'RequiredEquipmentTypes': ppe_requirement
}
)
# Image analysis
person_count = str(len(response["Persons"]))
missing_ppe_msg = ""
bounding_box = []
person_num = 1
# Only proceed if there's a PPE violation
if response["Summary"]["PersonsWithoutRequiredEquipment"] != []:
for person in response["Persons"]:
missing_ppe_msg += "[Person #" + str(person_num) + ": "
for bodypart in person["BodyParts"]:
if bodypart["Name"] == "FACE":
if len(bodypart["EquipmentDetections"]) > 0:
bounding_box.append(
bodypart["EquipmentDetections"][0]["BoundingBox"])
if bodypart["EquipmentDetections"][0]["CoversBodyPart"]["Value"] == False:
missing_ppe_msg += "(Face-Uncovered)"
else:
missing_ppe_msg += "(Face)"
if bodypart["Name"] == "LEFT_HAND":
if len(bodypart["EquipmentDetections"]) > 0:
bounding_box.append(
bodypart["EquipmentDetections"][0]["BoundingBox"])
if bodypart["EquipmentDetections"][0]["CoversBodyPart"]["Value"] == False:
missing_ppe_msg += "(Left Hand-Uncovered)"
else:
missing_ppe_msg += "(Left Hand)"
if bodypart["Name"] == "RIGHT_HAND":
if len(bodypart["EquipmentDetections"]) > 0:
bounding_box.append(
bodypart["EquipmentDetections"][0]["BoundingBox"])
if bodypart["EquipmentDetections"][0]["CoversBodyPart"]["Value"] == False:
missing_ppe_msg += "(Right Hand-Uncovered)"
else:
missing_ppe_msg += "(Right Hand)"
if bodypart["Name"] == "HEAD":
if len(bodypart["EquipmentDetections"]) > 0:
bounding_box.append(
bodypart["EquipmentDetections"][0]["BoundingBox"])
if bodypart["EquipmentDetections"][0]["CoversBodyPart"]["Value"] == False:
missing_ppe_msg += "(Head-Uncovered)"
else:
missing_ppe_msg += "(Head)"
missing_ppe_msg += "] "
person_num += 1
print("Person count: ", person_count)
print("Missing ppe msg: ", missing_ppe_msg)
return missing_ppe_msg, person_count, bounding_box
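# Illustrative note (added): for a frame with one person whose face cover was detected
# but not actually covering the face, the returned missing_ppe_msg would look roughly like
#   "[Person #1: (Face-Uncovered)] "
# person_count is the number of detected persons as a string, and bounding_box holds the
# BoundingBox dicts of every detected piece of equipment.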
def upload_to_s3(snapshot_url, bucket_name, key_name):
temp_image = requests.get(snapshot_url, stream=True)
session = boto3.Session()
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
bucket.upload_fileobj(temp_image.raw, key_name)
print("Image uploaded to S3")
return key_name
def search_face(photo, bucket_name, collection_name):
threshold = 70
maxFaces = 2
rekog = boto3.client('rekognition')
response = rekog.search_faces_by_image(CollectionId=collection_name,
Image={'S3Object': {
'Bucket': bucket_name, 'Name': photo}},
FaceMatchThreshold=threshold,
MaxFaces=maxFaces)
faceMatches = response['FaceMatches']
print('Matching faces:')
for match in faceMatches:
print('Employee alias:' + match['Face']['ExternalImageId'])
print('Similarity: ' + "{:.2f}".format(match['Similarity']) + "%")
detected_face_msg = ""
bounding_box = [response['SearchedFaceBoundingBox']]
detected_names = []
person_num = 1
for match in faceMatches:
detected_names.append(match['Face']['ExternalImageId'][:-4])
detected_face_msg += "[Person #" + str(person_num) + ": "
detected_face_msg += match['Face']['ExternalImageId'][:-4] + \
" ({:.2f}".format(match['Similarity']) + "%)"
detected_face_msg += "] "
person_num += 1
if len(faceMatches) == 0:
detected_face_msg = "Face ID not recognized"
print("Detected face msg: ", detected_face_msg)
return detected_face_msg, bounding_box, detected_names
# Draw bounding box to the image
def draw_boxes(photo, bucket_name, bounding_box, key_name):
# Load image from S3 bucket
session = boto3.Session()
s3 = session.resource('s3')
s3_object = s3.Object(bucket_name, photo)
s3_response = s3_object.get()
stream = io.BytesIO(s3_response['Body'].read())
image = Image.open(stream)
imgWidth, imgHeight = image.size
draw = ImageDraw.Draw(image)
# Calculate and display bounding boxes for each detected ppe
for per_ppe_box in bounding_box:
left = imgWidth * per_ppe_box['Left']
top = imgHeight * per_ppe_box['Top']
width = imgWidth * per_ppe_box['Width']
height = imgHeight * per_ppe_box['Height']
points = (
(left, top),
(left + width, top),
(left + width, top + height),
(left, top + height),
(left, top)
)
# Draw rectangle
draw.rectangle([left, top, left + width, top + height],
outline='#00d400')
# image.show()
# Upload image with boxes
in_mem_file = io.BytesIO() # Save the image to an in-memory file
image.save(in_mem_file, format=image.format)
in_mem_file.seek(0)
# Upload image to s3
s3 = boto3.client('s3')
s3.upload_fileobj(in_mem_file, bucket_name, key_name,
ExtraArgs={
'ACL': 'public-read',
'ContentType': 'image/jpeg',
'ContentDisposition': 'inline; filename='+key_name,
}
)
print("Image with boxes uploaded to S3")
# Update Mongo DB
def update_db(mv_loc, mv_sn, person_count, detected_face_msg, missing_ppe_msg, event_time):
# get collection
cluster = MongoClient(Database)
db = cluster[Cluster]
events_collection = db[Events_collection_name]
# update collection
events_collection.insert_one({"Time": event_time, "Camera SN": mv_sn, "Camera Location": mv_loc,
"People Count": person_count, "Names": detected_face_msg, "Missing PPEs": missing_ppe_msg})
return print("Event stored in the database")
def lambda_handler(event, context):
# data from Meraki
payload = json.loads(event['body'])
snapshot_url = payload["snapshot_url"]
event_time = payload["event_time"]
mv_loc = payload["mv_loc"]
mv_sn = payload["serial_number"]
# AWS parameter
bucket_name = "faceforppedetection" # AWS S3 bucket name
collection_name = "face_collection_for_PPE_detection" # AWS collection id
region = "ap-southeast-1" # AWS region
key_name = "<KEY>" # The name of the image file we want to upload to the bucket
key_name_box = "snapshot_with_boxes.jpg" # The name of the image file w/ boxes
# Upload Meraki snapshot to S3
photo = upload_to_s3(snapshot_url, bucket_name, key_name)
# Detect missing ppe and person count
missing_ppe_msg, person_count, bounding_box = detect_labels(
photo, bucket_name)
# Detect face
detected_face_msg, bounding_box_face, detected_names = search_face(
photo, bucket_name, collection_name)
# Append bounding box
if len(bounding_box_face) > 0:
for box in bounding_box_face:
bounding_box.append(box)
print("Final bounding box: ", bounding_box)
    print()
# Draw bounding box
draw_boxes(photo, bucket_name, bounding_box, key_name_box)
# Send notification to employee if any names are detected
if len(detected_names) > 0:
for name in detected_names:
detected_email = name+email_domain
post_message(mv_loc, detected_email, event_time)
# Webex notification to security team
s3_obj_url = "https://" + bucket_name + ".s3." + \
region + ".amazonaws.com/" + key_name_box
print("S3 URL: ", s3_obj_url)
post_card(mv_loc, s3_obj_url, person_count,
detected_face_msg, missing_ppe_msg, event_time)
# Update Mongo DB
update_db(mv_loc, mv_sn, person_count,
detected_face_msg, missing_ppe_msg, event_time)
return {
'statusCode': 200,
}
| 2.453125
| 2
|
server/toolz_swap_app/migrations/0004_auto_20211211_0853.py
|
minerva-university/cs162-toolz-swap-service
| 0
|
12784171
|
<gh_stars>0
# Generated by Django 3.2.9 on 2021-12-11 16:53
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('toolz_swap_app', '0003_auto_20211209_0903'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='created_on',
),
migrations.AlterField(
model_name='listingreview',
name='rating',
field=models.IntegerField(validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)]),
),
]
| 1.65625
| 2
|
PySide2extn/examples/rpb/initPos.py
|
anjalp/PySide2extn
| 25
|
12784172
|
<reponame>anjalp/PySide2extn
import sys
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2extn.RoundProgressBar import roundProgressBar
class MyWidget(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.rpb = roundProgressBar()
self.rpb2 = roundProgressBar()
self.rpb.rpb_setInitialPos('South')
self.rpb2.rpb_setInitialPos('East')
self.layout = QtWidgets.QHBoxLayout()
self.layout.addWidget(self.rpb)
self.layout.addWidget(self.rpb2)
self.setLayout(self.layout)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
widget = MyWidget()
widget.show()
sys.exit(app.exec_())
| 2.6875
| 3
|
tv-script-generation/script.py
|
dmh2000/deep-learning
| 0
|
12784173
|
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
# Number of Epochs
num_epochs = 2
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 100
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# =====================================================================================
# =====================================================================================
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
# =====================================================================================
# =====================================================================================
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
inp = loaded_graph.get_tensor_by_name("input:0")
istate = loaded_graph.get_tensor_by_name("initial_state:0")
fstate = loaded_graph.get_tensor_by_name("final_state:0")
probs = loaded_graph.get_tensor_by_name("probs:0")
return inp, istate, fstate, probs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
# =====================================================================================
# =====================================================================================
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
index = np.random.choice(len(probabilities), p=probabilities)
return int_to_vocab[index]
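# Note (added): sampling with np.random.choice rather than taking argmax keeps the
# generated script varied from run to run; the probabilities passed in must sum to 1.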
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
# =====================================================================================
# =====================================================================================
gen_length = 20
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length - 1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
# =====================================================================================
# =====================================================================================
| 2.609375
| 3
|
glow/test/run_metrics.py
|
arquolo/ort
| 0
|
12784174
|
<gh_stars>0
from __future__ import annotations
import torch
from matplotlib import pyplot as plt
from torch import nn
from tqdm.auto import tqdm
import glow.nn
from glow import metrics as m
metrics: tuple[m.Metric, ...] = (
m.Lambda(m.accuracy_),
m.Confusion(
acc=m.accuracy,
accb=m.accuracy_balanced,
iou=m.iou,
kappa=m.kappa,
),
)
c = 8
b = 128
true = torch.randint(c, size=[b])
pred = torch.randn(b, c, requires_grad=True)
class Model(nn.Module):
def forward(self, x):
return net.param
net = Model()
net.param = nn.Parameter(data=pred, requires_grad=True)
optim = glow.nn.RAdam(net.parameters())
cm_grad = m.ConfusionGrad()
plt.ion()
_, ax = plt.subplots(ncols=4)
ax[2].plot(true.numpy())
with tqdm(range(32)) as pbar:
for _ in pbar:
for _ in range(64):
net.zero_grad()
cm = cm_grad(net(None), true)
loss = -m.accuracy(cm)
# loss = -m.accuracy_balanced(cm)
# loss = -m.kappa(cm)
loss.backward()
optim.step()
pbar.set_postfix({'score': -loss.detach_().item()})
cm.detach_()
ax[0].imshow(pred.detach().numpy())
ax[1].imshow(pred.detach().softmax(1).numpy())
ax[2].cla()
ax[2].plot(sorted(zip(true.numpy(), pred.detach().argmax(1).numpy())))
ax[3].imshow(cm.numpy(), vmax=1 / c)
plt.pause(1e-2)
with torch.no_grad():
meter = m.compose(*metrics)
d = meter.send((pred, true))
print(', '.join(f'{k}: {v:.3f}' for k, v in d.scalars.items()))
print(', '.join(f'{k}: {v.item():.3f}'
for k, v in d.tensors.items() if v.numel() == 1))
| 2.15625
| 2
|
tests/djangokeys/core/djangokeys/test_secret_key.py
|
alanverresen/django-keys
| 0
|
12784175
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains tests for accessing env vars as Django's secret key.
import pytest
from djangokeys.core.djangokeys import DjangoKeys
from djangokeys.exceptions import EnvironmentVariableNotFound
from djangokeys.exceptions import ValueIsEmpty
from tests.files import EMPTY_ENV_PATH
from tests.utils.environment_vars import use_environment_variable
def test__django_keys_secret_key__not_found():
""" An appropriate exception is raised when env var is not set.
"""
keys = DjangoKeys(EMPTY_ENV_PATH)
with pytest.raises(EnvironmentVariableNotFound):
keys.secret_key("DOES_NOT_EXIST")
def test__django_keys_str__empty_string():
""" An empty value cannot be used as Django's secret key.
"""
with use_environment_variable('SECRET_KEY', ''):
keys = DjangoKeys(EMPTY_ENV_PATH)
with pytest.raises(ValueIsEmpty):
keys.secret_key('SECRET_KEY')
def test__django_keys_str__regular_string():
""" A string of characters can be used as Django's secret key.
"""
with use_environment_variable('SECRET_KEY', '<KEY>'):
keys = DjangoKeys(EMPTY_ENV_PATH)
assert keys.secret_key("SECRET_KEY") == "<KEY>"
| 2.609375
| 3
|
app.py
|
LandRegistry-Attic/concept-homepage
| 0
|
12784176
|
<reponame>LandRegistry-Attic/concept-homepage<gh_stars>0
import os
from flask import Flask, redirect, render_template, request
from flask.ext.basicauth import BasicAuth
import logging
from raven.contrib.flask import Sentry
app = Flask(__name__)
# Auth
if os.environ.get('BASIC_AUTH_USERNAME'):
app.config['BASIC_AUTH_USERNAME'] = os.environ['BASIC_AUTH_USERNAME']
    app.config['BASIC_AUTH_PASSWORD'] = os.environ['BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_FORCE'] = True
basic_auth = BasicAuth(app)
# Sentry exception reporting
if 'SENTRY_DSN' in os.environ:
sentry = Sentry(app, dsn=os.environ['SENTRY_DSN'])
# Logging
@app.before_first_request
def setup_logging():
if not app.debug:
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.route('/')
def index():
return render_template("index.html")
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000, debug=True)
| 1.90625
| 2
|
examplepackage/tests/test_badmodule.py
|
dionysisbacchus/python-project-template
| 0
|
12784177
|
from examplepackage.badmodule import bad_function
def test_bad_function():
assert bad_function(1) == 1
| 1.960938
| 2
|
lib/logic/persistence_handler.py
|
SBC2000/excel-scripts
| 0
|
12784178
|
import os
import pickle
from lib.model.model import Model
class PersistenceHandler:
    def __init__(self, folder):
        self.__model_file_name = os.path.join(folder, "model.bin")
def store_model(self, model):
"""
@type model: Model
@return: None
"""
with open(self.__model_file_name, "wb") as output_file:
pickle.dump(model, output_file)
def load_model(self):
"""
@rtype: Model
"""
with open(self.__model_file_name, "rb") as input_file:
return pickle.load(input_file)
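# Illustrative usage sketch (added, not part of the original module); assumes a Model
# instance named `model` is available and that the target folder is writable:
#   handler = PersistenceHandler(".")
#   handler.store_model(model)
#   restored = handler.load_model()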
| 3.0625
| 3
|
ICPC/10-20-30_1996_A/Cardgame.py
|
otkaverappa/AdventureOfCode
| 0
|
12784179
|
<reponame>otkaverappa/AdventureOfCode
import unittest
from collections import deque
class Cardgame:
WIN, LOSS, DRAW = 'Win', 'Loss', 'Draw'
def __init__( self, deckState ):
self.cardDeck = deque( deckState )
self.numberOfTurns = 0
totalSlots = 7
self.cardSlots = list()
for _ in range( totalSlots ):
self.cardSlots.append( [ self._getCardFromDeck() ] )
self.currentCardSlot = 0
self.stateCache = set()
self.gameResult = None
def _encodeCurrentGameState( self ):
deckState = '#'.join( [ str( card ) for card in self.cardDeck ] )
slotStateList = list()
for slot in self.cardSlots:
slotStateList.append( '#'.join( [ str( card ) for card in slot ] ) )
slotState = '~'.join( slotStateList )
return (deckState, slotState)
def _moveToNextCardSlot( self ):
if len( self.cardSlots[ self.currentCardSlot ] ) == 0:
del self.cardSlots[ self.currentCardSlot ]
else:
self.currentCardSlot += 1
if self.currentCardSlot == len( self.cardSlots ):
self.currentCardSlot = 0
def _getCardFromDeck( self ):
self.numberOfTurns += 1
return self.cardDeck.popleft()
def _placeCardInTheDeck( self, C1, C2, C3 ):
self.cardDeck.append( C1 )
self.cardDeck.append( C2 )
self.cardDeck.append( C3 )
def _simulateOneTurn( self ):
currentSlot = self.cardSlots[ self.currentCardSlot ]
drawCard = self._getCardFromDeck()
currentSlot.append( drawCard )
while True:
if len( currentSlot ) < 3:
break
sumFound = False
for i, j, k, s, e in [ (0, 1, -1, 2, -1), (0, -2, -1, 1, -2), (-3, -2, -1, 0, -3) ]:
C1, C2, C3 = currentSlot[ i ], currentSlot[ j ], currentSlot[ k ]
if C1 + C2 + C3 in (10, 20, 30):
self.cardSlots[ self.currentCardSlot ] = currentSlot = currentSlot[ s : e ]
self._placeCardInTheDeck( C1, C2, C3 )
sumFound = True
break
if not sumFound:
break
self._moveToNextCardSlot()
gameState = self._encodeCurrentGameState()
if len( self.cardSlots ) == 0:
self.gameResult = Cardgame.WIN
elif len( self.cardDeck ) == 0:
self.gameResult = Cardgame.LOSS
elif gameState in self.stateCache:
self.gameResult = Cardgame.DRAW
else:
self.stateCache.add( gameState )
def play( self ):
while self.gameResult is None:
self._simulateOneTurn()
return self.gameResult, self.numberOfTurns
class CardgameTest( unittest.TestCase ):
def test_play( self ):
for filename in ('sample', 'cardgame'):
self._verify( filename )
def _verify( self, filename ):
print( 'Testcase file = {}'.format( filename ) )
deckStateList = list()
deckSize, emptyDeckSize = 52, 1
with open( 'tests/{}.in'.format( filename ) ) as inputFile:
while True:
deckState = list( map( int, inputFile.readline().strip().split() ) )
if len( deckState ) == emptyDeckSize:
break
assert len( deckState ) == deckSize
deckStateList.append( deckState )
resultList = list()
with open( 'tests/{}.ans'.format( filename ) ) as solutionFile:
for state in solutionFile.readlines():
result, numberOfTurns = [ token.strip() for token in state.split( ':' ) ]
resultList.append( (result, int( numberOfTurns ) ) )
self.assertEqual( len( deckStateList ), len( resultList ) )
for i in range( len( deckStateList ) ):
result, numberOfTurns = resultList[ i ]
print( 'Testcase = {} Expected result = {}:{}'.format( i + 1, result, numberOfTurns ) )
self.assertEqual( Cardgame( deckStateList[ i ] ).play(), resultList[ i ] )
if __name__ == '__main__':
unittest.main()
| 3.5
| 4
|
commands/HelpCommand.py
|
WeeTomatoBall/pentest
| 6
|
12784180
|
import discord
from commands.framework.CommandBase import CommandBase
class HelpCommand(CommandBase):
def __init__(self):
super(HelpCommand, self).__init__('help')
async def execute(self, client, message, args):
embed = discord.Embed(
title="Help Page",
description="Prefix: **!**",
color=discord.Colour.red()
)
embed.set_thumbnail(
url="https://media.discordapp.net/attachments/519223258428735511/520234344313257984/badboy.jpg")
embed.add_field(name="!whois", value="Verify informations abou the site.", inline=True)
embed.add_field(name="!ping", value="Ping some target.", inline=True)
embed.add_field(name="!hibp", value="Check if your email got leaked.", inline=True)
embed.add_field(name="!geoip", value="GeoIp lookup.", inline=True)
embed.add_field(name="!nmap", value="Simple port scan an ip address.", inline=True)
embed.add_field(name="!sqli", value="Test if a url is vulnerable to SQLi.", inline=True)
embed.add_field(name="!shodan", value="Search host in shodan.", inline=True)
embed.add_field(name="!exploitdb", value="Search exploits in ExploitDB.", inline=True)
embed.add_field(name="!reverseip", value="Verify domains in a host.", inline=True)
embed.set_footer(text="Type ![command] for more info about command")
await client.send_message(message.channel, embed=embed)
| 2.65625
| 3
|
jobhunter/admin.py
|
alexhyang/portfolio
| 0
|
12784181
|
<reponame>alexhyang/portfolio
from django.contrib import admin
from .models import Posting
# Register your models here.
admin.site.register(Posting)
| 1.203125
| 1
|
m2/beats2audio/cli.py
|
m2march/beats2audio
| 0
|
12784182
|
<gh_stars>0
import argparse
import sys
import os
import numpy as np
import m2.beats2audio
from m2.beats2audio import create_beats_audio
from m2.beats2audio import defaults, ACCEPTABLE_MP3_SAMPLE_RATES
FORMAT_OPTIONS = ['wav', 'mp3']
def main():
parser = argparse.ArgumentParser(
description=('Produce an audio file with click sounds at determined '
'positions. This utility is developed to generate '
'stimuli in the context of '
'"Simple and cheap setup for measuring timed responses '
                     'to auditory stimuli" (Miguel et al. 2020).')
)
parser.add_argument('clicks', type=argparse.FileType('r'),
help=('Input file with click locations. Locations '
'are expected in milliseconds (unless '
'--as-seconds flag is used)'))
parser.add_argument('-o', dest='output_file', type=str,
help=('Path to output audio'),
default=None)
parser.add_argument('-c', '--click_gain', dest='click_gain', type=float,
help=('Gain in dB to add to the click sound.'),
default=defaults.CLICK_GAIN)
parser.add_argument('-r', '--sample_rate', type=int, dest='sr',
help=('Sample rate to use in output audio.'),
default=defaults.SAMPLE_RATE)
parser.add_argument('-d', '--min-duration', type=int, dest='min_duration',
                        help=('Minimum duration of the output audio. If the '
                              'last click ends before the minimum duration, '
'the audio is filled with silence until the '
'duration is reached.'),
default=0)
parser.add_argument('-f', '--format', choices=FORMAT_OPTIONS, type=str,
dest='format',
help=('Format of the output file.'),
default=None)
parser.add_argument('--as-seconds', action='store_true', dest='as_seconds',
                        help=('Click times in the input file are given in seconds'),
default=False)
args = parser.parse_args()
if args.output_file is not None and os.path.exists(args.output_file):
print('File already exists {}'.format(args.output_file))
sys.exit()
click_times = np.array([float(x) for x in args.clicks])
if (args.as_seconds):
click_times = click_times * 1000
if (args.format is not None):
if (args.output_file is not None):
out_file_format = os.path.splitext(args.output_file)[1].lstrip('.')
if (args.format != out_file_format):
print('Output file name extension does not match provided '
'format ({} != {})'.format(out_file_format, args.format))
sys.exit()
format = args.format
else:
format = args.format
else:
if (args.output_file is not None):
format = os.path.splitext(args.output_file)[1].lstrip('.')
else:
format = defaults.FORMAT
output_file = (args.output_file
if args.output_file is not None
else defaults.OUTPUT_FILENAME_TPL.format(format))
if (format == 'mp3') and (args.sr not in ACCEPTABLE_MP3_SAMPLE_RATES):
print('Specified sample rate ({}) for mp3 format is not acceptable. '
'Accepted sample rates are: {}'.format(
args.sr, ACCEPTABLE_MP3_SAMPLE_RATES))
sys.exit()
m2.beats2audio.create_beats_audio(
click_times, output_file, format,
args.click_gain, args.min_duration, args.sr)
| 3.078125
| 3
|
plot_results.py
|
Bhaskers-Blu-Org1/SIC
| 12
|
12784183
|
from os import listdir
from os.path import isdir, isfile, join
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from utils import shelf
def dlist(key, dat):
r"""Runs over a list of dictionaries and outputs a list of values corresponding to `key`
Short version (no checks): return np.array([d[key] for d in dat])
"""
ret = []
for i, d in enumerate(dat):
if key in d:
ret.append(d[key])
else:
print('key {} is not in dat[{}]. Skip.'.format(key, i))
return np.array(ret)
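# Added example: dlist('tpr', [{'tpr': 0.8}, {'tpr': 0.9}, {'fdr': 0.1}])
# returns array([0.8, 0.9]) and prints a skip notice for the dict missing the key.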
def get_data(select_dict, ARGS, key_list, DAT):
data = []
for sel, key in zip(select_dict, key_list):
# Select DAT
k, v = next(iter(sel.items()))
dat = [da[0] for da in zip(DAT, ARGS) if k in da[1] and da[1][k] == v][0]
data.append(dlist(key, dat))
return data
def color_bplot(bplot, colors):
r"""Color the boxplots"""
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for median in bplot['medians']:
median.set(color='k', linewidth=1.5,)
def label_axis(ax, labels, xpos, ypos, fontsize=16, target_fdr=0.1):
# Partially remove frame
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# y label
ax.set_ylabel('Power and FDR', fontsize=fontsize)
ax.set_ylim([-0.05, 1.05])
# Hortizontal line for target fdr
if target_fdr:
ax.plot(ax.get_xlim(), [target_fdr, target_fdr], '--r')
# New Axis
new_ax = ax.twiny()
new_ax.set_xticks(xpos)
new_ax.set_xticklabels(labels)
new_ax.xaxis.set_ticks_position('bottom') # set the position of the second x-axis to bottom
new_ax.xaxis.set_label_position('bottom') # set the position of the second x-axis to bottom
new_ax.spines['bottom'].set_position(('outward', ypos)) # positions below
# Remove frame for new_ax
new_ax.spines['bottom'].set_visible(False)
new_ax.spines['top'].set_visible(False)
new_ax.spines['left'].set_visible(False)
new_ax.spines['right'].set_visible(False)
new_ax.tick_params(length=0, labelsize=fontsize)
new_ax.set_xlim(ax.get_xlim())
return new_ax
if __name__ == "__main__":
# Load data
PATH = 'output/'
DIRS = [d for d in listdir(PATH) if isdir(join(PATH, d))]
FILES = [join(PATH, d, f) for d in DIRS for f in listdir(join(PATH, d))
if isfile(join(PATH, d, f)) and f[-3:]=='.pt']
ARGS, DAT, MODELS = [], [], []
for f in FILES:
sh = shelf()._load(f)
ARGS.append(sh.args)
if 'd' in sh:
DAT.append(sh['d'])
MODELS.append(sh.args['model'])
else:
print("WARNING: There is no data field d field in file {}. Skip.".format(f))
continue
# ---------------------------
# Process data
# ---------------------------
select_dict, key_list, labels, positions, ax_labels, ax_positions = [], [], [], [-2], [], [-2]
# Baseline models
for m, l in zip(['en', 'rf'], ['Elastic Net', 'Random Forest']):
if m in MODELS:
select_dict += 4*[{'model': m}]
key_list += ['tpr_selected', 'fdr_selected', 'hrt_tpr_selected', 'hrt_fdr_selected']
labels += ['TPR', 'FDR', 'TPR\nHRT', 'FDR\nHRT']
p = positions[-1] + 2
positions += [1+p, 2+p, 4+p, 5+p]
ax_labels += [l]
ax_positions += [ax_positions[-1] + len(l)/2]
# Our models
for m, l, pos in zip(['sic_supervised', 'sic'], ['Sobolev Penalty', 'SIC'], [5.5, 4]):
if m in MODELS:
select_dict += 2*[{'model': m}]
key_list += ['hrt_tpr_selected', 'hrt_fdr_selected']
labels += ['TPR\nHRT', 'FDR\nHRT']
p = positions[-1] + 2
positions += [1+p, 2+p]
ax_labels += [l]
ax_positions += [ax_positions[-1] + pos]
positions.pop(0);
ax_positions.pop(0);
data = get_data(select_dict, ARGS, key_list, DAT)
# ---------------------------
# Plot
# ---------------------------
dataset = ARGS[0]['dataset'].upper()
n_samples = ARGS[0]['numSamples']
fig = plt.figure(figsize=(8, 3))
ax = plt.subplot(111)
bplot = plt.boxplot(data, positions=positions, labels=labels, patch_artist=True)
label_axis(ax, ax_labels, ax_positions, 32, fontsize=13)
color_bplot(bplot, len(positions)//2*['lightblue', 'orange'])
fig.suptitle(f'Dataset {dataset}, N={n_samples}');
fig.tight_layout()
fig.savefig(f"output/{dataset}_{n_samples}.png", bbox_inches='tight')
| 2.625
| 3
|
utils/wfuzzbasicauthbrute/wfuzz/framework/core/myexception.py
|
ismailbozkurt/kubebot
| 171
|
12784184
|
class FuzzException(Exception):
FATAL, SIGCANCEL = range(2)
def __init__(self, etype, msg):
self.etype = etype
self.msg = msg
Exception.__init__(self, msg)
| 2.390625
| 2
|
test/python/quantum_info/operators/test_measures.py
|
EnriqueL8/qiskit-terra
| 2
|
12784185
|
<filename>test/python/quantum_info/operators/test_measures.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for operator measures."""
import unittest
import numpy as np
from qiskit.quantum_info import Operator, Choi
from qiskit.quantum_info import process_fidelity
from qiskit.quantum_info import average_gate_fidelity
from qiskit.quantum_info import gate_error
from qiskit.test import QiskitTestCase
class TestOperatorMeasures(QiskitTestCase):
"""Tests for Operator measures"""
def test_operator_process_fidelity(self):
"""Test the process_fidelity function for operator inputs"""
# Orthogonal operator
op = Operator.from_label('X')
f_pro = process_fidelity(op, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_pro, 0.0, places=7)
# Global phase operator
op1 = Operator.from_label('X')
op2 = -1j * op1
f_pro = process_fidelity(op1, op2, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_pro, 1.0, places=7)
def test_channel_process_fidelity(self):
"""Test the process_fidelity function for channel inputs"""
depol = Choi(np.eye(4) / 2)
iden = Choi(Operator.from_label('I'))
# Completely depolarizing channel
f_pro = process_fidelity(depol, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_pro, 0.25, places=7)
# Identity
f_pro = process_fidelity(iden, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_pro, 1.0, places=7)
# Depolarizing channel
prob = 0.3
chan = prob * depol + (1 - prob) * iden
f_pro = process_fidelity(chan, require_cp=True, require_tp=True)
f_target = prob * 0.25 + (1 - prob)
self.assertAlmostEqual(f_pro, f_target, places=7)
# Depolarizing channel
prob = 0.5
op = Operator.from_label('Y')
chan = (prob * depol + (1 - prob) * iden) @ op
f_pro = process_fidelity(chan, op, require_cp=True, require_tp=True)
target = prob * 0.25 + (1 - prob)
self.assertAlmostEqual(f_pro, target, places=7)
def test_operator_average_gate_fidelity(self):
"""Test the average_gate_fidelity function for operator inputs"""
# Orthogonal operator
op = Operator.from_label('Z')
f_ave = average_gate_fidelity(op, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_ave, 1 / 3, places=7)
# Global phase operator
op1 = Operator.from_label('Y')
op2 = -1j * op1
f_ave = average_gate_fidelity(op1,
op2,
require_cp=True,
require_tp=True)
self.assertAlmostEqual(f_ave, 1.0, places=7)
def test_channel_average_gate_fidelity(self):
"""Test the average_gate_fidelity function for channel inputs"""
depol = Choi(np.eye(4) / 2)
iden = Choi(Operator.from_label('I'))
# Completely depolarizing channel
f_ave = average_gate_fidelity(depol, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_ave, 0.5, places=7)
# Identity
f_ave = average_gate_fidelity(iden, require_cp=True, require_tp=True)
self.assertAlmostEqual(f_ave, 1.0, places=7)
# Depolarizing channel
prob = 0.11
chan = prob * depol + (1 - prob) * iden
f_ave = average_gate_fidelity(chan, require_cp=True, require_tp=True)
f_target = (2 * (prob * 0.25 + (1 - prob)) + 1) / 3
self.assertAlmostEqual(f_ave, f_target, places=7)
# Depolarizing channel
prob = 0.5
op = Operator.from_label('Y')
chan = (prob * depol + (1 - prob) * iden) @ op
f_ave = average_gate_fidelity(chan,
op,
require_cp=True,
require_tp=True)
target = (2 * (prob * 0.25 + (1 - prob)) + 1) / 3
self.assertAlmostEqual(f_ave, target, places=7)
def test_operator_gate_error(self):
"""Test the gate_error function for operator inputs"""
# Orthogonal operator
op = Operator.from_label('Z')
err = gate_error(op, require_cp=True, require_tp=True)
self.assertAlmostEqual(err, 2 / 3, places=7)
# Global phase operator
op1 = Operator.from_label('Y')
op2 = -1j * op1
err = gate_error(op1, op2, require_cp=True, require_tp=True)
self.assertAlmostEqual(err, 0, places=7)
def test_channel_gate_error(self):
"""Test the gate_error function for channel inputs"""
depol = Choi(np.eye(4) / 2)
iden = Choi(Operator.from_label('I'))
# Depolarizing channel
prob = 0.11
chan = prob * depol + (1 - prob) * iden
err = gate_error(chan, require_cp=True, require_tp=True)
target = 1 - average_gate_fidelity(chan)
self.assertAlmostEqual(err, target, places=7)
# Depolarizing channel
prob = 0.5
op = Operator.from_label('Y')
chan = (prob * depol + (1 - prob) * iden) @ op
err = gate_error(chan, op, require_cp=True, require_tp=True)
target = 1 - average_gate_fidelity(chan, op)
self.assertAlmostEqual(err, target, places=7)
if __name__ == '__main__':
unittest.main()
| 2.3125
| 2
|
cogs/city.py
|
Greenfoot5/Discord-City
| 2
|
12784186
|
<filename>cogs/city.py
import discord
from discord.ext import commands
import cairocffi as cairo
from io import BytesIO
import random
import math
def PolarToCartesian(coord):
angle = coord[0]
magnitude = coord[1]
x = magnitude*math.cos(angle)+512
y = magnitude*math.sin(angle)+512
return [x,y]
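# Worked example (added): the conversion recenters on the 1024x1024 canvas, so
# PolarToCartesian([0, 100]) -> [612.0, 512.0] and
# PolarToCartesian([math.pi/2, 100]) -> [~512.0, 612.0].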
def createCity(memberList, houseSize):
#memberList format should be, for example, [(r,g,b)*x,(r1,g1,b1)*y...]
#houseSize should be between 5-20
#castle formation process:
#street formation process;
streetNum = random.randint(5,8)
streets =[]
temp = random.randint(0,int(360/streetNum))
streets.append(temp)
shapeArray=[]
for i in range(0,streetNum-1):
temp1 = streets[i]
temp2 = random.randint(0,int(360/streetNum))
streets.append(temp1+temp2)
streets.append(360)
memberCount=0
r=0
rAppend=20
band=0
while memberCount<len(memberList):
r+=rAppend
band+=1
if band % 3 != 2:
for i in range(1, streetNum):
temp1 = (streets[i] - streets[i-1])/360*2*math.pi*r
angle = streets[i-1]/360*2*math.pi
houseAngle = houseSize/r
temp2=0
while True:
if temp1>=houseSize:
temp1-=houseSize
temp2+=1
coord0=PolarToCartesian([angle+houseAngle*temp2,r])
coord3=PolarToCartesian([angle+houseAngle*temp2,r+rAppend])
if temp1-houseSize>=0:
coord1=PolarToCartesian([angle+houseAngle*(temp2+1),r])
coord2=PolarToCartesian([angle+houseAngle*(temp2+1),r+rAppend])
else:
coord1=PolarToCartesian([streets[i]/360*2*math.pi,r])
coord2=PolarToCartesian([streets[i]/360*2*math.pi,r+rAppend])
if memberCount==len(memberList):
break
shapeArray.append([coord0,coord1,coord2,coord3,memberList[memberCount]])
memberCount+=1
else:
break
return shapeArray
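# Note (added): each shapeArray entry is [coord0, coord1, coord2, coord3, (r, g, b)],
# i.e. the four corners of a house quad followed by the member's colour, which is how
# ListToImage below interprets it.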
def ListToImage(thislist):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1024, 1024)
with open("paper.png", "rb") as f:
surface = cairo.ImageSurface.create_from_png(f)
ctx = cairo.Context(surface)
ctx.scale(1,1)
for i in range(0, len(thislist)):
ctx.move_to(thislist[i][0][0],thislist[i][0][1])
for j in range(1, len(thislist[i])-1):
x=thislist[i][j][0]
y=thislist[i][j][1]
ctx.line_to(x,y)
k=j+1
ctx.close_path()
r=thislist[i][k][0]/255
g=thislist[i][k][1]/255
b=thislist[i][k][2]/255
ctx.set_source_rgb(r,g,b)
ctx.stroke()
buff = BytesIO()
surface.write_to_png(buff)
buff.seek(0)
return buff
def do_city(colors):
city = createCity(colors, 10)
img = ListToImage(city)
return img
def flatten(list_):
out = []
for elem in list_:
for elem2 in elem:
out.append(elem2)
return out
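# Added example: flatten([[1, 2], [3]]) -> [1, 2, 3]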
class City(commands.Cog):
"""
The commands related about generating the city.
"""
def __init__(self, bot):
self.bot = bot
def any_offline(self, city_members):
for cat in city_members:
for m in cat:
if m.status == discord.Status.offline:
return True
return False
def get_city_members(self, guild, max_members):
added = []
city_members = []
# roles are now in descending order
for role in sorted(guild.roles, reverse=True):
mem = role.members
if mem == []: # empty role
continue
cat = []
for m in mem:
if m.display_name.startswith("!") and m.top_role == guild.default_role:
added.append(m) # hoister
if m == guild.owner:
added.append(m)
owner = [[m]]
if m in added:
continue
added.append(m)
cat.append(m)
city_members.append(
sorted(cat.copy(),
key=lambda m_: str(m_))
)
cat.clear()
while len(flatten(city_members)) > max_members and self.any_offline(city_members):
for cat in reversed(city_members):
for m in cat:
if m.status == discord.Status.offline:
cat.pop(cat.index(m))
break
continue
city_members = owner + city_members
if len(flatten(city_members)) > max_members:
while len(flatten(city_members)) > max_members:
for cat in reversed(city_members):
if cat == []:
city_members.pop()
break
cat.pop()
break
return city_members
# debug command
@commands.command()
@commands.is_owner()
async def show_members(self, ctx):
members = self.get_city_members(ctx.guild, 5) # debug, will change
# once image gen is set up
out = []
for cat in members:
out.append(" | ".join([str(m) for m in cat]))
await ctx.send("\n".join(out))
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.guild)
async def city(self, ctx):
"""
Generates a city out of the current server.
"""
async with ctx.typing():
colors = [m.color.to_rgb() for m in ctx.guild.members]
out = await self.bot.loop.run_in_executor(None, do_city, colors)
await ctx.send(file=discord.File(out, "out.png"))
def setup(bot):
bot.add_cog(City(bot))
| 2.890625
| 3
|
python/qtools/respond.py
|
ssorj/qtools
| 12
|
12784187
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import proton as _proton
import proton.handlers as _handlers
import proton.reactor as _reactor
import runpy as _runpy
import sys as _sys
import traceback as _traceback
from .common import *
_description = """
Respond to AMQP requests. Use qrespond in combination with the
qrequest command to transfer requests through an AMQP message server.
"""
_epilog = """
Example usage:
$ qrespond amqps://example.net/queue1 # Respond to requests indefinitely
$ qrespond jobs --count 1 # Respond to one request
$ qrespond jobs --upper --reverse # Transform the request text
"""
class RespondCommand(MessagingCommand):
def __init__(self, home_dir):
super().__init__(home_dir, "qrespond", _Handler(self))
self.parser.description = _description + suite_description
self.parser.epilog = url_epilog + _epilog
self.parser.add_argument("url", metavar="URL",
help="The location of a message source or target")
self.parser.add_argument("-c", "--count", metavar="COUNT", type=int,
help="Exit after processing COUNT requests")
processing_options = self.parser.add_argument_group \
("Request processing options",
"By default, qrespond returns the request text unchanged")
processing_options.add_argument("--upper", action="store_true",
help="Convert the request text to upper case")
processing_options.add_argument("--reverse", action="store_true",
help="Reverse the request text")
processing_options.add_argument("--append", metavar="STRING",
help="Append STRING to the request text")
def init(self, args):
super().init(args)
self.scheme, self.host, self.port, self.address = self.parse_url(args.url)
self.desired_messages = args.count
self.upper = args.upper
self.reverse = args.reverse
self.append = args.append
def process(self, request, response):
text = request.body
if text is None:
return
if self.upper:
text = text.upper()
if self.reverse:
text = "".join(reversed(text))
if self.append is not None:
text += self.append
response.body = text
class _Handler(MessagingHandler):
def __init__(self, command):
super().__init__(command, auto_accept=False)
self.receiver = None
self.processed_requests = 0
def open(self, event):
super().open(event)
self.receiver = event.container.create_receiver(self.connection, self.command.address)
self.sender = event.container.create_sender(self.connection, None)
def close(self, event):
super().close(event)
self.command.notice("Processed {} {}", self.processed_requests, plural("request", self.processed_requests))
def on_message(self, event):
request = event.message
self.command.info("Received request {} from {} on {}", request, self.receiver, event.connection)
response = _proton.Message()
response.address = request.reply_to
response.correlation_id = request.id
try:
self.command.process(request, response)
except:
processing_succeeded = False
_traceback.print_exc()
else:
processing_succeeded = True
self.processed_requests += 1
if processing_succeeded:
self.sender.send(response)
self.command.info("Sent response {} to address '{}' on {}", response, response.address, event.connection)
self.accept(event.delivery)
else:
self.command.warn("Processing request {} failed", request)
self.reject(event.delivery)
if self.processed_requests == self.command.desired_messages:
self.close(event)
| 1.96875
| 2
|
sample_style_img.py
|
AustinXY/super-res-stylegan2
| 1
|
12784188
|
import argparse
import math
import random
import os
import copy
from numpy.core.fromnumeric import resize
import dnnlib
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from torch_utils import image_transforms
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from model import Generator, MappingNetwork, G_NET
from finegan_config import finegan_config
from dataset import MultiResolutionDataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="mpnet trainer")
parser.add_argument('--arch', type=str, default='stylegan2', help='model architectures (stylegan2 | swagan)')
parser.add_argument(
"--iter", type=int, default=800000, help="total training iterations"
)
parser.add_argument(
"--batch", type=int, default=16, help="batch sizes for each gpus"
)
parser.add_argument(
"--n_sample",
type=int,
default=8,
help="number of the samples generated during training",
)
parser.add_argument(
"--size", type=int, default=256, help="image sizes for the model"
)
parser.add_argument(
"--mixing", type=float, default=0.9, help="probability of latent code mixing"
)
parser.add_argument(
"--style_model",
type=str,
default=None,
help="path to stylegan",
)
parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
parser.add_argument(
"--channel_multiplier",
type=int,
default=2,
help="channel multiplier factor for the model. config-f = 2, else = 1",
)
args = parser.parse_args()
n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = n_gpu > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
synchronize()
args.latent = 512
args.n_mlp = 8
args.start_iter = 0
if args.arch == 'stylegan2':
from model import Generator, Discriminator
elif args.arch == 'swagan':
from swagan import Generator, Discriminator
style_generator = Generator(
size=args.size,
style_dim=args.latent,
n_mlp=args.n_mlp,
channel_multiplier=args.channel_multiplier
).to(device)
discriminator = Discriminator(
args.size
).to(device)
assert args.style_model is not None
print("load style model:", args.style_model)
style_dict = torch.load(args.style_model, map_location=lambda storage, loc: storage)
style_generator.load_state_dict(style_dict["g_ema"], strict=False)
style_generator.eval()
discriminator.load_state_dict(style_dict["d"], strict=False)
discriminator.eval()
with torch.no_grad():
sample_z = torch.randn(args.batch, args.latent, device=device)
sample_img, _ = style_generator([sample_z])
print(discriminator(sample_img))
utils.save_image(
sample_img,
f"style_sample.png",
nrow=8,
normalize=True,
range=(-1, 1),
)
| 1.890625
| 2
|
conway.py
|
dvtate/single-file-programs
| 2
|
12784189
|
from tkinter import *;
from random import randint;
from time import sleep;
WIDTH = 500;
HEIGHT = 500;
# create the cells matrix
def init_cells(window, population):
global HEIGHT;
global WIDTH;
cells = [[0 for x in range(WIDTH)] for y in range(HEIGHT)]; # declare matrix
# populate
for cell in range(population):
cells[randint(0, WIDTH - 1)][randint(0, HEIGHT - 1)] = True;
return cells
def update_cells(cells):
    ncells = [row[:] for row in cells];  # copy the grid so in-place updates don't corrupt neighbor counts
for x in range(WIDTH):
for y in range(HEIGHT):
neighbors_count = getNeighborCount(cells, (x, y));
if cells[x][y] and (neighbors_count > 3 or neighbors_count < 2):
ncells[x][y] = False;
elif neighbors_count == 3:
ncells[x][y] = True;
return ncells;
def draw_cells(window, cells):
for x in range(WIDTH):
for y in range(HEIGHT):
if (cells[x][y] == True):
window.create_rectangle(x, y, x, y, fill = "black");
else:
window.create_rectangle(x, y, x, y, fill = "white", outline = "white");
def getNeighborCount(cells, coords):
'''
ret = [];
if coords[0] - 1 > 0 and cells[coords[0] - 1][coords[1]]:
ret.append([coords[0] - 1, coords[1]]);
if coords[1] - 1 > 0 and cells[coords[0]][coords[1] - 1]:
ret.append([coords[0], coords[1] - 1]);
if coords[0] + 1 < WIDTH and cells[coords[0] - 1][coords[1]]:
ret.append([coords[0] + 1, coords[1]]);
if coords[1] + 1 < HEIGHT and cells[coords[0] - 1][coords[1]]:
ret.append([coords[0], coords[1] + 1]);
'''
ret = 0;
    if coords[0] - 1 >= 0 and cells[coords[0] - 1][coords[1]]:
        ret += 1;
    if coords[1] - 1 >= 0 and cells[coords[0]][coords[1] - 1]:
        ret += 1;
    if coords[0] + 1 < WIDTH and cells[coords[0] + 1][coords[1]]:
        ret += 1;
    if coords[1] + 1 < HEIGHT and cells[coords[0]][coords[1] + 1]:
        ret += 1;
return ret;
def main():
global WIDTH;
global HEIGHT;
master = Tk();
window = Canvas(master, width = WIDTH, height = HEIGHT, background = "white");
window.grid();
cells = init_cells(window, 500);
#cells[4][4] = cells[4][5] = cells[5][4] = cells[5][5] = True;
while True:
draw_cells(window, cells);
cells = update_cells(cells);
window.update();
sleep(0.5);
# emulating the functionality of C....
if __name__ == '__main__':
main();
| 3.390625
| 3
|
pgm/inference/MetropolisHastings.py
|
koriavinash1/pgm
| 4
|
12784190
|
<reponame>koriavinash1/pgm
import numpy as np
import math
import pandas as pd
import matplotlib.pyplot as plt
def proposalDistribution(sigma=2):
"""
Describes example proposal distribution
considers gaussion distribution with fixed sigma
as the mean keeps changing it's made an inner function argument
"""
def QDistribution(param = 0):
return lambda x: (1/((2*np.pi)**0.5 * sigma))*np.exp((x-param)**2/ sigma**2)
return QDistribution
class MH(object):
"""
    This class implements the Metropolis-Hastings algorithm for any given
    target distribution and proposal function.
    It checks whether the proposal distribution is symmetric, estimates the
    acceptance probability for each new proposal, and, if the proposal is
    accepted, appends the value to the x_seq list.
"""
def __init__(self, function, burninT, proposalDistribution = proposalDistribution(), proposalSampler = None):
"""
        function: <lambda or any function> complex target distribution to
                  sample points from
        burninT : <int> number of burn-in iterations
        proposalDistribution: <lambda or any function> simple proposal
                  distribution
"""
self.function = function
self.burninT = burninT
self.nonSymmetricP = True
self.proposalDistribution = proposalDistribution
self.proposalSampler = proposalSampler
self.check_proposalDistribution()
self.x = np.random.rand()
self.x_seq = []
self.burninAcc = []
self.collectionAcc = []
def check_proposalDistribution(self):
"""
        checks whether the proposal distribution is symmetric
"""
# Symmetry check:
Q_x = self.proposalDistribution(param = 0)(0.5)
Q_xn = self.proposalDistribution(param = 0.5)(0)
if np.abs(Q_x - Q_xn) < 1e-3:
self.nonSymmetricP = False
def check_point(self, x_next):
"""
computes Acceptance probability
A(X'|X) = min(1, P(X')Q(X|X')/(P(X)Q(X'|X)
accept next point if it's greater than the
threshold
x_next: can be scalar or numpy array
based on dimensionality of probability
distribution
"""
if self.nonSymmetricP:
Q_x = self.proposalDistribution(param = self.x)(x_next)
Q_xnext = self.proposalDistribution(param = x_next)(self.x)
self.A_xn = min(1, (self.function(x_next)*Q_x)/(self.function(self.x)*Q_xnext))
else:
self.A_xn = min(1, self.function(x_next)/self.function(self.x))
# print(A_xn, self.function(x_next), self.function(self.x), x_next, self.x)
self.threshold = np.random.uniform(0,1)
if self.A_xn > self.threshold:
self.x = x_next
def sampler(self):
"""
Sampler returns a value after burninT
returns: iterator
"""
while True:
for i in range(self.burninT):
x_next = self.proposalSampler(self.x)
self.check_point(x_next)
if i < self.burninT - 1:
self.burninAcc.append(self.A_xn)
else:
self.collectionAcc.append(self.A_xn)
self.x_seq.append(self.x)
yield self.x
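# A minimal usage sketch (not part of the original module): sampling from an
# unnormalized standard-normal target with the MH class above, using the default
# Gaussian proposal distribution and a hypothetical random-walk proposal sampler.
if __name__ == "__main__":
    target = lambda x: np.exp(-x ** 2 / 2)                        # unnormalized N(0, 1) density
    proposal_sampler = lambda x: x + np.random.normal(scale=2.0)  # random-walk step (illustrative choice)

    mh = MH(function=target, burninT=100, proposalSampler=proposal_sampler)
    gen = mh.sampler()
    samples = [next(gen) for _ in range(1000)]
    print("sample mean:", np.mean(samples), "sample std:", np.std(samples))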
| 3.609375
| 4
|
remarshal/remarshal.py
|
sseveran/rules_poetry
| 39
|
12784191
|
import remarshal
if __name__ == '__main__':
remarshal.main()
| 1.046875
| 1
|
tests/core/trio/test_trio_endpoint_compat_with_asyncio.py
|
gsalgado/lahja
| 400
|
12784192
|
import asyncio
import multiprocessing
import pytest
from lahja.asyncio import AsyncioEndpoint
from lahja.common import BaseEvent, ConnectionConfig
class EventTest(BaseEvent):
def __init__(self, value):
self.value = value
def run_asyncio(coro, *args):
loop = asyncio.get_event_loop()
loop.run_until_complete(coro(*args))
loop.close()
async def _do_asyncio_client_endpoint(name, ipc_path):
config = ConnectionConfig(name, ipc_path)
async with AsyncioEndpoint(name + "client").run() as client:
await client.connect_to_endpoints(config)
assert client.is_connected_to(name)
await client.wait_until_endpoint_subscribed_to(config.name, EventTest)
event = EventTest("test")
await client.broadcast(event)
@pytest.mark.trio
async def test_trio_endpoint_serving_asyncio_endpoint(
endpoint_server, endpoint_server_config
):
name = endpoint_server_config.name
path = endpoint_server_config.path
proc = multiprocessing.Process(
target=run_asyncio, args=(_do_asyncio_client_endpoint, name, path)
)
proc.start()
result = await endpoint_server.wait_for(EventTest)
assert isinstance(result, EventTest)
assert result.value == "test"
proc.join()
| 1.9375
| 2
|
slicer/slicer_internal.py
|
interpretml/slicer
| 23
|
12784193
|
""" Lower level layer for slicer.
Mom's spaghetti.
"""
# TODO: Consider boolean array indexing.
from typing import Any, AnyStr, Union, List, Tuple
from abc import abstractmethod
import numbers
class AtomicSlicer:
""" Wrapping object that will unify slicing across data structures.
What we support:
Basic indexing (return references):
- (start:stop:step) slicing
- support ellipses
Advanced indexing (return references):
- integer array indexing
Numpy Reference:
Basic indexing (return views):
- (start:stop:step) slicing
- support ellipses and newaxis (alias for None)
Advanced indexing (return copy):
- integer array indexing, i.e. X[[1,2], [3,4]]
- boolean array indexing
- mixed array indexing (has integer array, ellipses, newaxis in same slice)
"""
def __init__(self, o: Any, max_dim: Union[None, int, AnyStr] = "auto"):
""" Provides a consistent slicing API to the object provided.
Args:
o: Object to enable consistent slicing.
Currently supports numpy dense arrays, recursive lists ending with list or numpy.
max_dim: Max number of dimensions the wrapped object has.
If set to "auto", max dimensions will be inferred. This comes at compute cost.
"""
self.o = o
self.max_dim = max_dim
if self.max_dim == "auto":
self.max_dim = UnifiedDataHandler.max_dim(o)
def __repr__(self) -> AnyStr:
""" Override default repr for human readability.
Returns:
String to display.
"""
return f"{self.__class__.__name__}({self.o.__repr__()})"
def __getitem__(self, item: Any) -> Any:
""" Consistent slicing into wrapped object.
Args:
item: Slicing key of type integer or slice.
Returns:
Sliced object.
Raises:
ValueError: If slicing is not compatible with wrapped object.
"""
# Turn item into tuple if not already.
index_tup = unify_slice(item, self.max_dim)
# Slice according to object type.
return UnifiedDataHandler.slice(self.o, index_tup, self.max_dim)
def unify_slice(item: Any, max_dim: int, alias_lookup=None) -> Tuple:
""" Resolves aliases and ellipses in a slice item.
Args:
item: Slicing key that is passed to __getitem__.
max_dim: Max dimension of object to be sliced.
alias_lookup: AliasLookup structure.
Returns:
A tuple representation of the item.
"""
item = _normalize_slice_key(item)
index_tup = _normalize_subkey_types(item)
index_tup = _handle_newaxis_ellipses(index_tup, max_dim)
if alias_lookup:
index_tup = _handle_aliases(index_tup, alias_lookup)
return index_tup
def _normalize_subkey_types(index_tup: Tuple) -> Tuple:
""" Casts subkeys into basic types such as int.
Args:
        index_tup: Tuple of slicing subkeys passed within __getitem__.
Returns:
Tuple with subkeys casted to basic types.
"""
new_index_tup = [] # Gets casted to tuple at the end
np_int_types = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
}
for subkey in index_tup:
if _safe_isinstance(subkey, "numpy", np_int_types):
new_subkey = int(subkey)
elif _safe_isinstance(subkey, "numpy", "ndarray"):
if len(subkey.shape) == 1:
new_subkey = subkey.tolist()
else:
raise ValueError(f"Cannot use array of shape {subkey.shape} as subkey.")
else:
new_subkey = subkey
new_index_tup.append(new_subkey)
return tuple(new_index_tup)
def _normalize_slice_key(key: Any) -> Tuple:
""" Normalizes slice key into always being a top-level tuple.
Args:
key: Slicing key that is passed within __getitem__.
Returns:
Expanded slice as a tuple.
"""
if not isinstance(key, tuple):
return (key,)
else:
return key
def _handle_newaxis_ellipses(index_tup: Tuple, max_dim: int) -> Tuple:
""" Expands newaxis and ellipses within a slice for simplification.
This code is mostly adapted from: https://github.com/clbarnes/h5py_like/blob/master/h5py_like/shape_utils.py#L111
Args:
index_tup: Slicing key as a tuple.
max_dim: Maximum number of dimensions in the respective sliceable object.
Returns:
Expanded slice as a tuple.
"""
non_indexes = (None, Ellipsis)
concrete_indices = sum(idx not in non_indexes for idx in index_tup)
index_list = []
# newaxis_at = []
has_ellipsis = False
int_count = 0
for item in index_tup:
if isinstance(item, numbers.Number):
int_count += 1
# NOTE: If we need locations of new axis, re-enable this.
if item is None: # pragma: no cover
pass
# newaxis_at.append(len(index_list) + len(newaxis_at) - int_count)
elif item == Ellipsis:
if has_ellipsis: # pragma: no cover
raise IndexError("an index can only have a single ellipsis ('...')")
has_ellipsis = True
initial_len = len(index_list)
while len(index_list) + (concrete_indices - initial_len) < max_dim:
index_list.append(slice(None))
else:
index_list.append(item)
if len(index_list) > max_dim: # pragma: no cover
raise IndexError("too many indices for array")
while len(index_list) < max_dim:
index_list.append(slice(None))
# return index_list, newaxis_at
return tuple(index_list)
def _handle_aliases(index_tup: Tuple, alias_lookup) -> Tuple:
new_index_tup = []
def resolve(item, dim):
if isinstance(item, slice):
return item
# Replace element if in alias lookup, otherwise use original.
item = alias_lookup.get(dim, item, item)
return item
# Go through each element within the index and resolve if needed.
for dim, item in enumerate(index_tup):
if isinstance(item, list):
new_item = []
for sub_item in item:
new_item.append(resolve(sub_item, dim))
else:
new_item = resolve(item, dim)
new_index_tup.append(new_item)
return tuple(new_index_tup)
class Tracked(AtomicSlicer):
""" Tracked defines an object that slicer wraps."""
def __init__(self, o: Any, dim: Union[int, List, tuple, None, str] = "auto"):
""" Defines an object that will be wrapped by slicer.
Args:
o: Object that will be tracked for slicer.
dim: Target dimension(s) slicer will index on for this object.
"""
super().__init__(o)
        # Protected attribute that can be overridden.
self._name = None
# Place dim into coordinate form.
if dim == "auto":
self.dim = list(range(self.max_dim))
elif dim is None:
self.dim = []
elif isinstance(dim, int):
self.dim = [dim]
elif isinstance(dim, list):
self.dim = dim
elif isinstance(dim, tuple):
self.dim = list(dim)
else: # pragma: no cover
raise ValueError(f"Cannot handle dim of type: {type(dim)}")
class Obj(Tracked):
""" An object that slicer wraps. """
def __init__(self, o, dim="auto"):
super().__init__(o, dim)
class Alias(Tracked):
""" Defines a tracked object as well as additional __getitem__ keys. """
def __init__(self, o, dim):
if not (
isinstance(dim, int) or (isinstance(dim, (list, tuple)) and len(dim) <= 1)
): # pragma: no cover
raise ValueError("Aliases must track a single dimension")
super().__init__(o, dim)
class AliasLookup:
def __init__(self, aliases):
self._lookup = {}
# Populate lookup and merge indexes.
for _, alias in aliases.items():
self.update(alias)
def update(self, alias):
if alias.dim is None or len(alias.dim) == 0:
return
dim = alias.dim[0]
if dim not in self._lookup:
self._lookup[dim] = {}
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
if x not in dim_lookup:
dim_lookup[x] = set()
dim_lookup[x].add(i)
def delete(self, alias):
'''Delete an alias that exists from lookup'''
dim = alias.dim[0]
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
del dim_lookup[x]
def get(self, dim, target, default=None):
if dim not in self._lookup:
return default
indexes = self._lookup[dim].get(target, None)
if indexes is None:
return default
if len(indexes) == 1:
return next(iter(indexes))
else:
return list(indexes)
def resolve_dim(slicer_index: Tuple, slicer_dim: List) -> List:
""" Extracts new dim after applying slicing index and maps it back to the original index list. """
new_slicer_dim = []
reduced_mask = []
for _, curr_idx in enumerate(slicer_index):
if isinstance(curr_idx, (tuple, list, slice)):
reduced_mask.append(0)
else:
reduced_mask.append(1)
for curr_dim in slicer_dim:
if reduced_mask[curr_dim] == 0:
new_slicer_dim.append(curr_dim - sum(reduced_mask[:curr_dim]))
return new_slicer_dim
def reduced_o(tracked: Tracked) -> Union[List, Any]:
os = [t.o for t in tracked]
os = os[0] if len(os) == 1 else os
return os
class BaseHandler:
@classmethod
@abstractmethod
def head_slice(cls, o, index_tup, max_dim):
raise NotImplementedError() # pragma: no cover
@classmethod
@abstractmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
raise NotImplementedError() # pragma: no cover
@classmethod
@abstractmethod
def max_dim(cls, o):
raise NotImplementedError() # pragma: no cover
@classmethod
def default_alias(cls, o):
return []
class SeriesHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
is_element = True if isinstance(head_index, int) else False
sliced_o = o.iloc[head_index]
return is_element, sliced_o, 1
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
# NOTE: Series only has one dimension,
# call slicer again to end the recursion.
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
@classmethod
def max_dim(cls, o):
return len(o.shape)
@classmethod
def default_alias(cls, o):
index_alias = Alias(o.index.to_list(), 0)
index_alias._name = "index"
return [index_alias]
class DataFrameHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
# NOTE: At head slice, we know there are two fixed dimensions.
cut_index = index_tup
is_element = True if isinstance(cut_index[-1], int) else False
sliced_o = o.iloc[cut_index]
return is_element, sliced_o, 2
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
# NOTE: Dataframe has fixed dimensions,
# call slicer again to end the recursion.
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
@classmethod
def max_dim(cls, o):
return len(o.shape)
@classmethod
def default_alias(cls, o):
index_alias = Alias(o.index.to_list(), 0)
index_alias._name = "index"
column_alias = Alias(o.columns.to_list(), 1)
column_alias._name = "columns"
return [index_alias, column_alias]
class ArrayHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
# Check if head is string
head_index, tail_index = index_tup[0], index_tup[1:]
cut = 1
for sub_index in tail_index:
if isinstance(sub_index, str) or cut == len(o.shape):
break
cut += 1
# Process native array dimensions
cut_index = index_tup[:cut]
is_element = any([True if isinstance(x, int) else False for x in cut_index])
sliced_o = o[cut_index]
return is_element, sliced_o, cut
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
# NOTE: If we're dealing with a scipy matrix,
# we have to manually flatten it ourselves
# to keep consistent to the rest of slicer's API.
if _safe_isinstance(o, "scipy.sparse.csc", "csc_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.csr", "csr_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.dok", "dok_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.lil", "lil_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
else:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
inner = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
if _safe_isinstance(o, "numpy", "ndarray"):
import numpy
if len(inner) > 0 and hasattr(inner[0], "__len__"):
ragged = not all(len(x) == len(inner[0]) for x in inner)
else:
ragged = False
if ragged:
                    return numpy.array(inner, dtype=object)
else:
return numpy.array(inner)
elif _safe_isinstance(o, "torch", "Tensor"):
import torch
if len(inner) > 0 and isinstance(inner[0], torch.Tensor):
return torch.stack(inner)
else:
return torch.tensor(inner)
elif _safe_isinstance(o, "scipy.sparse.csc", "csc_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='csc')
return out
elif _safe_isinstance(o, "scipy.sparse.csr", "csr_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='csr')
return out
elif _safe_isinstance(o, "scipy.sparse.dok", "dok_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='dok')
return out
elif _safe_isinstance(o, "scipy.sparse.lil", "lil_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='lil')
return out
else:
raise ValueError(f"Cannot handle type {type(o)}.") # pragma: no cover
@classmethod
def max_dim(cls, o):
if _safe_isinstance(o, "numpy", "ndarray") and o.dtype == "object":
return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
else:
return len(o.shape)
class DictHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
if isinstance(head_index, (tuple, list)) and len(index_tup) == 0:
return False, o, 1
if isinstance(head_index, (list, tuple)):
return (
False,
{
sub_index: AtomicSlicer(o, max_dim=max_dim)[sub_index]
for sub_index in head_index
},
1,
)
elif isinstance(head_index, slice):
if head_index == slice(None, None, None):
return False, o, 1
return False, o[head_index], 1
else:
return True, o[head_index], 1
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
return {
k: AtomicSlicer(e, max_dim=max_dim)[tail_index] for k, e in o.items()
}
@classmethod
def max_dim(cls, o):
return max([UnifiedDataHandler.max_dim(x) for x in o.values()], default=-1) + 1
class ListTupleHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
if isinstance(head_index, (tuple, list)) and len(index_tup) == 0:
return False, o, 1
if isinstance(head_index, (list, tuple)):
if len(head_index) == 0:
return False, o, 1
else:
results = [
AtomicSlicer(o, max_dim=max_dim)[sub_index]
for sub_index in head_index
]
results = tuple(results) if isinstance(o, tuple) else results
return False, results, 1
elif isinstance(head_index, slice):
return False, o[head_index], 1
elif isinstance(head_index, int):
return True, o[head_index], 1
else: # pragma: no cover
raise ValueError(f"Invalid key {head_index} for {o}")
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
results = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
return tuple(results) if isinstance(o, tuple) else results
@classmethod
def max_dim(cls, o):
return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
class UnifiedDataHandler:
""" Registry that maps types to their unified slice calls."""
""" Class attribute that maps type to their unified slice calls."""
type_map = {
("builtins", "list"): ListTupleHandler,
("builtins", "tuple"): ListTupleHandler,
("builtins", "dict"): DictHandler,
("torch", "Tensor"): ArrayHandler,
("numpy", "ndarray"): ArrayHandler,
("scipy.sparse.csc", "csc_matrix"): ArrayHandler,
("scipy.sparse.csr", "csr_matrix"): ArrayHandler,
("scipy.sparse.dok", "dok_matrix"): ArrayHandler,
("scipy.sparse.lil", "lil_matrix"): ArrayHandler,
("pandas.core.frame", "DataFrame"): DataFrameHandler,
("pandas.core.series", "Series"): SeriesHandler,
}
@classmethod
def slice(cls, o, index_tup, max_dim):
# NOTE: Unified handles base cases such as empty tuples, which
# specialized handlers do not.
if isinstance(index_tup, (tuple, list)) and len(index_tup) == 0:
return o
# Slice as delegated by data handler.
o_type = _type_name(o)
head_slice = cls.type_map[o_type].head_slice
tail_slice = cls.type_map[o_type].tail_slice
is_element, sliced_o, cut = head_slice(o, index_tup, max_dim)
out = tail_slice(sliced_o, index_tup[cut:], max_dim - cut, is_element)
return out
@classmethod
def max_dim(cls, o):
o_type = _type_name(o)
if o_type not in cls.type_map:
return 0
return cls.type_map[o_type].max_dim(o)
@classmethod
def default_alias(cls, o):
o_type = _type_name(o)
if o_type not in cls.type_map:
return {}
return cls.type_map[o_type].default_alias(o)
def _type_name(o: object) -> Tuple[str, str]:
return o.__class__.__module__, o.__class__.__name__
def _safe_isinstance(
o: object, module_name: str, type_name: Union[str, set, tuple]
) -> bool:
o_module, o_type = _type_name(o)
if isinstance(type_name, str):
return o_module == module_name and o_type == type_name
else:
return o_module == module_name and o_type in type_name
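# A minimal usage sketch (not part of the original module): AtomicSlicer gives
# numpy-style multi-dimensional indexing over plain nested lists. The data below
# is made up purely for illustration.
if __name__ == "__main__":
    nested = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    sliced = AtomicSlicer(nested)
    print(sliced[0, 1])        # 2  -> element indexing across both dimensions
    print(sliced[:, 1])        # [2, 5, 8]  -> column-style slice of a list of lists
    print(sliced[[0, 2], :])   # [[1, 2, 3], [7, 8, 9]]  -> integer array indexing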
| 3.171875
| 3
|
getWhalePortfolio.py
|
J700070/WhalePortfolioAnalyzer
| 1
|
12784194
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
def getData(holding_ticker):
# Data Extraction
# We obtain the HTML from the corresponding fund in Dataroma.
html = requests.get(
"https://www.dataroma.com/m/holdings.php?m="+holding_ticker, headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:94.0) Gecko/20100101 Firefox/94.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
"Accept-Language": "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3",
"Upgrade-Insecure-Requests": "1",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-User": "?1",
"Cache-Control": "max-age=0"
}).content
# Non-table Data Parsing
soup = BeautifulSoup(html, "html.parser")
name = soup.find(id="f_name").text
# Name sometimes doesn't get properly formatted
name = name.split("\n")[0]
other_info = soup.find(id="p2").findAll('span')
period = other_info[0].text
portfolio_date = other_info[1].text
df_list = pd.read_html(html)
df = df_list[0]
# Data formatting
# "History", "52WeekLow", "52WeekHigh" & "Unnamed: 7" columns are not useful.
df = df.drop(columns=['History', 'Unnamed: 7', "52WeekLow", "52WeekHigh"])
# Column name corrections.
df = df.rename(columns={"% ofPortfolio": "Portfolio (%)"})
df = df.rename(columns={"+/-ReportedPrice": "Reported Price Change (%)"})
df = df.rename(columns={"ReportedPrice*": "Reported Price"})
df = df.rename(columns={"RecentActivity": "Recent Activity"})
# Nan corrections
df["Reported Price Change (%)"] = df["Reported Price Change (%)"].replace(
np.nan, "0")
# Data format & type corrections.
df["Value"] = df["Value"].apply(parseValueColumnToNumber)
df["Value"] = pd.to_numeric(df["Value"])
df["Reported Price Change (%)"] = df["Reported Price Change (%)"].apply(
parseReturnsColumnToNumber)
df["Reported Price Change (%)"] = pd.to_numeric(
df["Reported Price Change (%)"])
    # Ticker and name of the stock are inside the same column, so we split it into two different columns
df["Ticker"] = df["Stock"].apply(lambda x: x.split(" - ")[0])
df.index = df["Stock"].apply(lambda x: x.split(" - ")[0])
df["Stock"] = df["Stock"].apply(lambda x: x.split(" - ")[1])
# We move "Ticker" column to the front
col = df.pop("Ticker")
df.insert(0, col.name, col)
return [name, period, portfolio_date, df]
# We delete the dollar sign and the commas
def parseValueColumnToNumber(string):
res = ""
for char in string:
if(char.isdigit()):
res += char
return res
# We delete the percent sign
def parseReturnsColumnToNumber(string):
return string.replace("%", "")
def getDataBuys(holding_ticker):
# Data Extraction
# We obtain the HTML from the corresponding fund in Dataroma.
html = requests.get(
"https://www.dataroma.com/m/m_activity.php?m="+holding_ticker + "&typ=b", headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:94.0) Gecko/20100101 Firefox/94.0",
"Accept": "image/avif,image/webp,*/*",
"Accept-Language": "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3",
"Sec-Fetch-Dest": "image",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "same-origin",
"Cache-Control": "max-age=0"
}).content
df_list = pd.read_html(html)
df = df_list[0]
print(df.head())
return
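# A minimal usage sketch (not part of the original module). The Dataroma fund
# code "BRK" below is only an illustrative guess; pass whichever fund identifier
# Dataroma uses for the portfolio you want to inspect. This performs a live
# HTTP request, so it needs network access.
if __name__ == "__main__":
    name, period, portfolio_date, holdings = getData("BRK")
    print(name, period, portfolio_date)
    print(holdings.head())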
| 3.09375
| 3
|
pylark/api_service_bitable_table_batch_create.py
|
chyroc/pylark
| 7
|
12784195
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class BatchCreateBitableTableReqTable(object):
name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "name"}
    ) # data table name, example value: "table1"
@attr.s
class BatchCreateBitableTableReq(object):
user_id_type: lark_type.IDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "user_id_type"}
    ) # user ID type, example value: "open_id"; allowed values: `open_id`: the user's open id, `union_id`: the user's union id, `user_id`: the user's user id; default: `open_id`; when the value is `user_id`, required field permission: obtain the user's user ID
app_token: str = attr.ib(
default="", metadata={"req_type": "path", "key": "app_token"}
    ) # bitable app token, example value: "<KEY>"
tables: typing.List[BatchCreateBitableTableReqTable] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "tables"}
) # tables
@attr.s
class BatchCreateBitableTableResp(object):
table_ids: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "table_ids"}
) # table ids
def _gen_batch_create_bitable_table_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=BatchCreateBitableTableResp,
scope="Bitable",
api="BatchCreateBitableTable",
method="POST",
url="https://open.feishu.cn/open-apis/bitable/v1/apps/:app_token/tables/batch_create",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
need_user_access_token=True,
)
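# A minimal construction sketch (not part of the generated module): building a
# batch-create request from the attrs classes above. The app_token value is a
# placeholder, and actually sending the request still requires the surrounding
# pylark client machinery.
if __name__ == "__main__":
    req = BatchCreateBitableTableReq(
        app_token="<app_token>",
        tables=[BatchCreateBitableTableReqTable(name="table1")],
    )
    print(req)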
| 1.757813
| 2
|
openmdao.gui/src/openmdao/gui/test/functional/pageobjects/component.py
|
swryan/OpenMDAO-Framework
| 0
|
12784196
|
import random
import string
from selenium.webdriver.common.by import By
from dialog import DialogPage
from elements import ButtonElement, GridElement, TextElement, InputElement
from workflow import find_workflow_component_figures
from util import ArgsPrompt, NotifierPage
class ComponentPage(DialogPage):
""" Component editor page. """
inputs_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Inputs']"))
slots_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Slots']"))
outputs_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Outputs']"))
events_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Events']"))
inputs = GridElement((By.ID, 'Inputs_props'))
outputs = GridElement((By.ID, 'Outputs_props'))
def __init__(self, browser, port, locator):
super(ComponentPage, self).__init__(browser, port, locator)
# It takes a while for the full load to complete.
NotifierPage.wait(self)
def get_inputs(self):
""" Return inputs grid. """
self('inputs_tab').click()
return self.inputs
def set_input(self, name, value):
""" Set input `name` to `value`. """
self('inputs_tab').click()
grid = self.inputs
found = []
for row in grid.rows:
if row[0] == name:
row[2] = value
return
found.append(row[0])
raise RuntimeError('%r not found in inputs %s' % (name, found))
def get_events(self):
""" Return events grid. """
self('events_tab').click()
return self.events
def get_outputs(self):
""" Return outputs grid. """
self('outputs_tab').click()
return self.outputs
def show_slots(self):
"""switch to slots tab"""
self('slots_tab').click()
class DriverPage(ComponentPage):
""" Driver editor page. """
parameters_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Parameters']"))
workflow_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Workflow']"))
objectives_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Objectives']"))
constraints_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Constraints']"))
triggers_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Triggers']"))
parameters = GridElement((By.ID, 'Parameters_parms'))
objectives = GridElement((By.ID, 'Objectives_objectives'))
constraints = GridElement((By.ID, 'Constraints_constraints'))
triggers = GridElement((By.ID, 'Triggers_triggers'))
add_parameter = ButtonElement((By.XPATH, "//span[text()='Add Parameter']"))
add_objective = ButtonElement((By.XPATH, "//span[text()='Add Objective']"))
add_constraint = ButtonElement((By.XPATH, "//span[text()='Add Constraint']"))
add_trigger = ButtonElement((By.XPATH, "//span[text()='Add Event']"))
def get_parameters(self):
""" Return parameters grid. """
self('parameters_tab').click()
return self.parameters
def get_objectives(self):
""" Return objectives grid. """
self('objectives_tab').click()
return self.objectives
def get_constraints(self):
""" Return constraints grid. """
self('constraints_tab').click()
return self.constraints
def get_triggers(self):
""" Return triggers grid. """
self('triggers_tab').click()
return self.triggers
def new_parameter(self):
""" Return :class:`ParameterDialog`. """
self('add_parameter').click()
return ParameterDialog(self.browser, self.port,
(By.XPATH, "//div[@id='parameter-dialog']/.."))
def new_objective(self):
""" Return :class:`ObjectiveDialog`. """
self('add_objective').click()
return ObjectiveDialog(self.browser, self.port,
(By.XPATH, "//div[@id='objective-dialog']/.."))
def new_constraint(self):
""" Return :class:`ConstraintDialog`. """
self('add_constraint').click()
return ConstraintDialog(self.browser, self.port,
(By.XPATH, "//div[@id='constraint-dialog']/.."))
def new_trigger(self):
""" Return :class:`EventDialog`. """
self('add_trigger').click()
return EventDialog(self.browser, self.port,
(By.XPATH, "//div[@id='event-dialog']/.."))
def show_workflow(self):
"""switch to workflow tab"""
self('workflow_tab').click()
def get_workflow_component_figures(self):
""" Return workflow component figure elements. """
return find_workflow_component_figures(self)
class ParameterDialog(DialogPage):
""" Dialog for adding a new parameter. """
target = InputElement((By.ID, 'parameter-target'))
low = InputElement((By.ID, 'parameter-low'))
high = InputElement((By.ID, 'parameter-high'))
scaler = InputElement((By.ID, 'parameter-scaler'))
adder = InputElement((By.ID, 'parameter-adder'))
name = InputElement((By.ID, 'parameter-name'))
ok = ButtonElement((By.ID, 'parameter-ok'))
cancel = ButtonElement((By.ID, 'parameter-cancel'))
class ObjectiveDialog(DialogPage):
""" Dialog for adding a new objective. """
expr = InputElement((By.ID, 'objective-expr'))
name = InputElement((By.ID, 'objective-name'))
ok = ButtonElement((By.ID, 'objective-ok'))
cancel = ButtonElement((By.ID, 'objective-cancel'))
class ConstraintDialog(DialogPage):
""" Dialog for adding a new constraint. """
expr = InputElement((By.ID, 'constraint-expr'))
scaler = InputElement((By.ID, 'constraint-scaler'))
adder = InputElement((By.ID, 'constraint-adder'))
name = InputElement((By.ID, 'constraint-name'))
ok = ButtonElement((By.ID, 'constraint-ok'))
cancel = ButtonElement((By.ID, 'constraint-cancel'))
class EventDialog(DialogPage):
""" Dialog for adding a new event. """
target = InputElement((By.ID, 'event-target'))
ok = ButtonElement((By.ID, 'event-ok'))
cancel = ButtonElement((By.ID, 'event-cancel'))
class AssemblyPage(ComponentPage):
""" Assembly editor page. """
dataflow_tab = ButtonElement((By.XPATH, "div/ul/li/a[text()='Dataflow']"))
def show_dataflow(self):
self('dataflow_tab').element.click()
class PropertiesPage(DialogPage):
""" Component properties page. """
header = TextElement((By.XPATH, 'h3[1]'))
inputs = GridElement((By.ID, 'Inputs_props'))
outputs = GridElement((By.ID, 'Outputs_props'))
def set_input(self, name, value):
""" Set input `name` to `value`. """
self('inputs_tab').click()
grid = self.inputs
found = []
for row in grid.rows:
if row[0] == name:
row[1] = value
return
found.append(row[0])
raise RuntimeError('%r not found in inputs %s' % (name, found))
class NameInstanceDialog(ArgsPrompt):
""" Adds :meth:`create_and_dismiss` to :class:`ArgsPrompt`. """
def __init__(self, parent):
super(NameInstanceDialog, self).__init__(parent.browser, parent.port)
def create_and_dismiss(self, name=None):
"""Names the instance. Returns the name. Force a name with the name argument"""
chars = string.ascii_uppercase
name = name or ''.join(random.choice(chars).strip() for x in range(8))
self.name = name
self.click_ok()
return name
| 2.484375
| 2
|
django/prof_education/students/migrations/0003_auto_20210418_0504.py
|
sergeymirasov/h-edu
| 0
|
12784197
|
# Generated by Django 3.2 on 2021-04-18 02:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0002_auto_20210418_0405'),
]
operations = [
migrations.AlterField(
model_name='student',
name='admitted_at',
field=models.DateField(verbose_name='Дата поступления'),
),
migrations.AlterField(
model_name='student',
name='due_at',
field=models.DateField(blank=True, null=True, verbose_name='Дата окончания'),
),
migrations.AlterField(
model_name='student',
name='excluded_at',
field=models.DateField(blank=True, null=True, verbose_name='Дата исключения'),
),
]
| 1.53125
| 2
|
app/gbi_server/model/log.py
|
omniscale/gbi-server
| 2
|
12784198
|
# This file is part of the GBI project.
# Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shapely
import datetime
from sqlalchemy.orm import backref
from geoalchemy2.types import Geometry
from geoalchemy2.shape import to_shape
from gbi_server.extensions import db
class Log(db.Model):
__tablename__ = 'logs'
id = db.Column(db.Integer, primary_key=True)
time = db.Column(db.DateTime, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
user = db.relationship('User', backref=backref('logs', cascade="all,delete,delete-orphan"))
action = db.Column(db.String(24), nullable=False)
geometry = db.Column(Geometry('MULTIPOLYGON', srid=4326))
format = db.Column(db.String)
srs = db.Column(db.String)
mapping = db.Column(db.String)
source = db.Column(db.String)
layer = db.Column(db.String)
zoom_level_start = db.Column(db.Integer)
zoom_level_end = db.Column(db.Integer)
refreshed = db.Column(db.Boolean)
@property
def geometry_as_geojson(self):
if self.geometry is not None:
geom = json.dumps(
shapely.geometry.mapping(to_shape(self.geometry))
)
return geom
return False
class SearchLog(db.Model):
__tablename__ = 'search_logs'
id = db.Column(db.Integer, primary_key=True)
time = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
user = db.relationship('User', backref=backref('search_logs', cascade="all,delete,delete-orphan"))
class SearchLogGeometry(db.Model):
__tablename__ = 'search_log_geometries'
id = db.Column(db.Integer, primary_key=True)
search_log_id = db.Column(db.Integer, db.ForeignKey('search_logs.id'), nullable=False)
search_log = db.relationship('SearchLog', backref=backref('geometries', cascade="all,delete,delete-orphan"))
geometry = db.Column(Geometry('POLYGON', srid=3857))
identifier = db.Column(db.String)
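# A minimal illustration (not part of the original module) of what
# Log.geometry_as_geojson does: a shapely geometry is serialized to GeoJSON via
# shapely.geometry.mapping() and json.dumps(). The WKT polygon below is made up
# for demonstration; no database or GeoAlchemy2 conversion is involved here.
if __name__ == "__main__":
    from shapely import wkt
    from shapely.geometry import mapping
    poly = wkt.loads("MULTIPOLYGON (((0 0, 1 0, 1 1, 0 1, 0 0)))")
    print(json.dumps(mapping(poly)))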
| 1.953125
| 2
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/TessellatedBuildIssueType.py
|
YKato521/ironpython-stubs
| 0
|
12784199
|
class TessellatedBuildIssueType(Enum, IComparable, IFormattable, IConvertible):
"""
    Issues which can be encountered while building a polymesh,
    or a shell, or a solid from data describing
    tessellated shapes.
enum TessellatedBuildIssueType,values: AllFine (0),DegenOriginalLoop (18),EdgeTraversalForFlip (24),EdgeTwiceUsedByFace (20),EmptyFace (1),EmptyLoop (2),FaceWithIslands (15),InconsistentInnerOuterOriginalLoopCCW (19),InconsitentMultiEdgeTraversalForFlip (25),InternalError (28),InternalLightError (29),InternalMissingError (30),InternalUtilityError (27),IntersectingOriginalLoops (14),LoopOnBestFitSelfIntersects (13),LostAllLoops (9),LostTooManyLoopVertices (6),NonManifoldEdge (21),NonPlanarFace (10),NotSetYet (32),NumberOfIssueTypes (33),OriginalLoopGeomAcuteAngle (7),OriginalLoopMeshAcuteAngle (8),OriginalLoopsProximity (16),OriginalPointsTooFarFromTheirPlane (11),OuterLoopIsNotFirst (17),OverlappingAdjacentFaces (22),PartitionPointsTooFarFromTrueEdge (23),TooFewOriginalVertices (3),TooShortOriginalLoopGeomSegment (5),TooShortOriginalLoopMeshSegment (4),TooSmallVertexSegementDistInFinalLoop (26),TooSmallVertexSegementDistInOriginalLoop (12),UnarticulatedNonManifoldEdge (31)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
AllFine = None
DegenOriginalLoop = None
EdgeTraversalForFlip = None
EdgeTwiceUsedByFace = None
EmptyFace = None
EmptyLoop = None
FaceWithIslands = None
InconsistentInnerOuterOriginalLoopCCW = None
InconsitentMultiEdgeTraversalForFlip = None
InternalError = None
InternalLightError = None
InternalMissingError = None
InternalUtilityError = None
IntersectingOriginalLoops = None
LoopOnBestFitSelfIntersects = None
LostAllLoops = None
LostTooManyLoopVertices = None
NonManifoldEdge = None
NonPlanarFace = None
NotSetYet = None
NumberOfIssueTypes = None
OriginalLoopGeomAcuteAngle = None
OriginalLoopMeshAcuteAngle = None
OriginalLoopsProximity = None
OriginalPointsTooFarFromTheirPlane = None
OuterLoopIsNotFirst = None
OverlappingAdjacentFaces = None
PartitionPointsTooFarFromTrueEdge = None
TooFewOriginalVertices = None
TooShortOriginalLoopGeomSegment = None
TooShortOriginalLoopMeshSegment = None
TooSmallVertexSegementDistInFinalLoop = None
TooSmallVertexSegementDistInOriginalLoop = None
UnarticulatedNonManifoldEdge = None
value__ = None
| 1.882813
| 2
|
src/models.py
|
akkapakasaikiran/fooling-NNs
| 0
|
12784200
|
from torch import nn
class NeuralNetwork(nn.Module):
def __init__(self, h1, h2, num_classes=10, name='NN'):
super().__init__()
self.name = name
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, h1),
nn.ReLU(),
nn.Linear(h1, h2),
nn.ReLU(),
            nn.Linear(h2, num_classes)
)
def forward(self, x):
x = self.flatten(x)
x = self.linear_relu_stack(x)
return x
def get_name(self): return self.name
def get_type(self): return 'NeuralNetwork'
def num_params(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
class CNN(nn.Module):
def __init__(self, h1=64, h2=128, input_size=28, num_classes=10, name='CNN'):
super().__init__()
self.name = name
self.conv1 = nn.Sequential(
nn.Conv2d(1, h1, 5, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2, 2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(h1, h2, 5, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2, 2)
)
self.flatten = nn.Flatten()
num_neurons = h2 * (input_size // (2*2))**2
self.fc = nn.Linear(num_neurons, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.flatten(x)
x = self.fc(x)
return x
def get_name(self): return self.name
def get_type(self): return 'CNN'
def num_params(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
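# A minimal smoke test (not part of the original module): run a random
# MNIST-shaped batch through both models and check the output shapes.
if __name__ == "__main__":
    import torch

    x = torch.randn(4, 1, 28, 28)          # batch of 4 single-channel 28x28 images
    mlp = NeuralNetwork(h1=128, h2=64)
    cnn = CNN()
    print(mlp.get_name(), mlp(x).shape, mlp.num_params())   # torch.Size([4, 10])
    print(cnn.get_name(), cnn(x).shape, cnn.num_params())   # torch.Size([4, 10])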
| 3.28125
| 3
|
osarchiver/destination/db/__init__.py
|
pcibot/osarchiver
| 18
|
12784201
|
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright 2019 The OSArchiver Authors. All rights reserved.
"""
init file that allows importing Db from osarchiver.destination.db without
loading submodules
"""
from osarchiver.destination.db.db import Db
| 1.179688
| 1
|
hqca/opts/gradient/bfgs.py
|
damazz/HQCA
| 0
|
12784202
|
from hqca.opts.core import *
import numpy as np
from functools import reduce
from copy import deepcopy as copy
from math import pi
from hqca.opts.gradient.linesearch import BacktrackingLineSearch
def para(xs):
    # convert a 1xN row-vector matrix to a plain Python list
return xs.tolist()[0]
class BFGS(OptimizerInstance):
'''
See Nocedal & Wright, chapter 6, for more information on the
implementation of the BFGS algorithm.
'''
def __init__(self,**kwargs):
OptimizerInstance.__init__(self,**kwargs)
OptimizerInstance._gradient_keywords(self,**kwargs)
def initialize(self,start):
OptimizerInstance.initialize(self,start)
# find approximate hessian
self.x0 = np.asmatrix(start) # row vec?
#self.g0 = np.asmatrix(self.g(np.asarray(self.x0)[0,:])) # row vec
self.g0 = np.asmatrix(self.g(para(self.x0)))
if self.verbose:
print('Step: -01 ')
print('G0: ',self.g0)
# set initial Hessian and inverse to identity
self.B0 = np.identity(self.N)
self.B0i = np.identity(self.N)
self.p0 = -1*np.dot(self.B0i,self.g0.T).T # row vec
# set initial search direction
if self.verbose:
print('Starting line search...')
self._line_search()
if self.verbose:
print('LS: ',self.f_avg)
self.s0 = self.p0*self.alp
self.x1 = self.x0+self.s0
self.g1 = np.asmatrix(self.g(para(self.x1)))
self.y0 = self.g1 - self.g0
#self.y0 = np.asmatrix(self.g(para(self.x1)))-self.g0
if self.verbose:
print('Y: ',self.y0)
Bn = np.dot(self.y0.T,self.y0)
Bd = (np.dot(self.y0,self.s0.T)[0,0])
if abs(Bd)<1e-30:
if Bd<0:
Bd = -1e-30
else:
Bd = 1e-30
Ba = Bn*(1/Bd)
S = np.dot(self.s0.T,self.s0)
d = reduce(np.dot, (self.s0,self.B0,self.s0.T))[0,0]
if abs(d)<1e-30:
if d<0:
d = -1e-30
else:
d = 1e-30
Bb = reduce(np.dot, (self.B0,S,self.B0.T))*(1/d)
self.B1 = self.B0 + Ba - Bb
syT = reduce(np.dot, (self.s0.T,self.y0))
yTs = reduce(np.dot, (self.y0,self.s0.T))[0,0]
if abs(yTs)<1e-30:
if yTs<0:
yTs = -1e-30
else:
yTs = 1e-30
ysT = reduce(np.dot, (self.y0.T,self.s0))
L = np.identity(self.N)-syT*(1/yTs)
R = np.identity(self.N)-ysT*(1/yTs)
self.B1i = reduce(np.dot, (L,self.B0i,R))+S*(1/yTs)
# reassign
self.x0 = self.x1.copy()
#self.g0 = np.asmatrix(self.g(np.asarray(self.x0)[0,:]))
self.g0 = self.g1.copy()
if self.verbose:
print('G: ',self.g0)
self.B0 = self.B1.copy()
self.B0i = self.B1i.copy()
self.best_x = self.x0.copy()
self.best_f = self.f(np.asarray(self.x0)[0,:])
#
self.crit = np.linalg.norm(self.g0)
#
self.stuck = np.zeros((3,self.N))
self.stuck_ind = 0
def next_step(self):
if self.stuck_ind==0:
self.stuck_ind = 1
self.stuck[0,:]= self.x0
elif self.stuck_ind==1:
self.stuck_ind = 2
self.stuck[1,:]= self.x0
elif self.stuck_ind==2:
self.stuck_ind=0
self.stuck[2,:]= self.x0
self.N_stuck=0
def check_stuck(self):
v1 = self.stuck[0,:]
v2 = self.stuck[1,:]
v3 = self.stuck[2,:]
d13 = np.sqrt(np.sum(np.square(v1-v3)))
if d13<1e-15:
shrink = 0.5
self.x0 = self.x0-(1-shrink)*self.s0
if self.verbose:
print('Was stuck!')
self.N_stuck+=1
check_stuck(self)
self.p0 = -1*np.dot(self.B0i,self.g0.T).T # row vec
# now, line search
self._line_search()
if self.verbose:
print('LS: ',self.f_avg)
self.s0 = self.p0*self.alp
self.x1 = self.x0+self.s0
self.y0 = np.asmatrix(self.g(np.asarray(self.x1)[0,:]))-self.g0
if self.verbose:
print('Y: ',self.y0)
B_num = np.dot(self.y0.T,self.y0)
B_den = (np.dot(self.y0,self.s0.T)[0,0])
if abs(B_den)<1e-30:
if B_den<0:
B_den = -1e-30
else:
B_den = 1e-30
Ba = B_num*(1/B_den)
S = np.dot(self.s0.T,self.s0)
d = reduce(np.dot, (self.s0,self.B0,self.s0.T))[0,0]
if abs(d)<=1e-30:
if d<0:
d = -1e-30
else:
d = 1e-30
Bb = reduce(np.dot, (self.B0,S,self.B0.T))*(1/d)
self.B1 = self.B0 + Ba - Bb
syT = reduce(np.dot, (self.s0.T,self.y0))
yTs = reduce(np.dot, (self.y0,self.s0.T))[0,0]
if abs(yTs)<1e-30:
if yTs<0:
yTs = -1e-30
else:
yTs = 1e-30
ysT = reduce(np.dot, (self.y0.T,self.s0))
L = np.identity(self.N)-syT*(1/yTs)
R = np.identity(self.N)-ysT*(1/yTs)
self.B1i = reduce(np.dot, (L,self.B0i,R))+S*(1/yTs)
# reassign
self.x0 = copy(self.x1)
self.g0 = np.asmatrix(self.g(np.asarray(self.x0)[0,:]))
if self.verbose:
print('G: ',self.g0)
self.B0 = copy(self.B1)
self.B0i = copy(self.B1i)
self.best_x = copy(self.x0)
self.best_f = self.f(np.asarray(self.x0)[0,:])
if self._conv_crit=='default':
self.crit = np.sqrt(np.sum(np.square(self.g0)))
def _line_search(self):
'''
algorithm 3.5,3.6 from Nocedal & Wright
attempting to find alpha that satisfies Wolfe conditions
'''
self.f_evals = 0
self.g_evals = 0
c1,c2 = 0.6,0.9 #0 < c1 < c2 < 1
try:
f_zed = self.best_f
except AttributeError as e:
f_zed = self.f(para(self.x0))
self.f_evals+=1
p = self.p0
x = self.x0
g_zed = np.dot(self.g0,p.T)[0,0]
def phi(alpha):
self.f_evals +=1
a = para(x+alpha*p)
return self.f(a)
def dphi(alpha):
a = para(x+alpha*p)
self.g_evals +=1
return np.dot(self.g(a),p.T)[0,0]
def zoom(alp_l,alp_h,f_l,f_h):
            # bisect between low and high
done = False
iters = 0
while not done:
#print(alp_l,alp_h)
alp_j = 0.5*(alp_l+alp_h)
f_j = phi(alp_j)
if f_j > f_zed + c1*alp_j*g_zed or f_j >= f_l:
alp_h = alp_j
f_h = f_j
else:
gj = dphi(alp_j)
if abs(gj)<= -c2* g_zed:
done = True
alp_star = alp_j
if gj*(alp_h-alp_l)>=0:
alp_h = alp_l
alp_l = alp_j
f_l = copy(f_j)
iters+=1
if iters>20:
done = True
raise OptimizerError
return alp_star,f_j
alp_0,alp_max = 0,5
alp_1 = 1
done = False
iters = 1
f0 = copy(f_zed) #actual alp=0, not alp=alp_0
g0 = copy(g_zed) #same
while not done:
f1 = phi(alp_1)
if f1>f_zed+c1*alp_1*g_zed or (f1>= f0 and iters>1):
alp_star,f_star = zoom(alp_0,alp_1,f0,f1)
done = True
continue
g1 = dphi(alp_1)
if abs(g1)<= -c2*g_zed:
alp_star = alp_1
f_star = f1
done = True
continue
if g1>= 0 :
alp_star,f_star = zoom(alp_0,alp_1,f0,f1)
done = True
continue
alp_0 = copy(alp_1)
f0 = copy(f1)
alp_1 = 0.5*(alp_1+alp_max) #bisect
self.alp = alp_star
self.f_avg = f_star
if self.verbose:
print('f_calls = ({}),g_calls = ({}),alpha = {}'.format(self.f_evals,self.g_evals,alp_star))
def _line_search_backtracking(self):
'''
uses backtracking linesearch
'''
f_evals = 0
try:
f = self.best_f
except AttributeError as e:
f = self.f(para(self.x0))
f_evals+=1
c,rho,alpha = 0.5,0.75,1
temp = self.x0+alpha*self.p0
f1 = self.f(para(temp))
f_evals+=1
y = np.dot(self.g0,self.p0.T)[0,0]
while not f1<= f+c*alpha*y:
alpha*= rho
temp = self.x0 + alpha*self.p0
f1 = self.f(para(temp))
f_evals+=1
if self.verbose:
print('f_calls = ({}),alpha = {}'.format(f_evals,alpha))
self.alp = alpha
self.f_avg = f1
def _line_search_old(self):
'''
uses self.p0, and some others stuff
'''
bound = False
f_l = self.f(np.asarray(self.x0)[0,:])
a_l = 0
a_r = 1
while not bound:
temp = self.x0+self.p0*a_r
f_r = self.f(np.asarray(temp)[0,:])
if f_r<f_l:
a_l = copy(a_r)
f_l = copy(f_r)
a_r*=2
else:
bound=True
while a_r-a_l-0.01>0:
a_tmp = 0.5*(a_l+a_r)
temp = self.x0+self.p0*a_tmp
f_tmp = self.f(np.asarray(temp)[0,:])
if f_tmp<f_l:
a_l = copy(a_tmp)
f_l = copy(f_tmp)
else:
a_r = copy(a_tmp)
f_r = copy(f_tmp)
self.alp = 0.5*(a_l+a_r)
self.f_avg = 0.5*(f_l+f_r)
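# A standalone illustration (not part of the original module) of the Armijo
# backtracking rule used in _line_search_backtracking above, applied to the
# simple quadratic f(x) = x^T x. It deliberately avoids the hqca
# OptimizerInstance API, whose constructor arguments are not shown here.
if __name__ == "__main__":
    f = lambda x: float(np.dot(x, x))
    g = lambda x: 2 * x
    x0 = np.array([1.0, -2.0])
    p = -g(x0)                              # steepest-descent direction
    c, rho, alpha = 0.5, 0.75, 1.0
    slope = float(np.dot(g(x0), p))         # directional derivative, negative here
    while f(x0 + alpha * p) > f(x0) + c * alpha * slope:
        alpha *= rho                        # shrink the step until the Armijo condition holds
    print("accepted step length:", alpha, "new f:", f(x0 + alpha * p))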
| 2.75
| 3
|
lib/WindowParent.py
|
aganders3/python-0.9.1
| 116
|
12784203
|
# A 'WindowParent' is the only module that uses real stdwin functionality.
# It is the root of the tree.
# It should have exactly one child when realized.
import stdwin
from stdwinevents import *
from TransParent import ManageOneChild
Error = 'WindowParent.Error' # Exception
class WindowParent() = ManageOneChild():
#
def create(self, (title, size)):
self.title = title
self.size = size # (width, height)
self._reset()
return self
#
def _reset(self):
self.child = 0
self.win = 0
self.itimer = 0
self.do_mouse = 0
self.do_timer = 0
#
def destroy(self):
if self.child: self.child.destroy()
self._reset()
#
def need_mouse(self, child): self.do_mouse = 1
def no_mouse(self, child): self.do_mouse = 0
#
def need_timer(self, child): self.do_timer = 1
def no_timer(self, child): self.do_timer = 0
#
def realize(self):
if self.win:
raise Error, 'realize(): called twice'
if not self.child:
raise Error, 'realize(): no child'
size = self.child.minsize(self.beginmeasuring())
self.size = max(self.size[0], size[0]), \
max(self.size[1], size[1])
#stdwin.setdefwinsize(self.size)
# XXX Compensate stdwin bug:
stdwin.setdefwinsize(self.size[0]+4, self.size[1]+2)
self.win = stdwin.open(self.title)
if self.itimer:
self.win.settimer(self.itimer)
bounds = (0, 0), self.win.getwinsize()
self.child.setbounds(bounds)
#
def beginmeasuring(self):
# Return something with which a child can measure text
if self.win:
return self.win.begindrawing()
else:
return stdwin
#
def begindrawing(self):
if self.win:
return self.win.begindrawing()
else:
raise Error, 'begindrawing(): not realized yet'
#
def change(self, area):
if self.win:
self.win.change(area)
#
def scroll(self, args):
if self.win:
self.win.scroll(args)
#
def settimer(self, itimer):
if self.win:
self.win.settimer(itimer)
else:
self.itimer = itimer
#
# Only call dispatch if we have a child
#
def dispatch(self, (type, win, detail)):
if win <> self.win:
return
elif type = WE_DRAW:
d = self.win.begindrawing()
self.child.draw(d, detail)
elif type = WE_MOUSE_DOWN:
if self.do_mouse: self.child.mouse_down(detail)
elif type = WE_MOUSE_MOVE:
if self.do_mouse: self.child.mouse_move(detail)
elif type = WE_MOUSE_UP:
if self.do_mouse: self.child.mouse_up(detail)
elif type = WE_TIMER:
if self.do_timer: self.child.timer()
elif type = WE_SIZE:
self.win.change((0, 0), (10000, 10000)) # XXX
bounds = (0, 0), self.win.getwinsize()
self.child.setbounds(bounds)
#
| 2.625
| 3
|
src/command_modules/azure-cli-dla/azure/cli/command_modules/dla/_help.py
|
viananth/azure-cli
| 0
|
12784204
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['dla'] = """
type: group
short-summary: Commands to manage Data Lake Analytics accounts, jobs, and catalogs.
long-summary: If you don't have the Data Lake Analytics component installed, add it with `az component update --add dla`. These commands are in preview.
"""
helps['dla job'] = """
type: group
short-summary: Commands to manage Data Lake Analytics jobs.
long-summary: These commands are in preview.
"""
helps['dla job submit'] = """
type: command
short-summary: submits the job to the Data Lake Analytics account.
parameters:
- name: --job-name
type: string
short-summary: 'Job name for the job'
- name: --script
type: string
short-summary: 'The script to submit'
long-summary: This is either the script contents or use `@<file path>` to load the script from a file
- name: --runtime-version
short-summary: 'The runtime version to use'
long-summary: This parameter is used for explicitly overwriting the default runtime. It should only be done if you know what you are doing.
- name: --degree-of-parallelism
short-summary: 'The degree of parallelism for the job'
long-summary: Higher values equate to more parallelism and will usually yield faster running jobs, at the cost of more AUs consumed by the job.
- name: --priority
short-summary: 'The priority of the job'
long-summary: Lower values increase the priority, with the lowest value being 1. This determines the order jobs are run in.
"""
helps['dla job cancel'] = """
type: command
short-summary: cancels the job in the Data Lake Analytics account.
"""
helps['dla job show'] = """
type: command
short-summary: Retrieves the job in the Data Lake Analytics account.
"""
helps['dla job wait'] = """
type: command
short-summary: Waits for the job in the Data Lake Analytics account to finish, returning the job once finished
parameters:
- name: --job-id
type: string
short-summary: 'Job ID for the job to poll'
"""
helps['dla job list'] = """
type: command
    short-summary: Lists jobs in the Data Lake Analytics account.
"""
helps['dla catalog'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalogs.
long-summary: These commands are in preview.
"""
helps['dla catalog database'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog databases.
long-summary: These commands are in preview.
"""
helps['dla catalog assembly'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog assemblies.
long-summary: These commands are in preview.
"""
helps['dla catalog external-data-source'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog external data sources.
long-summary: These commands are in preview.
"""
helps['dla catalog procedure'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog stored procedures.
long-summary: These commands are in preview.
"""
helps['dla catalog schema'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog schemas.
long-summary: These commands are in preview.
"""
helps['dla catalog table'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog tables.
long-summary: These commands are in preview.
"""
helps['dla catalog table list'] = """
type: command
short-summary: Lists all tables in the database or in the database and schema combination
parameters:
- name: --database-name
type: string
short-summary: 'The name of the database to list tables for'
- name: --schema-name
type: string
short-summary: 'The name of the schema in the database to list tables for.'
"""
helps['dla catalog table-partition'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog table partitions.
long-summary: These commands are in preview.
"""
helps['dla catalog table-stats'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog table statistics.
long-summary: These commands are in preview.
"""
helps['dla catalog table-stats list'] = """
type: command
short-summary: Lists all table statistics in the database or in the database and schema or in a specific table
parameters:
- name: --database-name
type: string
          short-summary: 'The name of the database to list table statistics for'
- name: --schema-name
type: string
short-summary: 'The name of the schema in the database to list table statistics for.'
- name: --table-name
type: string
short-summary: 'The name of the table to list statistics in. --schema-name must also be specified for this parameter to be honored'
"""
helps['dla catalog table-type'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog table types.
long-summary: These commands are in preview.
"""
helps['dla catalog tvf'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog table valued functions, or TVFs.
long-summary: These commands are in preview.
"""
helps['dla catalog tvf list'] = """
type: command
short-summary: Lists all table valued functions in the database or in the database and schema combination
parameters:
- name: --database-name
type: string
short-summary: 'The name of the database to list table valued functions for'
- name: --schema-name
type: string
short-summary: 'The name of the schema in the database to list table valued functions for.'
"""
helps['dla catalog view'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog views.
long-summary: These commands are in preview.
"""
helps['dla catalog view list'] = """
type: command
short-summary: Lists all views in the database or in the database and schema combination
parameters:
- name: --database-name
type: string
short-summary: 'The name of the database to list views for'
- name: --schema-name
type: string
short-summary: 'The name of the schema in the database to list views for.'
"""
helps['dla catalog credential'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog credentials.
long-summary: These commands are in preview.
"""
helps['dla catalog credential create'] = """
type: command
short-summary: Creates a new catalog credential for use with an external data source.
parameters:
- name: --credential-name
type: string
short-summary: 'The name of the credential.'
- name: --database-name
type: string
short-summary: 'The name of the database in which to create the credential.'
- name: --user-name
type: string
short-summary: 'The user name that will be used when authenticating with this credential'
"""
helps['dla catalog credential update'] = """
type: command
short-summary: Updates the catalog credential for use with an external data source.
parameters:
- name: --credential-name
type: string
short-summary: 'The name of the credential to update.'
- name: --database-name
type: string
short-summary: 'The name of the database in which the credential exists.'
- name: --user-name
type: string
short-summary: "The user name associated with the credential that will have it's password updated."
"""
helps['dla catalog credential show'] = """
type: command
short-summary: Retrieves the catalog credential.
"""
helps['dla catalog credential list'] = """
type: command
short-summary: Lists the catalog credentials.
"""
helps['dla catalog credential delete'] = """
type: command
    short-summary: Deletes the catalog credential.
"""
helps['dla catalog package'] = """
type: group
short-summary: Commands to manage Data Lake Analytics catalog packages.
long-summary: These commands are in preview.
"""
helps['dla account'] = """
type: group
short-summary: Commands to manage Data Lake Analytics accounts.
long-summary: These commands are in preview.
"""
helps['dla account create'] = """
type: command
short-summary: Creates a Data Lake Analytics account.
parameters:
- name: --default-data-lake-store
type: string
short-summary: 'The default Data Lake Store account to associate with the Data Lake Analytics account being created'
- name: --max-degree-of-parallelism
type: int
short-summary: 'The maximum supported degree of parallelism for this account.'
- name: --max-job-count
type: int
short-summary: 'The maximum supported jobs running under the account at the same time.'
- name: --query-store-retention
type: int
short-summary: 'The number of days that job metadata is retained.'
"""
helps['dla account update'] = """
type: command
short-summary: Updates a Data Lake Analytics account.
parameters:
- name: --max-degree-of-parallelism
type: int
short-summary: 'The maximum supported degree of parallelism for this account.'
- name: --max-job-count
type: int
short-summary: 'The maximum supported jobs running under the account at the same time.'
        - name: --query-store-retention
          type: int
          short-summary: 'The number of days that job metadata is retained.'
        - name: --firewall-state
          type: string
          short-summary: 'Optionally enable/disable existing firewall rules.'
- name: --allow-azure-ips
type: string
short-summary: 'Optionally allow/block Azure originating IPs through the firewall.'
"""
helps['dla account show'] = """
type: command
short-summary: Retrieves the Data Lake Analytics account.
"""
helps['dla account list'] = """
type: command
short-summary: Lists Data Lake Analytics accounts in a subscription or a specific resource group.
"""
helps['dla account delete'] = """
type: command
short-summary: Deletes the Data Lake Analytics account.
"""
helps['dla account blob-storage'] = """
type: group
short-summary: Commands to manage Data Lake Analytics account linked Azure Storage.
long-summary: These commands are in preview.
"""
helps['dla account data-lake-store'] = """
type: group
short-summary: Commands to manage Data Lake Analytics account linked Data Lake Store accounts.
long-summary: These commands are in preview.
"""
helps['dla account firewall'] = """
type: group
short-summary: Commands to manage Data Lake Analytics account firewall rules.
long-summary: These commands are in preview.
"""
helps['dla account firewall create'] = """
type: command
short-summary: Creates a firewall rule in the Data Lake Analytics account.
parameters:
- name: --end-ip-address
type: string
short-summary: 'The end of the valid IP range for the firewall rule.'
- name: --start-ip-address
type: string
short-summary: 'The start of the valid IP range for the firewall rule.'
- name: --firewall-rule-name
type: string
short-summary: 'The name of the firewall rule.'
"""
helps['dla account firewall update'] = """
type: command
short-summary: Updates a firewall rule in the Data Lake Analytics account.
"""
helps['dla account firewall show'] = """
type: command
short-summary: Retrieves a firewall rule in the Data Lake Analytics account.
"""
helps['dla account firewall list'] = """
type: command
short-summary: Lists firewall rules in the Data Lake Analytics account.
"""
helps['dla account firewall delete'] = """
type: command
short-summary: Deletes a firewall rule in the Data Lake Analytics account.
"""
helps['dla account compute-policy'] = """
type: group
short-summary: Commands to manage Data Lake Analytics account compute policies.
long-summary: These commands are in preview.
"""
helps['dla account compute-policy create'] = """
type: command
short-summary: Creates a compute policy in the Data Lake Analytics account.
parameters:
- name: --max-dop-per-job
type: int
short-summary: 'The maximum degree of parallelism allowed per job for this policy. At least one of --min-priority-per-job and --max-dop-per-job must be specified.'
- name: --min-priority-per-job
type: int
short-summary: 'The minimum priority allowed per job for this policy. At least one of --min-priority-per-job and --max-dop-per-job must be specified.'
- name: --compute-policy-name
type: string
short-summary: 'The name of the compute policy to create.'
- name: --object-id
type: string
short-summary: 'The Azure Active Directory object ID of the user, group or service principal to apply the policy to.'
- name: --object-type
type: string
short-summary: 'The Azure Active Directory object type associated with the supplied object id.'
"""
helps['dla account compute-policy update'] = """
type: command
short-summary: Updates a compute policy in the Data Lake Analytics account.
parameters:
- name: --max-dop-per-job
type: int
short-summary: 'The maximum degree of parallelism allowed per job for this policy. At least one of --min-priority-per-job and --max-dop-per-job must be specified.'
- name: --min-priority-per-job
type: int
short-summary: 'The minimum priority allowed per job for this policy. At least one of --min-priority-per-job and --max-dop-per-job must be specified.'
- name: --compute-policy-name
type: string
short-summary: 'The name of the compute policy to update.'
"""
helps['dla account compute-policy show'] = """
type: command
short-summary: Retrieves a compute policy in the Data Lake Analytics account.
"""
helps['dla account compute-policy list'] = """
type: command
short-summary: Lists compute policies in the Data Lake Analytics account.
"""
helps['dla account compute-policy delete'] = """
type: command
short-summary: Deletes a compute policy in the Data Lake Analytics account.
"""
helps['dla job pipeline'] = """
type: group
short-summary: Commands to manage Data Lake Analytics job pipelines.
long-summary: These commands are in preview.
"""
helps['dla job pipeline show'] = """
type: command
short-summary: Retrieves a specific job pipeline in the Data Lake Analytics account.
"""
helps['dla job pipeline list'] = """
type: command
short-summary: Lists job pipelines in the Data Lake Analytics account.
"""
helps['dla job recurrence'] = """
type: group
short-summary: Commands to manage Data Lake Analytics job recurrences.
long-summary: These commands are in preview.
"""
helps['dla job recurrence show'] = """
type: command
short-summary: Retrieves a specific job recurrence in the Data Lake Analytics account.
"""
helps['dla job recurrence list'] = """
type: command
short-summary: Lists job recurrences in the Data Lake Analytics account.
"""
| 1.601563
| 2
|
ribosome/rpc/args.py
|
tek/ribosome-py
| 0
|
12784205
|
import inspect
from typing import Callable, Any, Tuple, get_type_hints
from amino import Maybe, _, Just, Boolean, Lists, Nothing, Either, L, List, Nil, Map, Left
from amino.dat import Dat
from amino.state import StateT
from amino.util.tpe import first_type_arg, type_arg, is_subclass
from ribosome.rpc.data.nargs import Nargs
def analyse_state_type(tpe: type) -> Tuple[Either[str, type], Either[str, type]]:
return (
(first_type_arg(tpe), type_arg(tpe, 1))
if tpe is not None and is_subclass(tpe, StateT)
else (Left('not a StateT'), Left('not a StateT'))
)
def analyse_return_type(fun: Callable[..., Any], hints: Map[str, type]
) -> Tuple[type, Either[str, type], Either[str, type]]:
main_rettype = getattr(fun, 'tpe', hints.lift('return') | None)
state_type, return_type = analyse_state_type(main_rettype)
return main_rettype, state_type, return_type
def cons_params_spec(fun: Callable[..., Any]) -> 'ParamsSpec':
argspec = inspect.getfullargspec(fun)
hints = Map(get_type_hints(fun))
params = Lists.wrap(argspec.args)
defaults = Lists.wrap(argspec.defaults or ())
method = Boolean(params.head.contains('self'))
param_count = params.length - method.to_int
min = param_count - defaults.length
max = (~Boolean(argspec.varargs or argspec.varkw)).m(param_count)
nargs = Nargs.cons(min, max)
types = params.traverse(hints.lift, Maybe) | Nil
main_rettype, state_type, return_type = analyse_return_type(fun, hints)
return ParamsSpec(nargs, min, max, method, types, main_rettype, state_type, return_type | (lambda: main_rettype))
class ParamsSpec(Dat['ParamsSpec']):
@staticmethod
def from_function(fun: Callable[..., Any]) -> 'ParamsSpec':
f = getattr(fun, '__wrapped__', fun)
return cons_params_spec(f)
@staticmethod
def from_type(tpe: type) -> 'ParamsSpec':
return cons_params_spec(tpe.__init__)
def __init__(
self,
nargs: Nargs,
min: int,
max: Maybe[int],
method: Boolean,
types: List[type],
rettype: type,
state_type: Maybe[type],
return_type: type,
) -> None:
self.nargs = nargs
self.min = min
self.max = max
self.method = method
self.types = types
self.rettype = rettype
self.state_type = state_type
self.return_type = return_type
@property
def exact_count(self) -> Maybe[int]:
return Just(self.min) if self.max.contains(self.min) else Nothing
class ArgValidator(Dat['ArgValidator']):
def __init__(self, spec: ParamsSpec) -> None:
self.spec = spec
@property
def min(self) -> int:
return self.spec.min
@property
def max(self) -> Maybe[int]:
return self.spec.max
def validate(self, count: int) -> Boolean:
return Boolean(self.min <= count and not self.max.exists(_ < count))
def error(self, args: tuple, desc: str, name: str) -> str:
return f'argument count for {desc} `{name}` is {len(args)}, must be {self.count_spec} ({args})'
@property
def count_spec(self) -> str:
return (
self.spec.exact_count /
(lambda a: f'exactly {a}' if a > 0 else 'none') | (
self.max /
(lambda mx: f'between {self.min} and {mx}') |
f'at least {self.min}'
)
)
def either(self, args: tuple, desc: str, name: str) -> Either[str, None]:
return self.validate(len(args)).e(L(self.error)(args, desc, name), None)
__all__ = ('ArgValidator', 'ParamsSpec')
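# Illustrative sketch (not executed; values follow the logic above): for
#   def handler(self, a, b, c=1) -> None: ...
# ParamsSpec.from_function(handler) reports method as true, min=2 and
# max=Just(3), since 'self' is excluded and exactly one parameter has a default.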
| 2.1875
| 2
|
2020/day6/day6.py
|
dasm/AdventOfCode
| 2
|
12784206
|
#!/usr/bin/env python3
with open("input") as file_:
lines = file_.read()
groups = lines.split("\n\n")
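# Part 1: for each group, count the questions answered by at least one person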
suma = 0
for group in groups:
group = group.replace("\n", "")
group = set(group)
suma += len(group)
print(suma)
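# Part 2: for each group, count the questions answered by every person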
suma = 0
for group in groups:
group = group.split()
result = set(group[0]).intersection(*group)
suma += len(result)
print(suma)
| 3.625
| 4
|
jobs.py
|
honmaple/flask-apscheduler
| 1
|
12784207
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2017 jianglin
# File Name: jobs.py
# Author: jianglin
# Email: <EMAIL>
# Created: 2017-02-02 14:28:16 (CST)
# Last Update: Sunday 2018-09-30 17:50:05 (CST)
# By:
# Description:
# **************************************************************************
from time import time, sleep
def scheduler_vvv():
    '''Print hello world'''
print('hello world')
def scheduler_kkk():
    '''Print helloorld'''
print('helloorld')
def scheduler_time(a):
    '''
    Print the time, with parameter a
    '''
print('{}{}'.format(a, time()))
def scheduler_vvvv():
'''
    sleep 10s
'''
print('sleep start')
sleep(10)
print('sleep end')
| 2.5625
| 3
|
src/unicon/plugins/hvrp/setting.py
|
mibotiaf/unicon.plugins
| 0
|
12784208
|
<reponame>mibotiaf/unicon.plugins
"""
Module:
unicon.plugins.hvrp
Authors:
<NAME> (<EMAIL>), <NAME> (<EMAIL>)
Description:
This module defines the HVRP settings to setup the unicon environment required for generic based unicon connection.
"""
from unicon.plugins.generic import GenericSettings
class HvrpSettings(GenericSettings):
"""" Hvrp platform settings """
def __init__(self):
super().__init__()
self.HA_INIT_EXEC_COMMANDS = [
'screen-length 0 temporary',
'undo terminal alarm',
'undo terminal logging',
'undo terminal debugging',
'undo terminal monitor'
]
self.HA_INIT_CONFIG_COMMANDS = []
self.CONSOLE_TIMEOUT = 60
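# Usage note (added for clarity, not part of the original module): these
# settings are picked up by the hvrp plugin when a unicon connection is
# created for that platform; HvrpSettings is not instantiated by hand.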
| 1.882813
| 2
|
ansible/roles/lib_gcloud/build/src/gcloud_compute_projectinfo.py
|
fahlmant/openshift-tools
| 164
|
12784209
|
# pylint: skip-file
class GcloudComputeProjectInfoError(Exception):
'''exception class for projectinfo'''
pass
# pylint: disable=too-many-instance-attributes
class GcloudComputeProjectInfo(GcloudCLI):
''' Class to wrap the gcloud compute projectinfo command'''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
metadata=None,
metadata_from_file=None,
remove_keys=None,
verbose=False):
''' Constructor for gcloud resource '''
super(GcloudComputeProjectInfo, self).__init__()
self._metadata = metadata
self.metadata_from_file = metadata_from_file
self.remove_keys = remove_keys
self._existing_metadata = None
self.verbose = verbose
@property
def metadata(self):
'''property for existing metadata'''
return self._metadata
@property
def existing_metadata(self):
'''property for existing metadata'''
if self._existing_metadata == None:
self._existing_metadata = []
metadata = self.list_metadata()
metadata = metadata['results']['commonInstanceMetadata']
if metadata.has_key('items'):
self._existing_metadata = metadata['items']
return self._existing_metadata
def list_metadata(self):
        '''return metadata'''
results = self._list_metadata('project-info')
if results['returncode'] == 0:
results['results'] = yaml.load(results['results'])
return results
def exists(self):
''' return whether the metadata that we are removing exists '''
# currently we aren't opening up files for comparison so always return False
if self.metadata_from_file:
return False
for key, val in self.metadata.items():
for data in self.existing_metadata:
if key == 'sshKeys' and data['key'] == key:
ssh_keys = {}
# get all the users and their public keys out of the project
for user_pub_key in data['value'].strip().split('\n'):
col_index = user_pub_key.find(':')
user = user_pub_key[:col_index]
pub_key = user_pub_key[col_index+1:]
ssh_keys[user] = pub_key
# compare the users that were passed in to see if we need to update
for inc_user, inc_pub_key in val.items():
if not ssh_keys.has_key(inc_user) or ssh_keys[inc_user] != inc_pub_key:
return False
# matched all ssh keys
break
elif data['key'] == str(key) and str(data['value']) == str(val):
break
else:
return False
return True
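    # Note (added for clarity): the project's sshKeys metadata value is a
    # newline-separated list of "user:public_key" entries, which is why
    # exists() splits each line on the first ':' before comparing keys.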
def keys_exist(self):
''' return whether the keys exist in the metadata'''
for key in self.remove_keys:
for mdata in self.existing_metadata:
if key == mdata['key']:
break
else:
# NOT FOUND
return False
return True
def needs_update(self):
''' return whether an we need to update '''
# compare incoming values with metadata returned
# for each key in user supplied check against returned data
return not self.exists()
def delete_metadata(self, remove_all=False):
''' attempt to remove metadata '''
return self._delete_metadata(self.remove_keys, remove_all=remove_all)
def create_metadata(self):
        '''create metadata'''
results = None
if self.metadata and self.metadata.has_key('sshKeys'):
# create a file and pass it to create
ssh_strings = ["%s:%s" % (user, pub_key) for user, pub_key in self.metadata['sshKeys'].items()]
ssh_keys = {'sshKeys': Utils.create_file('ssh_keys', '\n'.join(ssh_strings), 'raw')}
results = self._create_metadata('project-info', self.metadata, ssh_keys)
# remove them and continue
del self.metadata['sshKeys']
if len(self.metadata.keys()) == 0:
return results
new_results = self._create_metadata('project-info', self.metadata, self.metadata_from_file)
if results:
return [results, new_results]
return new_results
| 2.40625
| 2
|
BOJ/dp_boj/file_sum.py
|
mrbartrns/swacademy_structure
| 0
|
12784210
|
<gh_stars>0
# BOJ 11066
import sys
si = sys.stdin.readline
INF = 1e9
t = int(si())
for _ in range(t):
    # Read the number of chapters and their sizes into an array
k = int(si())
cost = [0] + list(map(int, si().split()))
    # Build the dp table sized for the largest possible number of chapters
dp = [[0 for _ in range(k + 1)] for _ in range(k + 1)]
psum = [0] * 501
for i in range(1, k + 1):
        # Build and store prefix sums
psum[i] = psum[i - 1] + cost[i]
    # d is the interval length; iterating by increasing length reuses the
    # smaller subproblems instead of recomputing them
for d in range(1, k):
for tx in range(1, k + 1):
ty = tx + d
if ty <= k:
dp[tx][ty] = INF
for mid in range(tx, ty):
dp[tx][ty] = min(
dp[tx][ty],
dp[tx][mid] + dp[mid + 1][ty] + psum[ty] - psum[tx - 1],
)
else:
break
print(dp[1][k])
| 2.453125
| 2
|
setup.py
|
tarrow/queryCitefile
| 0
|
12784211
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='queryCitefile',
install_requires=[],
version='0.1',
description='Simple tool to process mwcites output',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/tarrow/queryCiteFile',
py_modules=['queryCiteFile']
)
| 1.203125
| 1
|
src/camera/test_run.py
|
jphacks/TK_1804
| 1
|
12784212
|
<reponame>jphacks/TK_1804
import numpy as np
from head_vector import HeadVector
from select_speakers import SelectSpeakers
if __name__ == '__main__':
face_landmark_path = './src/camera/shape_predictor_68_face_landmarks.dat'
K = [6.523417721418979909e+02, 0.0, 3.240992613348381610e+02,
0.0, 6.314784883620466189e+02, 2.369864861289960629e+02,
0.0, 0.0, 1.0]
D = [-4.425469845416301617e-01,4.114960065684757362e-01,5.860505097580077059e-03,3.197849383691316570e-03,-3.379210829526543836e-01]
cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
[1.330353, 7.122144, 6.903745],
[-1.330353, 7.122144, 6.903745],
[-6.825897, 6.760612, 4.402142],
[5.311432, 5.485328, 3.987654],
[1.789930, 5.393625, 4.413414],
[-1.789930, 5.393625, 4.413414],
[-5.311432, 5.485328, 3.987654],
[2.005628, 1.409845, 6.165652],
[-2.005628, 1.409845, 6.165652],
[2.774015, -2.080775, 5.048531],
[-2.774015, -2.080775, 5.048531],
[0.000000, -3.116408, 6.097667],
[0.000000, -7.415691, 4.070434]])
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
[10.0, 10.0, -10.0],
[10.0, -10.0, -10.0],
[10.0, -10.0, 10.0],
[-10.0, 10.0, 10.0],
[-10.0, 10.0, -10.0],
[-10.0, -10.0, -10.0],
[-10.0, -10.0, 10.0]])
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
[4, 5], [5, 6], [6, 7], [7, 4],
[0, 4], [1, 5], [2, 6], [3, 7]]
select_speaker = SelectSpeakers(K, D, object_pts, reprojectsrc, line_pairs, face_landmark_path)
while(True):
print(select_speaker.estimate_head_orientation(1))
| 1.84375
| 2
|
bfg9000/builtins/find.py
|
jmgao/bfg9000
| 0
|
12784213
|
import fnmatch
import os
import posixpath
import re
from enum import IntEnum
from . import builtin
from ..file_types import File, Directory
from ..iterutils import iterate, listify
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..backends.make.syntax import Writer, Syntax
from ..build_inputs import build_input
from ..path import Path, Root
from ..platforms import known_platforms
build_input('find_dirs')(lambda build_inputs, env: set())
depfile_name = '.bfg_find_deps'
exclude_globs = ['.*#', '*~', '#*#']
@builtin.function()
class FindResult(IntEnum):
include = 0
not_now = 1
exclude = 2
def write_depfile(env, path, output, seen_dirs, makeify=False):
with open(path.string(env.base_dirs), 'w') as f:
# Since this file is in the build dir, we can use relative dirs for
# deps also in the build dir.
roots = env.base_dirs.copy()
roots[Root.builddir] = None
out = Writer(f)
out.write(output.string(roots), Syntax.target)
out.write_literal(':')
for i in seen_dirs:
out.write_literal(' ')
out.write(i.string(roots), Syntax.dependency)
out.write_literal('\n')
if makeify:
for i in seen_dirs:
out.write(i.string(roots), Syntax.target)
out.write_literal(':\n')
def _listdir(path):
dirs, nondirs = [], []
try:
names = os.listdir(path)
for name in names:
# Use POSIX paths so that the result is platform-agnostic.
curpath = posixpath.join(path, name)
if os.path.isdir(curpath):
dirs.append((name, curpath))
else:
nondirs.append((name, curpath))
except Exception:
pass
return dirs, nondirs
def _walk_flat(top):
if os.path.exists(top):
yield (top,) + _listdir(top)
def _walk_recursive(top):
if not os.path.exists(top):
return
dirs, nondirs = _listdir(top)
yield top, dirs, nondirs
for name, path in dirs:
if not os.path.islink(path):
for i in _walk_recursive(path):
yield i
def _filter_from_glob(match_type, matches, extra, exclude):
matches = [re.compile(fnmatch.translate(i)) for i in iterate(matches)]
extra = [re.compile(fnmatch.translate(i)) for i in iterate(extra)]
exclude = [re.compile(fnmatch.translate(i)) for i in iterate(exclude)]
def fn(name, path, type):
if match_type in {type, '*'}:
if any(ex.match(name) for ex in exclude):
return FindResult.exclude
if any(ex.match(name) for ex in matches):
return FindResult.include
elif any(ex.match(name) for ex in extra):
return FindResult.not_now
return FindResult.exclude
return fn
def _find_files(paths, filter, flat, as_object):
# "Does the walker choose the path, or the path the walker?" - <NAME>
walker = _walk_flat if flat else _walk_recursive
results, dist_results, seen_dirs = [], [], []
filetype = File if isinstance(as_object, bool) else as_object
def do_filter(files, type):
cls = filetype if type == 'f' else lambda p: Directory(p, None)
for name, path in files:
fileobj = cls(Path(path, Root.srcdir))
matched = filter(name, path, type)
if matched == FindResult.include:
dist_results.append(fileobj)
results.append(fileobj if as_object else path)
elif matched == FindResult.not_now:
dist_results.append(fileobj)
do_filter(( (os.path.basename(p), p) for p in paths ), 'd')
for p in paths:
for base, dirs, files in walker(p):
seen_dirs.append(Path(base, Root.srcdir))
do_filter(dirs, 'd')
do_filter(files, 'f')
return results, dist_results, seen_dirs
def find(path='.', name='*', type='*', extra=None, exclude=exclude_globs,
flat=False):
glob_filter = _filter_from_glob(type, name, extra, exclude)
return _find_files(listify(path), glob_filter, flat, False)[0]
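# Illustrative usage (a sketch; the directory and pattern are hypothetical):
#   sources = find('src', name='*.cpp', type='f')
# returns the paths of all .cpp files under src/, skipping names matched by
# exclude_globs.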
@builtin.function('env')
def filter_by_platform(env, name, path, type):
my_plat = set([env.target_platform.name, env.target_platform.flavor])
sub = '|'.join(re.escape(i) for i in known_platforms if i not in my_plat)
ex = r'(^|/|_)(' + sub + r')(\.[^\.]$|$|/)'
return FindResult.not_now if re.search(ex, path) else FindResult.include
@builtin.function('builtins', 'build_inputs', 'env')
def find_files(builtins, build_inputs, env, path='.', name='*', type='*',
extra=None, exclude=exclude_globs, filter=filter_by_platform,
flat=False, cache=True, dist=True, as_object=False):
glob_filter = _filter_from_glob(type, name, extra, exclude)
if filter:
if filter == filter_by_platform:
filter = builtins['filter_by_platform']
def final_filter(name, path, type):
return max(filter(name, path, type), glob_filter(name, path, type))
else:
final_filter = glob_filter
paths = [i.path.string(env.base_dirs) if isinstance(i, File) else i
for i in iterate(path)]
    found, dist_files, seen_dirs = _find_files(paths, final_filter, flat, as_object)
    if cache:
        build_inputs['find_dirs'].update(seen_dirs)
        build_inputs['regenerate'].depfile = depfile_name
    if dist:
        for i in dist_files:
            build_inputs.add_source(i)
return found
@make.post_rule
def make_find_dirs(build_inputs, buildfile, env):
if build_inputs['find_dirs']:
write_depfile(env, Path(depfile_name), make.filepath,
build_inputs['find_dirs'], makeify=True)
buildfile.include(depfile_name)
@ninja.post_rule
def ninja_find_dirs(build_inputs, buildfile, env):
if build_inputs['find_dirs']:
write_depfile(env, Path(depfile_name), ninja.filepath,
build_inputs['find_dirs'])
| 2.109375
| 2
|
python/qisys/test/test_worktree.py
|
aldebaran/qibuild
| 51
|
12784214
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Automatic testing for worktree """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import py
import mock
import pytest
import qisys.sh
import qisys.worktree
def test_read_projects(tmpdir):
""" Test Read Projects """
tmpdir.mkdir("core").mkdir("naoqi")
tmpdir.mkdir("lib").mkdir("libqi")
xml_path = tmpdir.mkdir(".qi").join("worktree.xml")
xml_path.write("""
<worktree>
<project src="core/naoqi" />
<project src="lib/libqi" />
</worktree>
""")
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
p_srcs = [p.src for p in worktree.projects]
assert p_srcs == ["core/naoqi", "lib/libqi"]
def test_normalize_path(tmpdir):
""" Test Nomalize Path """
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
foo_abs_path = tmpdir.join("bar").join("foo").strpath
assert worktree.normalize_path(foo_abs_path) == "bar/foo"
assert worktree.normalize_path("bar/foo") == "bar/foo"
def test_add_project_simple(worktree):
""" Test Add Project Simple """
tmp = py.path.local(worktree.root) # pylint:disable=no-member
tmp.mkdir("foo")
worktree.add_project("foo")
assert len(worktree.projects) == 1
foo1 = worktree.get_project("foo")
assert foo1.src == "foo"
def test_fails_when_root_does_not_exists(tmpdir):
""" Test Fails When Root Does Not Exists """
non_existing = tmpdir.join("doesnotexist")
with pytest.raises(Exception) as e:
qisys.worktree.WorkTree(non_existing.strpath)
assert "does not exist" in str(e.value)
def test_ignore_src_dot(tmpdir):
""" Test Ignore Src Dot """
_foo_path = tmpdir.mkdir("foo")
tmpdir.join("foo", "qiproject.xml").write("""
<project>
<project src="." />
</project>
""")
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
worktree.add_project("foo")
def test_remove_project(worktree):
""" Test Remove Project """
tmp = py.path.local(worktree.root) # pylint:disable=no-member
foo_src = tmp.mkdir("foo")
worktree.add_project("foo")
with pytest.raises(qisys.worktree.WorkTreeError) as e:
worktree.remove_project("bar")
assert "No project in 'bar'" in str(e)
worktree.remove_project("foo")
assert worktree.projects == list()
worktree.add_project("foo")
assert worktree.projects[0].src == "foo"
worktree.remove_project("foo", from_disk=True)
assert worktree.projects == list()
assert not os.path.exists(foo_src.strpath)
def test_nested_qiprojects(tmpdir):
""" Test Nested Project """
a_project = tmpdir.mkdir("a")
worktree_xml = tmpdir.mkdir(".qi").join("worktree.xml")
worktree_xml.write("""
<worktree>
<project src="a" />
</worktree>
""")
a_xml = a_project.join("qiproject.xml")
a_xml.write("""
<project name="a">
<project src="b" />
</project>
""")
b_project = a_project.mkdir("b")
b_xml = b_project.join("qiproject.xml")
b_xml.write("""
<project name="b">
<project src="c" />
</project>
""")
c_project = b_project.mkdir("c")
c_xml = c_project.join("qiproject.xml")
c_xml.write('<project name="c" />\n')
worktree = qisys.worktree.WorkTree(tmpdir.strpath)
assert len(worktree.projects) == 3
assert [p.src for p in worktree.projects] == ["a", "a/b", "a/b/c"]
def test_non_exiting_path_are_removed(tmpdir, interact):
""" All projects registered should exist """
wt = qisys.worktree.WorkTree(tmpdir.strpath)
a_path = tmpdir.mkdir("a")
wt.add_project(a_path.strpath)
a_path.remove()
wt2 = qisys.worktree.WorkTree(tmpdir.strpath)
assert wt2.projects == list()
def test_check_subprojects_exist(tmpdir):
""" Subprojets in qiproject.xml should exist """
wt = qisys.worktree.WorkTree(tmpdir.strpath)
a_path = tmpdir.mkdir("a")
a_qiproject = a_path.join("qiproject.xml")
a_qiproject.write(""" \
<project >
<project src="b" />
</project>
""")
with pytest.raises(qisys.worktree.WorkTreeError) as e:
wt.add_project("a")
assert "invalid sub project" in str(e.value)
def test_observers_are_notified(worktree):
""" Test Observers Are Notified """
mock_observer = mock.Mock()
worktree.register(mock_observer)
worktree.create_project("foo")
assert mock_observer.reload.called
def test_add_nested_projects(worktree):
""" Test Add Nested Project """
worktree.create_project("foo")
tmpdir = worktree.tmpdir
spam = tmpdir.mkdir("spam")
spam.join("qiproject.xml").write(""" \
<project>
<project src="eggs" />
</project>
""")
spam.mkdir("eggs")
worktree.add_project("spam")
assert [p.src for p in worktree.projects] == ["foo", "spam", "spam/eggs"]
worktree.remove_project("spam")
assert [p.src for p in worktree.projects] == ["foo"]
def test_warns_on_nested_worktrees(tmpdir, record_messages):
""" Test Warns On Nested WorkTrees """
work1 = tmpdir.mkdir("work1")
work1.mkdir(".qi")
work2 = work1.mkdir("work2")
work2.mkdir(".qi")
qisys.worktree.WorkTree(work2.strpath)
assert record_messages.find("Nested worktrees")
@pytest.mark.skip(reason="no way of currently testing this")
def test_non_ascii_path(tmpdir):
""" Test Non ASCII Path """
coffee_dir = tmpdir.mkdir("café")
qisys.worktree.WorkTree(coffee_dir.strpath)
| 2.0625
| 2
|
endorsement/dao/uwnetid_supported.py
|
uw-it-aca/service-endorsement
| 3
|
12784215
|
<filename>endorsement/dao/uwnetid_supported.py
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This module encapsulates the interactions with
the UW NeTID Subscription code 60
"""
import logging
import traceback
from uw_uwnetid.supported import get_supported_resources
from endorsement.dao import handel_err
from restclients_core.exceptions import DataFailureException
logger = logging.getLogger(__name__)
def get_supported_resources_for_netid(netid):
"""
Return supported resources
"""
try:
supported = []
for resource in get_supported_resources(netid):
if resource.status != 'former':
supported.append(resource)
return supported
except DataFailureException as ex:
logger.error(
'uw_uwnetid get_supported_resources({}) returned {}'.format(
netid, ex.status))
except Exception:
handel_err(logger,
'{0} supported resources '.format(netid),
traceback.format_exc())
return []
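# Illustrative call (the netid is hypothetical):
#   get_supported_resources_for_netid('javerage')
# returns only resources whose status is not 'former', or an empty list if the
# lookup fails.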
| 1.890625
| 2
|
cube5.py
|
Horia73/MultiCuberX
| 0
|
12784216
|
<filename>cube5.py
from subprocess import check_output
from time import sleep
import logging
import imutils
import serial
import time
import cv2
global Down, elevator1, elevator2, Up, FlUp, FlDown, coord, l,start,i
start = True
i = 0
l = []
coord = []
log = logging.getLogger(__name__)
Down=True
elevator1=False
medium1=False
medium2=False
elevator2=False
Up=False
FlUp=True
FlDown=False
def prepare():
global s1, camera
print("Conectarea la Arduino Mega...")
s1 = serial.Serial('/dev/tty.usbmodem21401', 9600)
print("Conectat! - MegaA")
print(" ")
camera = cv2.VideoCapture(0)
sleep(1.5)
(retval, img) = camera.read()
cv2.imshow("Capture", img)
print("Gata! Introdu un cub 5x5x5 amestecat iar apoi apasa 'Solve' pentru a rezolva cubul!")
def photo(name, angle):
global camera
sleep(0.4)
(retval, img) = camera.read()
name = '/Users/horia/MultiCuber/CubeScan/' + str(name)
name = str(name)
print(name)
angle = int(angle)
img = imutils.rotate(img, angle)
cv2.imshow("Capture", img)
cv2.waitKey(1)
cv2.imwrite(name, img)
def status():
s1.write(b'M')
while(1):
r = s1.read()
r = r.decode()
if r=='A':
break
def slow():
s1.write(b'5')
s1.write(b'7')
def normal():
s1.write(b'5')
s1.write(b'8')
def fast():
s1.write(b'5')
s1.write(b'9')
def ElevatorDown():
global Down, elevator1, medium1, medium2, elevator2,Up
if Down:
pass
elif elevator1:
s1.write(b'g')
elif medium1:
s1.write(b'n')
elif medium2:
s1.write(b'i')
elif elevator2:
s1.write(b'd')
elif Up:
s1.write(b'e')
if Down==False:
Down=True
elevator1=False
medium1 = False
medium2=False
elevator2=False
Up=False
status()
def Elevator1():
global Down, elevator1, medium1, medium2, elevator2,Up
if Down:
s1.write(b'G')
elif elevator1:
pass
elif medium1:
s1.write(b'j')
elif medium2:
s1.write(b'p')
elif elevator2:
s1.write(b'o')
elif Up:
s1.write(b'v')
if elevator1==False:
Down=False
elevator1=True
medium1=False
medium2=False
elevator2=False
Up=False
status()
def ElevatorMedium1():
global Down, elevator1, medium1, medium2, elevator2,Up
if Down:
s1.write(b'N')
elif elevator1:
s1.write(b'J')
elif medium1:
pass
elif medium2:
s1.write(b'j')
elif elevator2:
s1.write(b'p')
Down=False
elif Up:
s1.write(b'o')
if medium1==False:
Down=False
elevator1=False
medium1=True
medium2=False
elevator2=False
Up=False
status()
def ElevatorMedium2():
global Down, elevator1, medium1, medium2, elevator2,Up
if Down:
s1.write(b'I')
elif elevator1:
s1.write(b'P')
elif medium1:
s1.write(b'J')
elif medium2:
pass
elif elevator2:
s1.write(b'j')
elif Up:
s1.write(b'p')
if medium2==False:
Down=False
elevator1=False
medium1=False
medium2=True
elevator2=False
Up=False
status()
def Elevator2():
global Down, elevator1, medium1, medium2, elevator2,Up
if Down:
s1.write(b'D')
elif elevator1:
s1.write(b'O')
elif medium1:
s1.write(b'P')
elif medium2:
s1.write(b'J')
elif elevator2:
pass
elif Up:
s1.write(b'j')
if elevator2==False:
Down=False
elevator1=False
medium1=False
medium2=False
elevator2=True
Up=False
status()
def ElevatorUp():
global Down, elevator1, medium1, medium2, elevator2,Up
if Down:
s1.write(b'E')
elif elevator1:
s1.write(b'V')
elif medium1:
s1.write(b'O')
elif medium2:
s1.write(b'P')
elif elevator2:
s1.write(b'J')
elif Up:
pass
if Up==False:
Down=False
elevator1=False
medium1=False
medium2=False
elevator2=False
Up=True
status()
def ElevatorUpScan():
s1.write(b'S')
status()
def ElevatorDownScan():
s1.write(b's')
status()
def RotatorPositive():
s1.write(b'R')
if elevator2 or Up or medium2:
for n, i in enumerate(l):
if i=='F':
l[n]='L'
elif i=='L':
l[n]='B'
elif i=='B':
l[n]='R'
elif i=='R':
l[n]='F'
status()
def RotatorNegative():
s1.write(b'r')
if elevator2 or Up or medium2:
for n, i in enumerate(l):
if i=='F':
l[n]='R'
elif i=='L':
l[n]='F'
elif i=='B':
l[n]='L'
elif i=='R':
l[n]='B'
status()
def RotatorDouble():
s1.write(b'B')
if elevator2 or Up or medium2:
for n, i in enumerate(l):
if i=='F':
l[n]='B'
elif i=='L':
l[n]='R'
elif i=='B':
l[n]='F'
elif i=='R':
l[n]='L'
status()
def FlipperUp():
global FlUp, FlDown
if FlDown:
if Down:
s1.write(b'F')
for n, i in enumerate(l):
if i=='F':
l[n]='U'
elif i=='U':
l[n]='B'
elif i=='B':
l[n]='D'
elif i=='D':
l[n]='F'
elif Down==False:
s1.write(b'X')
elif FlUp:
pass
FlUp = True
FlDown = False
status()
def FlipperDown():
global FlUp, FlDown
if FlUp:
if Down:
s1.write(b'f')
for n, i in enumerate(l):
if i=='F':
l[n]='D'
elif i=='U':
l[n]='F'
elif i=='B':
l[n]='U'
elif i=='D':
l[n]='B'
elif Down==False:
s1.write(b'x')
elif FlDown:
pass
FlDown = True
FlUp = False
status()
def close():
global camera
s1.write(b'H')
FlipperUp()
sleep(0.2)
s1.write(b'h')
del (camera)
camera = None
s1.write(b'Q')
def pattern1():
global l
l = []
l.extend(("U", "2", "D", "2", "F", "2", "B", "2", "R", "2", "L", "2"))
solver()
def pattern2():
global l
l = []
l.extend(("U", "D", "'", "R", "L", "'", "F", "B", "'", "U", "D", "'"))
solver()
def pattern3():
global l
l = []
l.extend(("U", "F", "B", "'", "L", "2", "U", "2", "L", "2", "F", "'", "B", "U", "2", "L", "2", "U"))
solver()
def pattern4():
global l
l = []
l.extend(("R", "2", "L", "'", "D", "F", "2", "R", "'", "D", "'", "R", "'", "L", "U", "'", "D", "R", "D", "B", "2", "R", "'", "U", "D", "2"))
solver()
def scanner():
global l, coord, b1, a1, b2, a2, b3, a3
a1 = time.time()
s1.write(b'H')
FlipperDown()
photo('rubiks-side-U.png', '270')
ElevatorUpScan()
FlipperUp()
ElevatorDownScan()
FlipperDown()
photo('rubiks-side-R.png', '180')
ElevatorUpScan()
FlipperUp()
ElevatorDownScan()
FlipperDown()
photo('rubiks-side-D.png', '90')
ElevatorUpScan()
FlipperUp()
ElevatorDownScan()
FlipperDown()
photo('rubiks-side-L.png', '0')
ElevatorUp()
RotatorNegative()
ElevatorDown()
FlipperUp()
FlipperDown()
photo('rubiks-side-B.png', '0')
ElevatorUp()
RotatorDouble()
ElevatorDown()
FlipperUp()
FlipperDown()
photo('rubiks-side-F.png', '0')
s1.write(b'h')
b1 = time.time()
def analyzer():
global l,coord,b1,a1,b2,a2,b3,a3,q
a2 = time.time()
cmd1 = ("cd ~/MultiCuber/rubiks-cube-tracker/usr/bin; python3 rubiks-cube-tracker.py --directory ~/MultiCuber/CubeScan")
log.info(cmd1)
output1 = check_output(cmd1, shell=True)
output1 = str(output1)
output1 = output1[2:]
output1 = output1.rstrip(output1[-1])
output1 = output1.rstrip(output1[-1])
output1 = output1.rstrip(output1[-1])
cmd2 = ("cd ~/MultiCuber/rubiks-color-resolver/usr/bin; python3 rubiks-color-resolver.py --json --rgb" + " " + "'" + output1 + "'")
log.info(cmd2)
output2 = check_output(cmd2, shell=True)
output2 = str(output2)
contents = output2[22:172]
print(contents)
cmd3 = ("cd ~/MultiCuber/rubiks-cube-NxNxN-solver/; ./rubiks-cube-solver.py --state " + contents)
log.info(cmd3)
output3 = check_output(cmd3, shell=True)
output3 = str(output3)
output3 = output3[12:]
output3 = output3.rstrip(output3[-1])
output3 = output3.rstrip(output3[-1])
output3 = output3.rstrip(output3[-1])
l = list(output3)
l = [e for e in l if e.strip()]
l.append('Terminat!')
print(l)
print("Scanarea si gasirea algoritmului s-a finlizat!")
print("Incepem sa rezolvam cubul!")
c1 = l.count("w")
print("Mutari pentru stratul mijlociu (w):")
print(c1)
c2 = l.count("'")
print("Mutari prime ('):")
print(c2)
c3 = l.count('2')
print("Mutari duble:")
print(c3)
c4 = len(l)
q = c4 - c3 - c2 - c1
print("Mutari totale:")
print(q)
b2 = time.time()
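# Note (added for clarity): solver() consumes the move list l in place. Tokens
# follow standard cube notation: a face letter (F, R, U, B, D, L), optionally
# followed by "w" for a wide turn and by "'" or "2" for counter-clockwise or
# double turns; "x", "y" and "z" are whole-cube rotations that only relabel
# the remaining moves.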
def solver():
global l,coord,b1,a1,b2,a2,b3,a3,start,i
a3 = time.time()
s1.write(b'H')
for x in range(q):
if x>1 and x <3:
start = False
if l[0]=="F" and l[1]=="w" and l[2]=="'":
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0]=="F" and l[1]=="w" and l[2]=="2":
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0]=="R" and l[1]=="w" and l[2]=="'":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0]=="R" and l[1]=="w" and l[2]=="2":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0]=="U" and l[1]=="w" and l[2]=="'":
ElevatorMedium1()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0]=="U" and l[1]=="w" and l[2]=="2":
ElevatorMedium1()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0]=="B" and l[1]=="w" and l[2]=="'":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium2()
RotatorNegative()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
ElevatorMedium1()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0]=="B" and l[1]=="w" and l[2]=="2":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium2()
RotatorDouble()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
ElevatorMedium1()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0]=="D" and l[1]=="w" and l[2]=="'":
ElevatorMedium2()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0]=="D" and l[1]=="w" and l[2]=="2":
ElevatorMedium2()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0]=="L" and l[1]=="w" and l[2]=="'":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorNegative()
del l[0]
del l[0]
del l[0]
elif l[0]=="L" and l[1]=="w" and l[2]=="2":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorDouble()
del l[0]
del l[0]
del l[0]
elif l[0]=="F" and l[1]=="w":
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorPositive()
del l[0]
del l[0]
elif l[0]=="R" and l[1]=="w":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorPositive()
del l[0]
del l[0]
elif l[0]=="U" and l[1]=="w":
ElevatorMedium1()
RotatorPositive()
del l[0]
del l[0]
elif l[0]=="B" and l[1]=="w":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium2()
RotatorPositive()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
ElevatorMedium1()
RotatorPositive()
del l[0]
del l[0]
elif l[0]=="D" and l[1]=="w":
ElevatorMedium2()
RotatorPositive()
del l[0]
del l[0]
elif l[0]=="L" and l[1]=="w":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
ElevatorMedium1()
RotatorPositive()
del l[0]
del l[0]
elif l[0]=="F" and l[1]=="'":
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0]=="F" and l[1]=="2":
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0]=="R" and l[1]=="'":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0]=="R" and l[1]=="2":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0]=="U" and l[1]=="'":
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0]=="U" and l[1]=="2":
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0]=="B" and l[1]=="'":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator2()
RotatorNegative()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0]=="B" and l[1]=="2":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator2()
RotatorDouble()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0]=="D" and l[1]=="'":
Elevator2()
RotatorNegative()
del l[0]
del l[0]
elif l[0]=="D" and l[1]=="2":
Elevator2()
RotatorDouble()
del l[0]
del l[0]
elif l[0]=="L" and l[1]=="'":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorNegative()
del l[0]
del l[0]
elif l[0]=="L" and l[1]=="2":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorDouble()
del l[0]
del l[0]
elif l[0]=="F":
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorPositive()
del l[0]
elif l[0]=="R":
ElevatorUp()
RotatorPositive()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorPositive()
del l[0]
elif l[0]=="U":
Elevator1()
RotatorPositive()
del l[0]
elif l[0]=="B":
if start:
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator2()
RotatorPositive()
else:
FlipperUp()
ElevatorDown()
FlipperDown()
Elevator1()
RotatorPositive()
del l[0]
elif l[0]=="D":
Elevator2()
RotatorPositive()
del l[0]
elif l[0]=="L":
ElevatorUp()
RotatorNegative()
FlipperDown()
ElevatorDown()
FlipperUp()
Elevator1()
RotatorPositive()
del l[0]
elif l[0]=="x" and l[1]=="'":
del l[0]
del l[0]
for n, i in enumerate(l):
if i=='F':
l[n]='D'
elif i=='U':
l[n]='F'
elif i=='D':
l[n]='B'
elif i=='B':
l[n]='U'
elif l[0]=="x" and l[1]=="2":
del l[0]
del l[0]
for n, i in enumerate(l):
if i=='F':
l[n]='B'
elif i=='U':
l[n]='D'
elif i=='D':
l[n]='U'
elif i=='B':
l[n]='F'
elif l[0]=="x":
del l[0]
for n, i in enumerate(l):
if i=='F':
l[n]='U'
elif i=='U':
l[n]='B'
elif i=='D':
l[n]='F'
elif i=='B':
l[n]='D'
elif l[0]=="y" and l[1]=="'":
del l[0]
del l[0]
for n, i in enumerate(l):
if i=='F':
l[n]='R'
elif i=='R':
l[n]='B'
elif i=='L':
l[n]='F'
elif i=='B':
l[n]='L'
elif l[0]=="y" and l[1]=="2":
del l[0]
del l[0]
for n, i in enumerate(l):
if i=='F':
l[n]='B'
elif i=='R':
l[n]='L'
elif i=='L':
l[n]='R'
elif i=='B':
l[n]='F'
elif l[0]=="y":
del l[0]
for n, i in enumerate(l):
if i=='F':
l[n]='L'
elif i=='R':
l[n]='F'
elif i=='L':
l[n]='B'
elif i=='B':
l[n]='R'
elif l[0]=="z" and l[1]=="'":
del l[0]
del l[0]
for n, i in enumerate(l):
if i=='R':
l[n]='U'
elif i=='U':
l[n]='L'
elif i=='L':
l[n]='D'
elif i=='D':
l[n]='R'
elif l[0]=="z" and l[1]=="2":
del l[0]
del l[0]
for n, i in enumerate(l):
if i=='R':
l[n]='L'
elif i=='U':
l[n]='D'
elif i=='L':
l[n]='R'
elif i=='D':
l[n]='U'
elif l[0]=="z":
del l[0]
for n, i in enumerate(l):
if i=='R':
l[n]='D'
elif i=='U':
l[n]='R'
elif i=='L':
l[n]='U'
elif i=='D':
l[n]='L'
elif l[0]=="Terminat!":
del l[0]
print("Cubul a fost rezolvat! Introdu alt cub si apasa 'Solve' pentru a-l rezolva!")
print(" ")
ElevatorDown()
FlipperDown()
FlipperUp()
status()
s1.write(b'h')
b3 = time.time()
t1=b1-a1
t2=b2-a2
t3=b3-a3
t=t1+t2+t3
if q==1:
med=0
else:
med=t3/(q-1)
            print('Scanning took ' + str(round(t1,2)) + ' seconds.')
            print('Analyzing the images and finding the solution took ' + str(round(t2,2)) + ' seconds.')
            print('Solving the cube took ' + str(round(t3,2)) + ' seconds.')
            print('Average time per move: ' + str(round(med,2)) + ' seconds.')
            print('Total time: ' + str(round(t,2)) + ' seconds.')
else:
i=i+1
print("Prea multe mutari:" + i)
return(t)
| 2.359375
| 2
|
reference_submissions/imagenet/imagenet_pytorch/submission.py
|
ClashLuke/algorithmic-efficiency
| 2
|
12784217
|
<gh_stars>1-10
"""Training algorithm track submission functions for ImageNet."""
from typing import Iterator, List, Tuple
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'imagenet': 128}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparamters,
rng: spec.RandomState) -> spec.OptimizerState:
del workload
del model_state
del rng
base_lr = hyperparameters.learning_rate * get_batch_size('imagenet') / 256.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=base_lr,
momentum=hyperparameters.momentum,
weight_decay=hyperparameters.l2)
}
scheduler1 = LinearLR(
optimizer_state['optimizer'],
start_factor=1e-5,
end_factor=1.,
total_iters=hyperparameters.warmup_epochs)
cosine_epochs = max(
hyperparameters.num_epochs - hyperparameters.warmup_epochs, 1)
scheduler2 = CosineAnnealingLR(
optimizer_state['optimizer'], T_max=cosine_epochs)
optimizer_state['scheduler'] = SequentialLR(
optimizer_state['optimizer'],
schedulers=[scheduler1, scheduler2],
milestones=[hyperparameters.warmup_epochs])
return optimizer_state
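# Note (added for clarity): the scheduler above is stepped once per epoch (see
# update_params below), so with hypothetical values warmup_epochs=5 and
# num_epochs=90 the learning rate ramps linearly up to the scaled base LR over
# the first 5 epochs and then follows a cosine decay over the remaining 85.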
def update_params(
workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparamters,
input_batch: spec.Tensor,
label_batch: spec.Tensor,
# This will define the output activation via `output_activation_fn`.
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del hyperparameters
del loss_type
del eval_results
input_batch, label_batch = (
workload.preprocess_for_train(input_batch, label_batch, None, None, None))
current_model = current_param_container
current_param_container.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
input_batch=input_batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss = workload.loss_fn(
label_batch=label_batch, logits_batch=logits_batch).mean()
loss.backward()
optimizer_state['optimizer'].step()
steps_per_epoch = workload.num_train_examples // get_batch_size('imagenet')
if (global_step + 1) % steps_per_epoch == 0:
optimizer_state['scheduler'].step()
return (optimizer_state, current_param_container, new_model_state)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Tuple[spec.Tensor, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
hyperparameters: spec.Hyperparamters,
global_step: int,
rng: spec.RandomState) -> Tuple[spec.Tensor, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a single training example and label.
  We left out `current_params_types` because we do not believe that it would
  be necessary for this function.
Return a tuple of input label batches.
"""
del optimizer_state
del current_param_container
del global_step
del rng
return next(input_queue)
| 2.0625
| 2
|
dev/local/optimizers/__init__.py
|
KeremTurgutlu/fast-kaggle
| 8
|
12784218
|
<reponame>KeremTurgutlu/fast-kaggle
from .radam import *
from .novograd import *
from .ranger import *
from .ralamb import *
from .rangerlars import *
from .lookahead import *
from .lamb import *
| 0.835938
| 1
|
addons/source-python/plugins/warcraft/extensions/levelbank/__init__.py
|
ThomasVieth/WCS-Remastered
| 0
|
12784219
|
"""
"""
## source.python imports
from commands import CommandReturn
from commands.client import ClientCommand
from commands.say import SayCommand
from listeners import OnClientFullyConnect
## warcraft.package imports
from warcraft.database import session
from warcraft.players import player_dict
## extension imports
from .config import *
from .database import *
from .menus import *
## __all__ declaration
__all__ = (
"levelbank_menu",
)
## handling new players
@OnClientFullyConnect
def _on_client_full_connect_setup_levelbank(index):
player = player_dict[index]
player_levelbank = session.query(Levelbank).filter(Levelbank.parent == player._dbinstance).first()
if not player_levelbank:
start_levels = levelbank_start_amount.cvar.get_int()
player_levelbank = Levelbank(levels=start_levels, parent=player._dbinstance)
session.add(player_levelbank)
session.commit()
## handling client/say commands
@ClientCommand(["levelbank", "wcsbank"])
@SayCommand(["levelbank", "wcsbank"])
def _levelbank_say_command(command, index, team_only=None):
levelbank_menu.send(index)
return CommandReturn.BLOCK
| 2.296875
| 2
|
examples/optimizers/swarm/create_ffoa.py
|
anukaal/opytimizer
| 528
|
12784220
|
from opytimizer.optimizers.swarm import FFOA
# Creates a FFOA optimizer
o = FFOA()
| 1.460938
| 1
|
converter.py
|
MelomanCool/telegram-audiomemes
| 1
|
12784221
|
<gh_stars>1-10
from pydub import AudioSegment
from io import BytesIO
def convert_to_ogg(f):
bio = BytesIO()
(AudioSegment.from_file(f)
.set_channels(1) # for compatibility with Android
.export(bio, format='ogg', codec='libopus'))
bio.seek(0)
return bio
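# Usage sketch (file names are hypothetical):
#   with open("meme.mp3", "rb") as src, open("meme.ogg", "wb") as dst:
#       dst.write(convert_to_ogg(src).read())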
| 2.71875
| 3
|
pypint/solvers/parallel_sdc.py
|
DiMoser/PyPinT
| 0
|
12784222
|
# coding=utf-8
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from copy import deepcopy
import warnings as warnings
from collections import OrderedDict
import numpy as np
from pypint.solvers.i_iterative_time_solver import IIterativeTimeSolver
from pypint.solvers.i_parallel_solver import IParallelSolver
from pypint.communicators.message import Message
from pypint.integrators.integrator_base import IntegratorBase
from pypint.integrators.node_providers.gauss_lobatto_nodes import GaussLobattoNodes
from pypint.integrators.weight_function_providers.polynomial_weight_function import PolynomialWeightFunction
from pypint.problems import IInitialValueProblem, problem_has_exact_solution
from pypint.solvers.states.sdc_solver_state import SdcSolverState
from pypint.solvers.diagnosis import IDiagnosisValue
from pypint.solvers.diagnosis.norms import supremum_norm
from pypint.plugins.timers.timer_base import TimerBase
from pypint.utilities.threshold_check import ThresholdCheck
from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument
from pypint.utilities.logging import *
# General Notes on Implementation
# ===============================
#
# Names and Meaning of Indices
# ----------------------------
# T_max (num_time_steps) | number of time steps
# N (num_nodes) | number of integration nodes per time step
# t | index of current time step; interval: [0, T_max)
# n | index of current node of current time step; interval: [1, N)
# | the current node is always the next node, i.e. the node we are
# | calculating the value for
# i | index of current point in continuous array of points
class ParallelSdc(IIterativeTimeSolver, IParallelSolver):
"""*Spectral Deferred Corrections* method for solving first order ODEs.
The *Spectral Deferred Corrections* (SDC) method is described in [Minion2003]_ (Equation 2.7)
Default Values:
* :py:class:`.ThresholdCheck`
* ``max_threshold``: 10
* ``min_threshold``: 1e-7
* ``conditions``: ``('residual', 'iterations')``
* :py:attr:`.num_time_steps`: 1
* :py:attr:`.num_nodes`: 3
Given the total number of time steps :math:`T_{max}`, number of integration nodes per time
step :math:`N`, current time step :math:`t \\in [0,T_{max})` and the next integration node
to consider :math:`n \\in [0, N)`.
Let :math:`[a,b]` be the total time interval to integrate over.
For :math:`T_{max}=3` and :math:`N=4`, this can be visualized as::
a b
| |
| . . | . . | . . |
t 0 0 0 0 1 1 1 2 2 2
n 0 1 2 3 1 2 3 1 2 3
i 0 1 2 3 4 5 6 7 8 9
In general, the value at :math:`a` (i.e. :math:`t=n=i=0`) is the initial value.
See Also
--------
:py:class:`.IIterativeTimeSolver` :
implemented interface
:py:class:`.IParallelSolver` :
mixed-in interface
"""
def __init__(self, **kwargs):
super(ParallelSdc, self).__init__(**kwargs)
IParallelSolver.__init__(self, **kwargs)
del self._state
self.threshold = ThresholdCheck(min_threshold=1e-7, max_threshold=10, conditions=("residual", "iterations"))
self.timer = TimerBase()
self._num_time_steps = 1
self._dt = 0.0
self._deltas = {
't': 0.0,
'n': np.zeros(0)
}
self._classic = True
self.__nodes_type = GaussLobattoNodes
self.__weights_type = PolynomialWeightFunction
self.__num_nodes = 3
self.__exact = np.zeros(0)
self.__time_points = {
'steps': np.zeros(0),
'nodes': np.zeros(0)
}
def init(self, problem, integrator, **kwargs):
"""Initializes SDC solver with given problem and integrator.
Parameters
----------
num_time_steps : :py:class:`int`
Number of time steps to be used within the time interval of the problem.
num_nodes : :py:class:`int`
            *(optional)*
number of nodes per time step
nodes_type : :py:class:`.INodes`
*(optional)*
Type of integration nodes to be used (class name, **NOT instance**).
weights_type : :py:class:`.IWeightFunction`
*(optional)*
Integration weights function to be used (class name, **NOT instance**).
classic : :py:class:`bool`
*(optional)*
Flag for specifying the type of the SDC sweep.
:py:class:`True`: *(default)* For the classic SDC as known from the literature;
:py:class:`False`: For the modified SDC as developed by <NAME>.
Raises
------
ValueError :
* if given problem is not an :py:class:`.IInitialValueProblem`
* if number of nodes per time step is not given; neither through ``num_nodes``, ``nodes_type`` nor
``integrator``
See Also
--------
:py:meth:`.IIterativeTimeSolver.init`
overridden method (with further parameters)
:py:meth:`.IParallelSolver.init`
mixed in overridden method (with further parameters)
"""
assert_is_instance(problem, IInitialValueProblem, descriptor="Initial Value Problem", checking_obj=self)
assert_condition(issubclass(integrator, IntegratorBase),
ValueError, message="Integrator must be an IntegratorBase: NOT %s"
% integrator.__mro__[-2].__name__,
checking_obj=self)
super(ParallelSdc, self).init(problem, integrator=integrator, **kwargs)
if 'num_time_steps' in kwargs:
self._num_time_steps = kwargs['num_time_steps']
if 'num_nodes' in kwargs:
self.__num_nodes = kwargs['num_nodes']
elif 'nodes_type' in kwargs and kwargs['nodes_type'].num_nodes is not None:
self.__num_nodes = kwargs['nodes_type'].num_nodes
elif integrator.nodes_type is not None and integrator.nodes_type.num_nodes is not None:
self.__num_nodes = integrator.nodes_type.num_nodes
else:
raise ValueError(func_name(self) + "Number of nodes per time step not given.")
        if 'nodes_type' in kwargs:
            self.__nodes_type = kwargs['nodes_type']
if 'weights_type' in kwargs:
self.__weights_type = kwargs['weights_type']
if 'classic' in kwargs:
assert_is_instance(kwargs['classic'], bool, descriptor="Classic Flag", checking_obj=self)
self._classic = kwargs['classic']
# TODO: need to store the exact solution somewhere else
self.__exact = np.zeros(self.num_time_steps * (self.__num_nodes - 1) + 1, dtype=np.object)
def run(self, core, **kwargs):
"""Applies SDC solver to the initialized problem setup.
Solves the given problem with the explicit SDC algorithm.
Parameters
----------
core : :py:class:`.SdcSolverCore`
core solver stepping method
dt : :py:class:`float`
            width of the interval to work on; this is divided into the number of given
time steps this solver has been initialized with
See Also
--------
:py:meth:`.IIterativeTimeSolver.run` : overridden method
"""
super(ParallelSdc, self).run(core, **kwargs)
assert_named_argument('dt', kwargs, types=float, descriptor="Width of Interval", checking_obj=self)
self._dt = kwargs['dt']
self._print_header()
# start iterations
# TODO: exact solution storage handling
self.__exact[0] = self.problem.initial_value
_has_work = True
_previous_flag = Message.SolverFlag.none
_current_flag = Message.SolverFlag.none
__work_loop_count = 1
while _has_work:
LOG.debug("Work Loop: %d" % __work_loop_count)
_previous_flag = _current_flag
_current_flag = Message.SolverFlag.none
# receive dedicated message
_msg = self._communicator.receive()
if _msg.flag == Message.SolverFlag.failed:
# previous solver failed
# --> pass on the failure and abort
_current_flag = Message.SolverFlag.failed
_has_work = False
LOG.debug("Previous Solver Failed")
else:
if _msg.flag == Message.SolverFlag.time_adjusted:
# the previous solver has adjusted its interval
# --> we need to recompute our interval
_current_flag = self._adjust_interval_width()
# we don't immediately start the computation of the newly computed interval
# but try to pass the new interval end to the next solver as soon as possible
# (this should avoid throwing away useless computation)
LOG.debug("Previous Solver Adjusted Time")
else:
if _previous_flag in \
[Message.SolverFlag.none, Message.SolverFlag.converged, Message.SolverFlag.finished,
Message.SolverFlag.time_adjusted]:
# we just started or finished our previous interval
# --> start a new interval
_has_work = self._init_new_interval(_msg.time_point)
if _has_work:
# set initial values
self.state.initial.solution.value = _msg.value.copy()
self.state.initial.solution.time_point = _msg.time_point
self.state.initial.done()
LOG.debug("New Interval Initialized")
# start logging output
self._print_interval_header()
# start global timing (per interval)
self.timer.start()
else:
# pass
LOG.debug("No New Interval Available")
elif _previous_flag == Message.SolverFlag.iterating:
LOG.debug("Next Iteration")
else:
LOG.warn("WARNING!!! Something went wrong here")
if _has_work:
# we are still on the same interval or have just successfully initialized a new interval
# --> do the real computation
LOG.debug("Starting New Solver Main Loop")
# initialize a new iteration state
self.state.proceed()
if _msg.time_point == self.state.initial.time_point:
if _previous_flag == Message.SolverFlag.iterating:
LOG.debug("Updating initial value")
# if the previous solver has a new initial value for us, we use it
self.state.current_iteration.initial.solution.value = _msg.value.copy()
_current_flag = self._main_solver_loop()
if _current_flag in \
[Message.SolverFlag.converged, Message.SolverFlag.finished, Message.SolverFlag.failed]:
_log_msgs = {'': OrderedDict()}
if self.state.last_iteration_index <= self.threshold.max_iterations:
_group = 'Converged after %d iteration(s)' % (self.state.last_iteration_index + 1)
_log_msgs[''][_group] = OrderedDict()
_log_msgs[''][_group] = self.threshold.has_reached(log=True)
_log_msgs[''][_group]['Final Residual'] = "{:.3e}"\
.format(supremum_norm(self.state.last_iteration.final_step.solution.residual))
_log_msgs[''][_group]['Solution Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.solution_reduction(self.state.last_iteration_index)))
if problem_has_exact_solution(self.problem, self):
_log_msgs[''][_group]['Error Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.error_reduction(self.state.last_iteration_index)))
else:
warnings.warn("{}: Did not converged: {:s}".format(self._core.name, self.problem))
_group = "FAILED: After maximum of {:d} iteration(s)"\
.format(self.state.last_iteration_index + 1)
_log_msgs[''][_group] = OrderedDict()
_log_msgs[''][_group]['Final Residual'] = "{:.3e}"\
.format(supremum_norm(self.state.last_iteration.final_step.solution.residual))
_log_msgs[''][_group]['Solution Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.solution_reduction(self.state.last_iteration_index)))
if problem_has_exact_solution(self.problem, self):
_log_msgs[''][_group]['Error Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.error_reduction(self.state.last_iteration_index)))
LOG.warn(" {} Failed: Maximum number iterations reached without convergence."
.format(self._core.name))
print_logging_message_tree(_log_msgs)
elif _previous_flag in [Message.SolverFlag.converged, Message.SolverFlag.finished]:
LOG.debug("Solver Finished.")
self.timer.stop()
self._print_footer()
else:
# something went wrong
# --> we failed
LOG.warn("Solver failed.")
_current_flag = Message.SolverFlag.failed
self._communicator.send(value=self.state.current_iteration.final_step.solution.value,
time_point=self.state.current_iteration.final_step.time_point,
flag=_current_flag)
__work_loop_count += 1
# end while:has_work is None
LOG.debug("Solver Main Loop Done")
return [_s.solution for _s in self._states]
@property
def state(self):
"""Read-only accessor for the sovler's state
Returns
-------
state : :py:class:`.ISolverState`
"""
if len(self._states) > 0:
return self._states[-1]
else:
return None
@property
def num_time_steps(self):
"""Accessor for the number of time steps within the interval.
Returns
-------
number_time_steps : :py:class:`int`
Number of time steps within the problem-given time interval.
"""
return self._num_time_steps
@property
def num_nodes(self):
"""Accessor for the number of integration nodes per time step.
Returns
-------
number_of_nodes : :py:class:`int`
Number of integration nodes used within one time step.
"""
return self.__num_nodes
@property
def classic(self):
"""Read-only accessor for the type of SDC
Returns
-------
is_classic : :py:class:`bool`
:py:class:`True` if it's the classic SDC as known from papers;
:py:class:`False` if it's the modified SDC by <NAME>
"""
return self._classic
def _init_new_state(self):
"""Initialize a new state for a work task
Usually, this starts a new work task.
The previous state, if applicable, is stored in a stack.
"""
if self.state:
# finalize the current state
self.state.finalize()
# initialize solver state
self._states.append(SdcSolverState(num_nodes=self.num_nodes - 1, num_time_steps=self.num_time_steps))
def _init_new_interval(self, start):
"""Initializes a new work interval
Parameters
----------
start : :py:class:`float`
start point of new interval
Returns
-------
has_work : :py:class:`bool`
            :py:class:`True` if a new interval has been initialized;
            :py:class:`False` if no new interval has been initialized (i.e. the new interval's end would exceed the
            end of the time interval given by the problem)
"""
assert_is_instance(start, float, descriptor="Time Point", checking_obj=self)
if start + self._dt > self.problem.time_end:
return False
if self.state and start == self.state.initial.time_point:
return False
self._init_new_state()
# set width of current interval
self.state.delta_interval = self._dt
# compute time step and node distances
self._deltas['t'] = self.state.delta_interval / self.num_time_steps # width of a single time step (equidistant)
# start time points of time steps
self.__time_points['steps'] = np.linspace(start, start + self._dt, self.num_time_steps + 1)
# initialize and transform integrator for time step width
self._integrator.init(self.__nodes_type, self.__num_nodes, self.__weights_type,
interval=np.array([self.__time_points['steps'][0], self.__time_points['steps'][1]],
dtype=np.float))
self.__time_points['nodes'] = np.zeros((self.num_time_steps, self.num_nodes), dtype=np.float)
_deltas_n = np.zeros(self.num_time_steps * (self.num_nodes - 1) + 1)
# copy the node provider so we do not alter the integrator's one
_nodes = deepcopy(self._integrator.nodes_type)
for _t in range(0, self.num_time_steps):
# transform Nodes (copy) onto new time step for retrieving actual integration nodes
_nodes.interval = np.array([self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]])
self.__time_points['nodes'][_t] = _nodes.nodes.copy()
for _n in range(0, self.num_nodes - 1):
_i = _t * (self.num_nodes - 1) + _n
_deltas_n[_i + 1] = _nodes.nodes[_n + 1] - _nodes.nodes[_n]
self._deltas['n'] = _deltas_n[1:].copy()
return True
def _adjust_interval_width(self):
"""Adjust width of time interval
"""
raise NotImplementedError("Time Adaptivity not yet implemented.")
# return Message.SolverFlag.time_adjusted
def _main_solver_loop(self):
# initialize iteration timer of same type as global timer
_iter_timer = self.timer.__class__()
self._print_iteration(self.state.current_iteration_index + 1)
# iterate on time steps
_iter_timer.start()
for _current_time_step in self.state.current_iteration:
# run this time step
self._time_step()
if self.state.current_time_step_index < len(self.state.current_iteration) - 1:
self.state.current_iteration.proceed()
_iter_timer.stop()
# check termination criteria
self.threshold.check(self.state)
# log this iteration's summary
if self.state.is_first_iteration:
# on first iteration we do not have comparison values
self._print_iteration_end(None, None, None, _iter_timer.past())
else:
if problem_has_exact_solution(self.problem, self) and not self.state.is_first_iteration:
# we could compute the correct error of our current solution
self._print_iteration_end(self.state.solution.solution_reduction(self.state.current_iteration_index),
self.state.solution.error_reduction(self.state.current_iteration_index),
self.state.current_step.solution.residual,
_iter_timer.past())
else:
self._print_iteration_end(self.state.solution.solution_reduction(self.state.current_iteration_index),
None,
self.state.current_step.solution.residual,
_iter_timer.past())
# finalize this iteration (i.e. TrajectorySolutionData.finalize())
self.state.current_iteration.finalize()
_reason = self.threshold.has_reached()
if _reason is None:
# LOG.debug("solver main loop done: no reason")
return Message.SolverFlag.iterating
elif _reason == ['iterations']:
# LOG.debug("solver main loop done: iterations")
self.state.finalize()
return Message.SolverFlag.finished
else:
# LOG.debug("solver main loop done: other")
self.state.finalize()
return Message.SolverFlag.converged
def _time_step(self):
self.state.current_time_step.delta_time_step = self._deltas['t']
for _step in range(0, len(self.state.current_time_step)):
_node_index = self.state.current_time_step_index * (self.num_nodes - 1) + _step
self.state.current_time_step[_step].delta_tau = self._deltas['n'][_node_index]
self.state.current_time_step[_step].solution.time_point = \
self.__time_points['nodes'][self.state.current_time_step_index][_step + 1]
self._print_time_step(self.state.current_time_step_index + 1,
self.state.current_time_step.initial.time_point,
self.state.current_time_step.last.time_point,
self.state.current_time_step.delta_time_step)
# for classic SDC compute integral
_integral = 0.0
_integrate_values = None
if self.classic:
if not self.state.current_time_step.initial.rhs_evaluated:
self.state.current_time_step.initial.rhs = \
self.problem.evaluate_wrt_time(self.state.current_time_step.initial.time_point,
self.state.current_time_step.initial.value)
_integrate_values = np.array([self.state.current_time_step.initial.rhs], dtype=self.problem.numeric_type)
for _step_index in range(0, len(self.state.current_time_step)):
if self.state.is_first_iteration:
_integrate_values = \
np.append(_integrate_values,
np.array([self.state.current_time_step.initial.rhs], dtype=self.problem.numeric_type),
axis=0)
else:
_step = self.state.previous_iteration[self.state.current_time_step_index][_step_index]
if not _step.rhs_evaluated:
_step.rhs = self.problem.evaluate_wrt_time(_step.time_point, _step.value)
_integrate_values = \
np.append(_integrate_values,
np.array([_step.rhs], dtype=self.problem.numeric_type),
axis=0)
assert_condition(_integrate_values.shape[0] == self.num_nodes,
ValueError, message="Number of integration values not correct: {:d} != {:d}"
.format(_integrate_values.shape[0], self.num_nodes),
checking_obj=self)
_full_integral = 0.0
# do the actual SDC steps of this SDC sweep
for _step_index in range(0, len(self.state.current_time_step)):
_current_step = self.state.current_time_step[_step_index]
if self.classic:
_integral = self._integrator.evaluate(_integrate_values,
from_node=_step_index, target_node=_step_index + 1)
# we successively compute the full integral, which is used for the residual at the end
_full_integral += _integral
_current_step.integral = _integral.copy()
# do the SDC step of this sweep
self._sdc_step()
if self.state.current_step_index < len(self.state.current_time_step) - 1:
self.state.current_time_step.proceed()
del _integrate_values
# compute residual and print step details
for _step_index in range(0, len(self.state.current_time_step)):
_step = self.state.current_time_step[_step_index]
self._core.compute_residual(self.state, step=_step, integral=_full_integral)
# finalize this step (i.e. StepSolutionData.finalize())
_step.done()
if _step_index > 0:
_previous_time = self.state.current_time_step[_step_index - 1].time_point
else:
_previous_time = self.state.current_time_step.initial.time_point
if problem_has_exact_solution(self.problem, self):
self._print_step(_step_index + 2,
_previous_time,
_step.time_point,
supremum_norm(_step.value),
_step.solution.residual,
_step.solution.error)
else:
self._print_step(_step_index + 2,
_previous_time,
_step.time_point,
supremum_norm(_step.value),
_step.solution.residual,
None)
self._print_time_step_end()
# finalizing the current time step (i.e. TrajectorySolutionData.finalize)
self.state.current_time_step.finalize()
def _sdc_step(self):
# helper variables
_current_time_step_index = self.state.current_time_step_index
_current_step_index = self.state.current_step_index
# copy solution of previous iteration to this one
if self.state.is_first_iteration:
self.state.current_step.value = self.state.initial.value.copy()
else:
self.state.current_step.value = \
self.state.previous_iteration[_current_time_step_index][_current_step_index].value.copy()
# TODO: review the custom modification
# if not self.classic:
# # gather values for integration and evaluate problem at given points
# # initial value for this time step
# _integrate_values = \
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step.initial.time_point,
# self.state.current_time_step.initial.value.copy())
# ], dtype=self.problem.numeric_type)
#
# if _current_step_index > 0:
# # values from this iteration (already calculated)
# _from_current_iteration_range = range(0, _current_step_index)
# for _index in _from_current_iteration_range:
# _integrate_values = \
# np.append(_integrate_values,
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step[_index].solution.time_point,
# self.state.current_time_step[_index].solution.value.copy())
# ], dtype=self.problem.numeric_type
# ), axis=0)
#
# # values from previous iteration
# _from_previous_iteration_range = range(_current_step_index, self.num_nodes - 1)
# for _index in _from_previous_iteration_range:
# if self.state.is_first_iteration:
# _this_value = self.problem.initial_value
# else:
# _this_value = self.state.previous_iteration[_current_time_step_index][_index].solution.value.copy()
# _integrate_values = \
# np.append(_integrate_values,
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step[_index].solution.time_point,
# _this_value)
# ], dtype=self.problem.numeric_type
# ), axis=0)
# assert_condition(_integrate_values.shape[0] == self.num_nodes,
# ValueError, message="Number of integration values not correct: {:d} != {:d}"
# .format(_integrate_values.shape[0], self.num_nodes),
# checking_obj=self)
#
# # integrate
# self.state.current_step.integral = self._integrator.evaluate(_integrate_values,
# from_node=_current_step_index,
# target_node=_current_step_index + 1)
# del _integrate_values
# # END if not self.classic
# compute step
self._core.run(self.state, problem=self.problem)
# calculate error
self._core.compute_error(self.state, problem=self.problem)
# step gets finalized after computation of residual
def print_lines_for_log(self):
_lines = super(ParallelSdc, self).print_lines_for_log()
if 'Number Nodes per Time Step' not in _lines['Integrator']:
_lines['Integrator']['Number Nodes per Time Step'] = "%d" % self.__num_nodes
if 'Number Time Steps' not in _lines['Integrator']:
_lines['Integrator']['Number Time Steps'] = "%d" % self._num_time_steps
return _lines
def _print_interval_header(self):
LOG.info("%s%s" % (VERBOSITY_LVL1, SEPARATOR_LVL3))
LOG.info("{} Interval: [{:.3f}, {:.3f}]"
.format(VERBOSITY_LVL1, self.state.initial.time_point, self.state.initial.time_point + self._dt))
self._print_output_tree_header()
def _print_output_tree_header(self):
LOG.info("%s iter" % VERBOSITY_LVL1)
LOG.info("%s \\" % VERBOSITY_LVL2)
LOG.info("%s |- time start end delta" % VERBOSITY_LVL2)
LOG.info("%s | \\" % VERBOSITY_LVL3)
LOG.info("%s | |- step t_0 t_1 phi(t_1) resid err" % VERBOSITY_LVL3)
LOG.info("%s | \\_" % VERBOSITY_LVL2)
LOG.info("%s \\_ sol r.red err r.red resid time" % VERBOSITY_LVL1)
def _print_iteration(self, _iter):
_iter = self._output_format(_iter, 'int', width=5)
LOG.info("%s %s" % (VERBOSITY_LVL1, _iter))
LOG.info("%s \\" % VERBOSITY_LVL2)
def _print_iteration_end(self, solred, errred, resid, time):
_solred = self._output_format(solred, 'exp')
_errred = self._output_format(errred, 'exp')
_resid = self._output_format(resid, 'exp')
_time = self._output_format(time, 'float', width=6.3)
LOG.info("%s \\_ %s %s %s %s" % (VERBOSITY_LVL1, _solred, _errred, _resid, _time))
def _print_time_step(self, time_step, start, end, delta):
_time_step = self._output_format(time_step, 'int', width=3)
_start = self._output_format(start, 'float', width=6.3)
_end = self._output_format(end, 'float', width=6.3)
_delta = self._output_format(delta, 'float', width=6.3)
LOG.info("%s |- %s %s %s %s" % (VERBOSITY_LVL2, _time_step, _start, _end, _delta))
LOG.info("%s | \\" % VERBOSITY_LVL3)
self._print_step(1, None, self.state.current_time_step.initial.time_point,
supremum_norm(self.state.current_time_step.initial.solution.value),
None, None)
def _print_time_step_end(self):
LOG.info("%s | \\_" % VERBOSITY_LVL2)
def _print_step(self, step, t0, t1, phi, resid, err):
_step = self._output_format(step, 'int', width=2)
_t0 = self._output_format(t0, 'float', width=6.3)
_t1 = self._output_format(t1, 'float', width=6.3)
_phi = self._output_format(phi, 'float', width=6.3)
_resid = self._output_format(resid, 'exp')
_err = self._output_format(err, 'exp')
LOG.info("%s | |- %s %s %s %s %s %s"
% (VERBOSITY_LVL3, _step, _t0, _t1, _phi, _resid, _err))
def _output_format(self, value, _type, width=None):
def _value_to_numeric(val):
if isinstance(val, (np.ndarray, IDiagnosisValue)):
return supremum_norm(val)
else:
return val
if _type and width is None:
if _type == 'float':
width = 10.3
elif _type == 'int':
width = 10
elif _type == 'exp':
width = 9.2
else:
width = 10
if value is None:
_outstr = "{: ^{width}s}".format('na', width=int(width))
else:
if _type == 'float':
_outstr = "{: {width}f}".format(_value_to_numeric(value), width=width)
elif _type == 'int':
_outstr = "{: {width}d}".format(_value_to_numeric(value), width=width)
elif _type == 'exp':
_outstr = "{: {width}e}".format(_value_to_numeric(value), width=width)
else:
_outstr = "{: >{width}s}".format(value, width=width)
return _outstr
__all__ = ['ParallelSdc']
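# --- Hedged usage sketch (not part of the original module) ---
# Based only on the signatures above: `init` expects an IInitialValueProblem instance
# and an IntegratorBase subclass (plus optional num_time_steps / num_nodes / nodes_type /
# weights_type / classic), and `run` expects an SdcSolverCore stepping class together
# with the interval width `dt`. The problem and core names below are placeholders:
#
#   solver = ParallelSdc(...)            # kwargs are forwarded to IParallelSolver
#   solver.init(my_ivp_problem, integrator=IntegratorBase, num_time_steps=2, num_nodes=3)
#   solutions = solver.run(MySdcCoreClass, dt=0.5)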
| 1.976563
| 2
|
base/grouped_estimator.py
|
palicand/mi-pdd
| 0
|
12784223
|
import sklearn.base as base
class GroupedEstimator(base.BaseEstimator):
"""GroupedClassifier is meant to group together classifiers
that should run be fitted to the same data. It is meant
to make scoring of many classifiers easier"""
def __init__(self, estimators=None, labels=None, group_name=None):
super(GroupedEstimator, self).__init__()
if labels is None:
self.labels = self.__generate_labels(estimators)
elif len(labels) == len(estimators):
self.labels = labels
else:
raise ValueError('The length of estimators and labels must be the same')
self.estimators = {}
for idx, label in enumerate(self.labels):
self.estimators[label] = estimators[idx]
        if group_name is None:
            self.group_name = 'Group'
        else:
            self.group_name = group_name
@staticmethod
def __generate_labels(estimators):
return ['estimator ' + str(i) for i in range(len(estimators))]
def add_estimator(self, estimator, label=None):
'''Adds a classifier to the group.
The classifier must be fitted to the same data
as the others, or fit method must be run afterwards
to fit all the classifiers to the same data'''
if label is None:
label = 'estimator ' + str(len(self.estimators))
self.estimators[label] = estimator
def clear(self):
'''Clears classifiers'''
self.estimators.clear()
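# --- Hedged usage sketch (not part of the original module) ---
# Groups two scikit-learn estimators under explicit labels; it assumes scikit-learn is
# installed and only exercises the constructor and add_estimator defined above.
if __name__ == '__main__':
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.naive_bayes import GaussianNB

    group = GroupedEstimator(estimators=[DecisionTreeClassifier(), GaussianNB()],
                             labels=['tree', 'naive_bayes'],
                             group_name='baselines')
    group.add_estimator(GaussianNB())   # gets the auto-generated label 'estimator 2'
    print(sorted(group.estimators.keys()))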
| 3.046875
| 3
|
minical/__init__.py
|
grimpy/minical
| 0
|
12784224
|
from .minical import Calendar
__all__ = ["Calendar"]
| 1.054688
| 1
|
surround/experiment/file_storage_driver.py
|
agrantdeakin/surround
| 1
|
12784225
|
import os
import shutil
from .storage_driver import StorageDriver
class FileStorageDriver(StorageDriver):
def __init__(self, path):
super().__init__(path)
os.makedirs(path, exist_ok=True)
def pull(self, remote_path, local_path=None, override_ok=False):
if not self.exists(remote_path):
raise FileNotFoundError("That file doesn't exist")
path = os.path.join(self.url, remote_path)
if local_path:
if not override_ok and os.path.exists(local_path):
raise FileExistsError("File already exists at pull location!")
os.makedirs(os.path.dirname(local_path), exist_ok=True)
shutil.copyfile(path, local_path)
return True
with open(path, "rb") as f:
contents = f.read()
return contents
def push(self, remote_path, local_path=None, bytes_data=None, override_ok=False):
if not override_ok and self.exists(remote_path):
raise FileExistsError("This file already exists")
if not local_path and not bytes_data:
raise ValueError("local_path or bytes_data need to have values!")
if local_path and bytes_data:
raise ValueError("local_path and bytes_data are mutually exclusive!")
path = os.path.join(self.url, remote_path)
if local_path:
if not os.path.exists(local_path):
raise FileNotFoundError("Could not find the file to push!")
os.makedirs(os.path.dirname(path), exist_ok=True)
shutil.copyfile(local_path, path)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb+") as f:
f.write(bytes_data)
def delete(self, remote_path):
if not self.exists(remote_path):
raise FileNotFoundError("Could not find the file/folder to delete!")
path = os.path.join(self.url, remote_path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def exists(self, remote_path):
path = os.path.join(self.url, remote_path)
return os.path.exists(path)
def get_files(self, base_url=None):
results = []
path = self.url
if base_url:
path = os.path.join(self.url, base_url)
for root, _, files in os.walk(path):
for f in files:
results.append(os.path.relpath(os.path.join(root, f), path))
return results
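# --- Hedged usage sketch (not part of the original module) ---
# A minimal round trip using only the methods above; it assumes the StorageDriver base
# class keeps the constructor path as `self.url`, which is how the methods address files.
# The storage root and remote path are illustrative.
if __name__ == "__main__":
    driver = FileStorageDriver("/tmp/experiment-store")
    driver.push("runs/run-1/notes.txt", bytes_data=b"first experiment")
    print(driver.exists("runs/run-1/notes.txt"))   # True
    print(driver.get_files())                      # relative paths under the storage root
    print(driver.pull("runs/run-1/notes.txt"))     # b'first experiment'
    driver.delete("runs/run-1/notes.txt")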
| 3.03125
| 3
|
www/server/panel/class/panelAuth.py
|
mickeywaley/bt_panel_docker_vlome
| 0
|
12784226
|
#coding: utf-8
#-------------------------------------------------------------------
# BT Panel for Linux (宝塔Linux面板)
#-------------------------------------------------------------------
# Copyright (c) 2015-2019 BT Soft (http://bt.cn) All rights reserved.
#-------------------------------------------------------------------
# Author: 黄文良 <<EMAIL>>
#-------------------------------------------------------------------
#------------------------------
# AUTH verification interface
#------------------------------
import public,time,json,os
from BTPanel import session
class panelAuth:
__product_list_path = 'data/product_list.pl'
__product_bay_path = 'data/product_bay.pl';
__product_id = '100000011';
def create_serverid(self,get):
userPath = 'data/userInfo.json';
if not os.path.exists(userPath): return public.returnMsg(False,'请先登陆宝塔官网用户');
tmp = public.readFile(userPath);
if len(tmp) < 2: tmp = '{}'
data = json.loads(tmp);
if not data: return public.returnMsg(False,'请先登陆宝塔官网用户');
if not hasattr(data,'serverid'):
s1 = self.get_mac_address() + self.get_hostname()
s2 = self.get_cpuname();
serverid = public.md5(s1) + public.md5(s2);
data['serverid'] = serverid;
public.writeFile(userPath,json.dumps(data));
return data;
def check_serverid(self,get):
if get.serverid != self.create_serverid(get): return False;
return True;
def get_plugin_price(self,get):
userPath = 'data/userInfo.json';
if not 'pluginName' in get: return public.returnMsg(False,'参数错误!');
if not os.path.exists(userPath): return public.returnMsg(False,'请先登陆宝塔官网帐号!');
params = {}
params['pid'] = self.get_plugin_info(get.pluginName)['id'];
#params['ajax2'] = '1';
data = self.send_cloud('get_product_discount', params)
return data;
def get_plugin_info(self,pluginName):
data = self.get_business_plugin(None);
if not data: return None
for d in data:
if d['name'] == pluginName: return d;
return None;
def get_plugin_list(self,get):
try:
if not session.get('get_product_bay') or not os.path.exists(self.__product_bay_path):
data = self.send_cloud('get_order_list_byuser', {});
if data: public.writeFile(self.__product_bay_path,json.dumps(data));
session['get_product_bay'] = True;
data = json.loads(public.readFile(self.__product_bay_path))
return data
except: return None
def get_buy_code(self,get):
params = {}
params['pid'] = get.pid;
params['cycle'] = get.cycle;
data = self.send_cloud('create_order', params);
if not data: return public.returnMsg(False,'连接服务器失败!')
return data;
def check_pay_status(self,get):
params = {}
params['id'] = get.id;
data = self.send_cloud('check_product_pays', params);
if not data: return public.returnMsg(False,'连接服务器失败!')
if data['status'] == True:
self.flush_pay_status(get);
if 'get_product_bay' in session: del(session['get_product_bay']);
return data;
def flush_pay_status(self,get):
if 'get_product_bay' in session: del(session['get_product_bay'])
data = self.get_plugin_list(get)
if not data: return public.returnMsg(False,'连接服务器失败!')
return public.returnMsg(True,'状态刷新成功!')
def get_renew_code(self):
pass
def check_renew_code(self):
pass
def get_business_plugin(self,get):
try:
if not session.get('get_product_list') or not os.path.exists(self.__product_list_path):
data = self.send_cloud('get_product_list', {});
if data: public.writeFile(self.__product_list_path,json.dumps(data));
session['get_product_list'] = True
data = json.loads(public.readFile(self.__product_list_path))
return data
except: return None
def get_ad_list(self):
pass
def check_plugin_end(self):
pass
def get_re_order_status_plugin(self,get):
params = {}
params['pid'] = getattr(get,'pid',0);
data = self.send_cloud('get_re_order_status', params);
if not data: return public.returnMsg(False,'连接服务器失败!');
if data['status'] == True:
self.flush_pay_status(get);
if 'get_product_bay' in session: del(session['get_product_bay']);
return data;
def get_voucher_plugin(self,get):
params = {}
params['pid'] = getattr(get,'pid',0);
params['status'] = '0';
data = self.send_cloud('get_voucher', params);
if not data: return [];
return data;
def create_order_voucher_plugin(self,get):
params = {}
params['pid'] = getattr(get,'pid',0);
params['code'] = getattr(get,'code',0);
data = self.send_cloud('create_order_voucher', params);
if not data: return public.returnMsg(False,'连接服务器失败!');
if data['status'] == True:
self.flush_pay_status(get);
if 'get_product_bay' in session: del(session['get_product_bay']);
return data;
def send_cloud(self,module,params):
try:
cloudURL = 'http://www.bt.cn/api/Plugin/';
userInfo = self.create_serverid(None);
if 'status' in userInfo:
params['uid'] = 0;
params['serverid'] = '';
else:
params['uid'] = userInfo['uid'];
params['serverid'] = userInfo['serverid'];
result = public.httpPost(cloudURL + module,params);
result = json.loads(result.strip());
if not result: return None;
return result;
except: return None
def send_cloud_pro(self,module,params):
try:
cloudURL = 'http://www.bt.cn/api/invite/';
userInfo = self.create_serverid(None);
if 'status' in userInfo:
params['uid'] = 0;
params['serverid'] = '';
else:
params['uid'] = userInfo['uid'];
params['serverid'] = userInfo['serverid'];
result = public.httpPost(cloudURL + module,params);
result = json.loads(result);
if not result: return None;
return result;
except: return None
def get_voucher(self,get):
params = {}
params['product_id'] = self.__product_id;
params['status'] = '0';
data = self.send_cloud_pro('get_voucher', params);
return data;
def get_order_status(self,get):
params = {}
data = self.send_cloud_pro('get_order_status', params);
return data;
def get_product_discount_by(self,get):
params = {}
data = self.send_cloud_pro('get_product_discount_by', params);
return data;
def get_re_order_status(self,get):
params = {}
data = self.send_cloud_pro('get_re_order_status', params);
return data;
def create_order_voucher(self,get):
code = getattr(get,'code','1')
params = {}
params['code'] = code;
data = self.send_cloud_pro('create_order_voucher', params);
return data;
def create_order(self,get):
cycle = getattr(get,'cycle','1');
params = {}
params['cycle'] = cycle;
data = self.send_cloud_pro('create_order', params);
return data;
def get_mac_address(self):
import uuid
mac=uuid.UUID(int = uuid.getnode()).hex[-12:]
return ":".join([mac[e:e+2] for e in range(0,11,2)])
def get_hostname(self):
import socket
return socket.getfqdn(socket.gethostname())
def get_cpuname(self):
return public.ExecShell("cat /proc/cpuinfo|grep 'model name'|cut -d : -f2")[0].strip();
| 2
| 2
|
Curso de Cisco/Actividades/Ejemplos de tuplas.py
|
tomasfriz/Curso-de-Cisco
| 0
|
12784227
|
# Example 1
t1 = (1, 2, 3)
for elem in t1:
print(elem)
# Example 2
t2 = (1, 2, 3, 4)
print(5 in t2)
print(5 not in t2)
# Example 3
t3 = (1, 2, 3, 5)
print(len(t3))
# Example 4
t4 = t1 + t2
t5 = t3 * 2
print(t4)
print(t5)
| 3.78125
| 4
|
tflmlib/InputData.py
|
bjascob/SmartLMVocabs
| 10
|
12784228
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Acknowledgements:
# This file is adapted from code created by Saarland University, Spoken Language Systems (LSV)
# which is partially based on the Tensorflow PTB-LM recipe
# See https://github.com/uds-lsv/TF-NNLM-TK
# and https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
from __future__ import print_function
from __future__ import division
import os
import sys
import fnmatch
import numpy
#
#
class InputData(object):
''' Class for reading a directory of data and delivering it for training / test
Read a corpus directory of preprocessed .npy files and transform it into
arrays of input and target batches. All data in the directory is read
until max_words is reached. Data can be optionally split across epochs.
Args:
batch_size (int): number of sequences per batch
seq_length (int): number of word tokens in a sequence
history_size (int): size of history = 1
'''
def __init__(self, batch_size, seq_length, history_size=1):
self.batch_size = batch_size
self.seq_length = seq_length
self.history_size = history_size
def loadIndexedCorpus(self, data_dir, max_words=int(1e9), epoch_splits=1):
''' Load the preprocessed corpus of index data (.npy format)
Files are read in numerical order. Reading is stopped when max_words
is reached.
Args:
data_dir (str): name of directory where data is saved
max_words (int): number or words/tokens to read in
epoch_splits (int): number of epochs to split the data across
'''
fns = [os.path.join(data_dir, fn) for fn in os.listdir(data_dir)
if fnmatch.fnmatch(fn, '*.npy')]
data = numpy.zeros(max_words, dtype='int32')
ptr = 0
for fn in sorted(fns):
print('Loading data from ', fn)
            with open(fn, 'rb') as f:
                new_data = numpy.load(f)
load_size = min(new_data.shape[0], data.shape[0] - ptr)
data[ptr:ptr + load_size] = new_data[:load_size]
ptr += load_size
if ptr >= max_words:
break
data = data[:ptr]
print('Total size of loaded data is {:,} words.'.format(data.shape[0]))
self.data_to_batches(data) # create the input/target batches from the data array
self.reset_batch_pointer() # make sure that the index points at the first batch
# Split the data across epochs if requested. Truncate data as needed.
if epoch_splits > 1:
self.epoch_splits = epoch_splits
self.batches_per_epoch = int(self.num_batches / epoch_splits)
print('Data split across {:} epochs. Batches per epoch is {:,}. '
'Words per epoch is {:,}'.format(
self.epoch_splits, self.batches_per_epoch,
self.batches_per_epoch * self.batch_size * self.seq_length))
else:
self.epoch_splits = 1
self.batches_per_epoch = self.num_batches
self.set_epoch_num(0)
print('')
def data_to_batches(self, data):
''' Create batches from data and store it in self.input/target
Args:
data (numpy array): Numpy array of data to be split into batches
'''
# Figure number of batches, truncate the data and print an error
# message when the data array is too small
self.num_batches = int((data.size - self.history_size) /
(self.batch_size * self.seq_length))
if self.num_batches == 0:
msg = "ERROR: Cannot create batches ==> data size={}, batch size={}, segment size={}"
assert False, msg.format(data.size, self.batch_size, self.seq_length)
data = data[:(self.num_batches * self.batch_size * self.seq_length) + self.history_size]
# Remove the last words in the input chunk and shift the target words
input = data[:-1]
target = data[self.history_size:]
# Chunk the data for consumption
input = numpy.array(self.chunk(input, (self.num_batches * self.seq_length) +
self.history_size - 1, overlap=self.history_size - 1))
target = numpy.array(self.chunk(target, (self.num_batches * self.seq_length), overlap=0))
self.input = self.chunk(input, self.seq_length + self.history_size - 1,
overlap=self.history_size - 1)
self.target = self.chunk(target, self.seq_length, overlap=0)
self.reset_batch_pointer()
def get_num_batches(self):
        ''' Get the number of batches for an epoch
Returns:
int: Number of batches per epoch
'''
return self.batches_per_epoch
def next_batch(self):
''' Get X and Y data and increment the internal pointer
Returns:
numpy array: X input data
numpy array: Y target data
'''
ptr = self.pointer + self.epoch_offset
x, y = self.input[ptr], self.target[ptr]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
''' Reset the batch pointer to the beginning of the data'''
self.pointer = 0
def set_epoch_num(self, epoch_num):
''' Set the offset into the data based on the epoch number
This wraps if epoch_num > epoch_splits
'''
self.epoch_offset = self.batches_per_epoch * (epoch_num % self.epoch_splits)
self.pointer = 0
@staticmethod
def chunk(A, seq_len, overlap=0):
        ''' Chunk data up for use in the net
        This function chunks data up into an array that is indexed appropriately
        for input into the network. The chunking is done so that sequences are
        continuous across batch boundaries, which makes the ordering somewhat unusual.
        For an explanation see https://github.com/uds-lsv/TF-NNLM-TK/issues/4
'''
if overlap >= seq_len:
print("ERROR in function chunk: overlap cannot be >= to sequence length")
exit(0)
if A.ndim == 1:
Alen = A.shape[0]
return [A[i:i + seq_len] for i in range(0, Alen - seq_len + 1, seq_len - overlap)]
elif A.ndim == 2:
Alen = A.shape[1]
return [A[:, i:i + seq_len] for i in range(0, Alen - seq_len + 1, seq_len - overlap)]
else:
print("ERROR in function chunk: this function works only for 1-D and 2-D arrays")
exit(0)
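# --- Hedged usage sketch (not part of the original module) ---
# Loads a directory of preprocessed .npy index files (the directory name is illustrative)
# and walks one epoch of batches using only the methods defined above.
if __name__ == '__main__':
    loader = InputData(batch_size=20, seq_length=35)
    loader.loadIndexedCorpus('data/indexed_corpus', max_words=int(1e6))
    for _ in range(loader.get_num_batches()):
        x, y = loader.next_batch()   # each is a (batch_size, seq_length) int32 array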
| 2.078125
| 2
|
Week6/pandas-intro.py
|
aliahari/python-lecture
| 1
|
12784229
|
import pandas as pd
data = pd.read_csv('grades.csv')
data["Total"]= (0.25*data["Final"]+0.75*data["MidTerm"])
print(data)
data.to_csv("new-grades.csv")
| 3.109375
| 3
|
michelson_kernel/docs.py
|
baking-bad/imichelson
| 16
|
12784230
|
docs = {
'ABS': 'ABS\nABS :: int : A => nat : A\nObtain the absolute value of an integer',
'ADD': 'ADD\n'
'ADD :: nat : nat : A => nat : A\n'
'ADD :: nat : int : A => int : A\n'
'ADD :: int : nat : A => int : A\n'
'ADD :: int : int : A => int : A\n'
'ADD :: timestamp : int : A => timestamp : A\n'
'ADD :: int : timestamp : A => timestamp : A\n'
'ADD :: mutez : mutez : A => mutez : A\n'
'Add two numerical values',
'ADDRESS': 'ADDRESS\nADDRESS :: contract ty1 : A => address : A\nPush the address of a contract',
'AMOUNT': 'AMOUNT\nAMOUNT :: A => mutez : A\nPush the amount of the current transaction',
'AND': 'AND\nAND :: bool : bool : A => bool : A\nAND :: nat : nat : A => nat : A\nBoolean and bitwise AND',
'APPLY': 'APPLY\n'
'APPLY :: ty1 : lambda ( pair ty1 ty2 ) ty3 : A => lambda ty2 ty3 : A\n'
'Partially apply a tuplified function from the stack',
'BALANCE': 'BALANCE\nBALANCE :: A => mutez : A\nPush the current amount of mutez of the executing contract',
'BLAKE2B': 'BLAKE2B\nBLAKE2B :: bytes : A => bytes : A\nCompute a Blake2B cryptographic hash',
'CAR': 'CAR\nCAR :: pair ty1 ty2 : A => ty1 : A\nAccess the left part of a pair',
'CAST': '',
'CDR': 'CDR\nCDR :: pair ty1 ty2 : A => ty2 : A\nAccess the right part of a pair',
'CHAIN_ID': 'CHAIN_ID\nCHAIN_ID :: A => chain_id : A\nPush the chain identifier',
'CHECK_SIGNATURE': 'CHECK_SIGNATURE\n'
'CHECK_SIGNATURE :: key : signature : bytes : A => bool : A\n'
'Verify signature of bytes by key',
'COMPARE': 'COMPARE\nCOMPARE :: cty : cty : A => int : A\nCompare two values',
'CONCAT': 'CONCAT\n'
'CONCAT :: string : string : A => string : A\n'
'CONCAT :: list string : A => string : A\n'
'CONCAT :: bytes : bytes : A => bytes : A\n'
'CONCAT :: list bytes : A => bytes : A\n'
'Concatenate a string, byte sequence, string list or byte sequence list',
'CONS': 'CONS\nCONS :: ty1 : list ty1 : A => list ty1 : A\nPrepend an element to a list',
'CONTRACT': 'CONTRACT ty\n'
'CONTRACT ty1 :: address : A => option ( contract ty1 ) : A\n'
'Cast an address to a typed contract',
'CREATE_ACCOUNT': '\nPush an account creation operation',
'CREATE_CONTRACT': 'CREATE_CONTRACT ty1 ty2 code\n'
'CREATE_CONTRACT ty1 ty2 code :: option key_hash : mutez : ty1 : A => operation : address : A\n'
'Push a contract creation operation',
'DIG': 'DIG n\nDIG n :: A @ ( ty1 : B ) => ty1 : ( A @ B )\nRetrieve the n\\ th element of the stack',
'DIP': 'DIP n code\nDIP n code :: A @ B => A @ C\nRun code protecting the top of the stack',
'DROP': 'DROP n\nDROP n :: A @ B => B\nDrop the top n elements of the stack',
'DUG': 'DUG n\nDUG n :: ty1 : ( A @ B ) => A @ ( ty1 : B )\nInsert the top element at depth n',
'DUP': 'DUP\nDUP :: ty1 : A => ty1 : ty1 : A\nDuplicate the top of the stack',
'EDIV': 'EDIV\n'
'EDIV :: nat : nat : A => option ( pair nat nat ) : A\n'
'EDIV :: nat : int : A => option ( pair int nat ) : A\n'
'EDIV :: int : nat : A => option ( pair int nat ) : A\n'
'EDIV :: int : int : A => option ( pair int nat ) : A\n'
'EDIV :: mutez : nat : A => option ( pair mutez mutez ) : A\n'
'EDIV :: mutez : mutez : A => option ( pair nat mutez ) : A\n'
'Euclidean division',
'EMPTY_BIG_MAP': 'EMPTY_BIG_MAP kty vty\n'
'EMPTY_BIG_MAP kty vty :: A => big_map kty vty : A\n'
'Build a new, empty big_map from kty to vty',
'EMPTY_MAP': 'EMPTY_MAP kty vty\nEMPTY_MAP kty vty :: A => map kty vty : A\nBuild a new, empty map from kty to vty',
'EMPTY_SET': 'EMPTY_SET cty\nEMPTY_SET cty :: A => set cty : A\nBuild a new, empty set for elements of type cty',
'EQ': 'EQ\nEQ :: int : A => bool : A\nCheck that the top of the stack EQuals zero',
'EXEC': 'EXEC\nEXEC :: ty1 : lambda ty1 ty2 : A => ty2 : A\nExecute a function from the stack',
'EXPAND': '',
'FAILWITH': 'FAILWITH\nFAILWITH :: ty1 : A => B\nExplicitly abort the current program',
'GE': 'GE\nGE :: int : A => bool : A\nCheck that the top of the stack is Greater Than or Equal to zero',
'GET': 'GET\n'
'GET :: kty : map kty vty : A => option vty : A\n'
'GET :: kty : big_map kty vty : A => option vty : A\n'
'Access an element in a map or big_map',
'GT': 'GT\nGT :: int : A => bool : A\nCheck that the top of the stack is Greater Than zero',
'HASH_KEY': 'HASH_KEY\nHASH_KEY :: key : A => key_hash : A\nCompute the Base58Check of a public key',
'IF': 'IF code1 code2\nIF code1 code2 :: bool : A => B\nConditional branching',
'IF_CONS': 'IF_CONS code1 code2\nIF_CONS code1 code2 :: list ty1 : A => B\nInspect a list',
'IF_LEFT': 'IF_LEFT code1 code2\nIF_LEFT code1 code2 :: or ty1 ty2 : A => B\nInspect a value of a union',
'IF_NONE': 'IF_NONE code1 code2\nIF_NONE code1 code2 :: option ty1 : A => B\nInspect an optional value',
'IMPLICIT_ACCOUNT': 'IMPLICIT_ACCOUNT\n'
'IMPLICIT_ACCOUNT :: key_hash : A => contract unit : A\n'
'Create an implicit account',
'INT': 'INT\nINT :: nat : A => int : A\nConvert a natural number to an integer',
'ISNAT': 'ISNAT\nISNAT :: int : A => option nat : A\nConvert a non-negative integer to a natural number',
'ITER': 'ITER code\n'
'ITER code :: list ty1 : A => A\n'
'ITER code :: set cty : A => A\n'
'ITER code :: map kty vty : A => A\n'
'Iterate over a set, list or map',
'LAMBDA': 'LAMBDA ty1 ty2 code\nLAMBDA ty1 ty2 code :: A => lambda ty1 ty2 : A\nPush a lambda onto the stack',
'LE': 'LE\nLE :: int : A => bool : A\nCheck that the top of the stack is Less Than or Equal to zero',
'LEFT': 'LEFT ty2\nLEFT ty2 :: ty1 : A => or ty1 ty2 : A\nWrap a value in a union (left case)',
'LOOP': 'LOOP code\nLOOP code :: bool : A => A\nA generic loop',
'LOOP_LEFT': 'LOOP_LEFT code\nLOOP_LEFT code :: or ty1 ty2 : A => ty2 : A\nLoop with accumulator',
'LSL': 'LSL\nLSL :: nat : nat : A => nat : A\nLogically left shift a natural number',
'LSR': 'LSR\nLSR :: nat : nat : A => nat : A\nLogically right shift a natural number',
'LT': 'LT\nLT :: int : A => bool : A\nCheck that the top of the stack is Less Than zero',
'MAP': 'MAP code\n'
'MAP code :: list ty1 : A => list ty2 : A\n'
'MAP code :: map kty ty1 : A => map kty ty2 : A\n'
'Apply the body expression to each element of a list or map.',
'MEM': 'MEM\n'
'MEM :: cty : set cty : A => bool : A\n'
'MEM :: kty : map kty vty : A => bool : A\n'
'MEM :: kty : big_map kty vty : A => bool : A\n'
'Check for the presence of a binding for a key in a map, set or big_map',
'MUL': 'MUL\n'
'MUL :: nat : nat : A => nat : A\n'
'MUL :: nat : int : A => int : A\n'
'MUL :: int : nat : A => int : A\n'
'MUL :: int : int : A => int : A\n'
'MUL :: mutez : nat : A => mutez : A\n'
'MUL :: nat : mutez : A => mutez : A\n'
'Multiply two numerical values',
'NEG': 'NEG\nNEG :: nat : A => int : A\nNEG :: int : A => int : A\nNegate a numerical value',
'NEQ': 'NEQ\nNEQ :: int : A => bool : A\nCheck that the top of the stack does Not EQual zero',
'NIL': 'NIL ty1\nNIL ty1 :: A => list ty1 : A\nPush an empty list',
'NONE': 'NONE ty1\nNONE ty1 :: A => option ty1 : A\nPush the absent optional value',
'NOOP': '{}\n{} :: A => A\nEmpty instruction sequence',
'NOT': 'NOT\n'
'NOT :: bool : A => bool : A\n'
'NOT :: nat : A => int : A\n'
'NOT :: int : A => int : A\n'
'Boolean negation and bitwise complement',
'NOW': 'NOW\nNOW :: A => timestamp : A\nPush block timestamp',
'OR': 'OR\nOR :: bool : bool : A => bool : A\nOR :: nat : nat : A => nat : A\nBoolean and bitwise OR',
'PACK': 'PACK\nPACK :: ty1 : A => bytes : A\nSerialize data',
'PAIR': "PAIR\nPAIR :: ty1 : ty2 : A => pair ty1 ty2 : A\nBuild a pair from the stack's top two elements",
'PUSH': 'PUSH ty1 x\nPUSH ty1 x :: A => ty1 : A\nPush a constant value of a given type onto the stack',
'RENAME': '',
'RIGHT': 'RIGHT ty1\nRIGHT ty1 :: ty2 : A => or ty1 ty2 : A\nWrap a value in a union (right case)',
'SELF': 'SELF\nSELF :: A => contract ty : A\nPush the current contract',
'SENDER': 'SENDER\nSENDER :: A => address : A\nPush the contract that initiated the current internal transaction',
'SEQ': 'code1 ; code2\ncode1 ; code2 :: A => C\nInstruction sequence',
'SET_DELEGATE': 'SET_DELEGATE\nSET_DELEGATE :: option key_hash : A => operation : A\nPush a delegation operation',
'SHA256': 'SHA256\nSHA256 :: bytes : A => bytes : A\nCompute a SHA-256 cryptographic hash',
'SHA512': 'SHA512\nSHA512 :: bytes : A => bytes : A\nCompute a SHA-512 cryptographic hash',
'SIZE': 'SIZE\n'
'SIZE :: set cty : A => nat : A\n'
'SIZE :: map kty vty : A => nat : A\n'
'SIZE :: list ty1 : A => nat : A\n'
'SIZE :: string : A => nat : A\n'
'SIZE :: bytes : A => nat : A\n'
'Obtain size of a string, list, set, map or byte sequence',
'SLICE': 'SLICE\n'
'SLICE :: nat : nat : string : A => option string : A\n'
'SLICE :: nat : nat : bytes : A => option bytes : A\n'
'Obtain a substring or subsequence of a string respectively byte sequence bytes',
'SOME': 'SOME\nSOME :: ty1 : A => option ty1 : A\nWrap an existing optional value',
'SOURCE': 'SOURCE\nSOURCE :: A => address : A\nPush the contract that initiated the current transaction',
'STEPS_TO_QUOTA': '\nPush the remaining steps before the contract execution must terminate',
'SUB': 'SUB\n'
'SUB :: nat : nat : A => int : A\n'
'SUB :: nat : int : A => int : A\n'
'SUB :: int : nat : A => int : A\n'
'SUB :: int : int : A => int : A\n'
'SUB :: timestamp : int : A => timestamp : A\n'
'SUB :: timestamp : timestamp : A => int : A\n'
'SUB :: mutez : mutez : A => mutez : A\n'
'Subtract two numerical values',
'SWAP': 'SWAP\nSWAP :: ty1 : ty2 : A => ty2 : ty1 : A\nSwap the top two elements of the stack',
'TOP': '',
'TRANSFER_TOKENS': 'TRANSFER_TOKENS\n'
'TRANSFER_TOKENS :: ty1 : mutez : contract ty1 : A => operation : A\n'
'Push a transaction operation',
'UNIT': 'UNIT\nUNIT :: A => unit : A\nPush the unit value onto the stack',
'UNPACK': 'UNPACK ty1\nUNPACK ty1 :: bytes : A => option ty1 : A\nDeserialize data, if valid',
'UPDATE': 'UPDATE\n'
'UPDATE :: cty : bool : set cty : A => set cty : A\n'
'UPDATE :: kty : option vty : map kty vty : A => map kty vty : A\n'
'UPDATE :: kty : option vty : big_map kty vty : A => big_map kty vty : A\n'
'Add or remove an element in a map, big_map or set',
'XOR': 'XOR\nXOR :: bool : bool : A => bool : A\nXOR :: nat : nat : A => nat : A\nBoolean and bitwise exclusive OR',
'address': 'address\nAddress of an untyped contract',
'big_map': 'big_map kty vty\nA lazily deserialized map from kty to vty',
'bool': 'bool\nA boolean',
'bytes': 'bytes\nA sequence of bytes',
'chain_id': 'chain_id\nA chain identifier',
'contract': "contract type\nAddress of a contract, where type is the contract's parameter type",
'int': 'int\nAn arbitrary-precision integer',
'key': 'key\nA public cryptography key',
'key_hash': 'key_hash\nA hash of a public cryptography key',
'lambda': 'lambda ty1 ty2\nA lambda with given parameter and return types',
'list': 'list type\nA single, immutable, homogeneous linked list',
'map': 'map kty vty\nAn immutable map from kty to vty',
'mutez': 'mutez\nA specific type for manipulating tokens',
'nat': 'nat\nAn arbitrary-precision natural number',
'operation': 'operation\nAn internal operation emitted by a contract',
'option': 'option type\nAn optional value',
'or': 'or ty1 ty2\nA union of two types',
'pair': 'pair ty1 ty2\nA pair of values',
'set': 'set cty\nAn immutable set of comparable values of type cty',
'signature': 'signature\nA cryptographic signature',
'string': 'string\nA string of characters',
'timestamp': 'timestamp\nA real-world date',
'unit': 'unit\nThe type whose only value is Unit'}
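# A hedged usage note (not part of the original module): `docs` maps a Michelson
# primitive or type name to its signature/description text, so a kernel can look
# entries up directly, e.g.:
if __name__ == '__main__':
    print(docs['DUP'])    # signature and description of DUP
    print(docs['pair'])   # description of the pair type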
| 1.945313
| 2
|
primer_moulo/models/models.py
|
Ghabrielince/demoodoo_20200719
| 0
|
12784231
|
# -*- coding: utf-8 -*-
from odoo import models, fields
class MascotasMascotas(models.Model):
_name = "mascotas.mascotas"
    # List of the table's fields.
name = fields.Char(string="Nombre")
tipo_id = fields.Many2one("mascotas.tipos", string="Tipo")
raza_id = fields.Many2one("mascotas.razas", string="Raza")
fecha_nacimiento = fields.Date(string="Fec. Nac.")
sexo = fields.Selection([("m", "Macho"), ("h", "Hembra")], string="Sexo")
class MascotasRazas(models.Model):
_name = "mascotas.razas"
name = fields.Char(string="Nombre")
codigo = fields.Char(string="Código")
class MascotasTipos(models.Model):
_name = "mascotas.tipos"
name = fields.Char(string="Nombre")
codigo = fields.Char(string="Código")
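# A hedged note (not part of the original module): records for these models would
# normally be created through the Odoo ORM (the field values below are illustrative):
#   tipo = env["mascotas.tipos"].create({"name": "Perro", "codigo": "P"})
#   env["mascotas.mascotas"].create({"name": "Firulais", "sexo": "m", "tipo_id": tipo.id})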
| 2.09375
| 2
|
cottonformation/res/imagebuilder.py
|
gitter-badger/cottonformation-project
| 0
|
12784232
|
# -*- coding: utf-8 -*-
"""
This module declares the AWS ImageBuilder properties and resources.
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class ImagePipelineImageTestsConfiguration(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ImagePipeline.ImageTestsConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-imagetestsconfiguration.html
Property Document:
- ``p_ImageTestsEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-imagetestsconfiguration.html#cfn-imagebuilder-imagepipeline-imagetestsconfiguration-imagetestsenabled
- ``p_TimeoutMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-imagetestsconfiguration.html#cfn-imagebuilder-imagepipeline-imagetestsconfiguration-timeoutminutes
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImagePipeline.ImageTestsConfiguration"
p_ImageTestsEnabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ImageTestsEnabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-imagetestsconfiguration.html#cfn-imagebuilder-imagepipeline-imagetestsconfiguration-imagetestsenabled"""
p_TimeoutMinutes: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "TimeoutMinutes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-imagetestsconfiguration.html#cfn-imagebuilder-imagepipeline-imagetestsconfiguration-timeoutminutes"""
@attr.s
class ContainerRecipeComponentConfiguration(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ContainerRecipe.ComponentConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-componentconfiguration.html
Property Document:
- ``p_ComponentArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-componentconfiguration.html#cfn-imagebuilder-containerrecipe-componentconfiguration-componentarn
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ContainerRecipe.ComponentConfiguration"
p_ComponentArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ComponentArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-componentconfiguration.html#cfn-imagebuilder-containerrecipe-componentconfiguration-componentarn"""
@attr.s
class ImageRecipeComponentConfiguration(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ImageRecipe.ComponentConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-componentconfiguration.html
Property Document:
- ``p_ComponentArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-componentconfiguration.html#cfn-imagebuilder-imagerecipe-componentconfiguration-componentarn
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImageRecipe.ComponentConfiguration"
p_ComponentArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ComponentArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-componentconfiguration.html#cfn-imagebuilder-imagerecipe-componentconfiguration-componentarn"""
@attr.s
class ContainerRecipeEbsInstanceBlockDeviceSpecification(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ContainerRecipe.EbsInstanceBlockDeviceSpecification"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html
Property Document:
- ``p_DeleteOnTermination``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-deleteontermination
- ``p_Encrypted``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-encrypted
- ``p_Iops``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-iops
- ``p_KmsKeyId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-kmskeyid
- ``p_SnapshotId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-snapshotid
- ``p_VolumeSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-volumesize
- ``p_VolumeType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-volumetype
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ContainerRecipe.EbsInstanceBlockDeviceSpecification"
p_DeleteOnTermination: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteOnTermination"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-deleteontermination"""
p_Encrypted: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Encrypted"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-encrypted"""
p_Iops: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Iops"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-iops"""
p_KmsKeyId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "KmsKeyId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-kmskeyid"""
p_SnapshotId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SnapshotId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-snapshotid"""
p_VolumeSize: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "VolumeSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-volumesize"""
p_VolumeType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VolumeType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-containerrecipe-ebsinstanceblockdevicespecification-volumetype"""
@attr.s
class ImagePipelineSchedule(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ImagePipeline.Schedule"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-schedule.html
Property Document:
- ``p_PipelineExecutionStartCondition``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-schedule.html#cfn-imagebuilder-imagepipeline-schedule-pipelineexecutionstartcondition
- ``p_ScheduleExpression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-schedule.html#cfn-imagebuilder-imagepipeline-schedule-scheduleexpression
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImagePipeline.Schedule"
p_PipelineExecutionStartCondition: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PipelineExecutionStartCondition"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-schedule.html#cfn-imagebuilder-imagepipeline-schedule-pipelineexecutionstartcondition"""
p_ScheduleExpression: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ScheduleExpression"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-schedule.html#cfn-imagebuilder-imagepipeline-schedule-scheduleexpression"""
@attr.s
class ImageImageTestsConfiguration(Property):
"""
AWS Object Type = "AWS::ImageBuilder::Image.ImageTestsConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-image-imagetestsconfiguration.html
Property Document:
- ``p_ImageTestsEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-image-imagetestsconfiguration.html#cfn-imagebuilder-image-imagetestsconfiguration-imagetestsenabled
- ``p_TimeoutMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-image-imagetestsconfiguration.html#cfn-imagebuilder-image-imagetestsconfiguration-timeoutminutes
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::Image.ImageTestsConfiguration"
p_ImageTestsEnabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ImageTestsEnabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-image-imagetestsconfiguration.html#cfn-imagebuilder-image-imagetestsconfiguration-imagetestsenabled"""
p_TimeoutMinutes: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "TimeoutMinutes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-image-imagetestsconfiguration.html#cfn-imagebuilder-image-imagetestsconfiguration-timeoutminutes"""
@attr.s
class InfrastructureConfigurationS3Logs(Property):
"""
AWS Object Type = "AWS::ImageBuilder::InfrastructureConfiguration.S3Logs"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-s3logs.html
Property Document:
- ``p_S3BucketName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-s3logs.html#cfn-imagebuilder-infrastructureconfiguration-s3logs-s3bucketname
- ``p_S3KeyPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-s3logs.html#cfn-imagebuilder-infrastructureconfiguration-s3logs-s3keyprefix
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::InfrastructureConfiguration.S3Logs"
p_S3BucketName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "S3BucketName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-s3logs.html#cfn-imagebuilder-infrastructureconfiguration-s3logs-s3bucketname"""
p_S3KeyPrefix: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "S3KeyPrefix"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-s3logs.html#cfn-imagebuilder-infrastructureconfiguration-s3logs-s3keyprefix"""
@attr.s
class ContainerRecipeInstanceBlockDeviceMapping(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ContainerRecipe.InstanceBlockDeviceMapping"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html
Property Document:
- ``p_DeviceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-devicename
- ``p_Ebs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-ebs
- ``p_NoDevice``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-nodevice
- ``p_VirtualName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-virtualname
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ContainerRecipe.InstanceBlockDeviceMapping"
p_DeviceName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DeviceName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-devicename"""
p_Ebs: typing.Union['ContainerRecipeEbsInstanceBlockDeviceSpecification', dict] = attr.ib(
default=None,
converter=ContainerRecipeEbsInstanceBlockDeviceSpecification.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ContainerRecipeEbsInstanceBlockDeviceSpecification)),
metadata={AttrMeta.PROPERTY_NAME: "Ebs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-ebs"""
p_NoDevice: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "NoDevice"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-nodevice"""
p_VirtualName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VirtualName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceblockdevicemapping.html#cfn-imagebuilder-containerrecipe-instanceblockdevicemapping-virtualname"""
@attr.s
class DistributionConfigurationLaunchTemplateConfiguration(Property):
"""
AWS Object Type = "AWS::ImageBuilder::DistributionConfiguration.LaunchTemplateConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html
Property Document:
- ``p_AccountId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html#cfn-imagebuilder-distributionconfiguration-launchtemplateconfiguration-accountid
- ``p_LaunchTemplateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html#cfn-imagebuilder-distributionconfiguration-launchtemplateconfiguration-launchtemplateid
- ``p_SetDefaultVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html#cfn-imagebuilder-distributionconfiguration-launchtemplateconfiguration-setdefaultversion
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::DistributionConfiguration.LaunchTemplateConfiguration"
p_AccountId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AccountId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html#cfn-imagebuilder-distributionconfiguration-launchtemplateconfiguration-accountid"""
p_LaunchTemplateId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html#cfn-imagebuilder-distributionconfiguration-launchtemplateconfiguration-launchtemplateid"""
p_SetDefaultVersion: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "SetDefaultVersion"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html#cfn-imagebuilder-distributionconfiguration-launchtemplateconfiguration-setdefaultversion"""
@attr.s
class ContainerRecipeInstanceConfiguration(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ContainerRecipe.InstanceConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceconfiguration.html
Property Document:
- ``p_BlockDeviceMappings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceconfiguration.html#cfn-imagebuilder-containerrecipe-instanceconfiguration-blockdevicemappings
- ``p_Image``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceconfiguration.html#cfn-imagebuilder-containerrecipe-instanceconfiguration-image
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ContainerRecipe.InstanceConfiguration"
p_BlockDeviceMappings: typing.List[typing.Union['ContainerRecipeInstanceBlockDeviceMapping', dict]] = attr.ib(
default=None,
converter=ContainerRecipeInstanceBlockDeviceMapping.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ContainerRecipeInstanceBlockDeviceMapping), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "BlockDeviceMappings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceconfiguration.html#cfn-imagebuilder-containerrecipe-instanceconfiguration-blockdevicemappings"""
p_Image: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Image"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceconfiguration.html#cfn-imagebuilder-containerrecipe-instanceconfiguration-image"""
@attr.s
class ContainerRecipeTargetContainerRepository(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ContainerRecipe.TargetContainerRepository"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-targetcontainerrepository.html
Property Document:
- ``p_RepositoryName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-targetcontainerrepository.html#cfn-imagebuilder-containerrecipe-targetcontainerrepository-repositoryname
- ``p_Service``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-targetcontainerrepository.html#cfn-imagebuilder-containerrecipe-targetcontainerrepository-service
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ContainerRecipe.TargetContainerRepository"
p_RepositoryName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "RepositoryName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-targetcontainerrepository.html#cfn-imagebuilder-containerrecipe-targetcontainerrepository-repositoryname"""
p_Service: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Service"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-targetcontainerrepository.html#cfn-imagebuilder-containerrecipe-targetcontainerrepository-service"""
@attr.s
class ImageRecipeEbsInstanceBlockDeviceSpecification(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ImageRecipe.EbsInstanceBlockDeviceSpecification"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html
Property Document:
- ``p_DeleteOnTermination``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-deleteontermination
- ``p_Encrypted``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-encrypted
- ``p_Iops``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-iops
- ``p_KmsKeyId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-kmskeyid
- ``p_SnapshotId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-snapshotid
- ``p_VolumeSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-volumesize
- ``p_VolumeType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-volumetype
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImageRecipe.EbsInstanceBlockDeviceSpecification"
p_DeleteOnTermination: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteOnTermination"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-deleteontermination"""
p_Encrypted: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Encrypted"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-encrypted"""
p_Iops: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Iops"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-iops"""
p_KmsKeyId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "KmsKeyId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-kmskeyid"""
p_SnapshotId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SnapshotId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-snapshotid"""
p_VolumeSize: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "VolumeSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-volumesize"""
p_VolumeType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VolumeType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html#cfn-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification-volumetype"""
@attr.s
class ImageRecipeInstanceBlockDeviceMapping(Property):
"""
AWS Object Type = "AWS::ImageBuilder::ImageRecipe.InstanceBlockDeviceMapping"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html
Property Document:
- ``p_DeviceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-devicename
- ``p_Ebs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-ebs
- ``p_NoDevice``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-nodevice
- ``p_VirtualName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-virtualname
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImageRecipe.InstanceBlockDeviceMapping"
p_DeviceName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DeviceName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-devicename"""
p_Ebs: typing.Union['ImageRecipeEbsInstanceBlockDeviceSpecification', dict] = attr.ib(
default=None,
converter=ImageRecipeEbsInstanceBlockDeviceSpecification.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ImageRecipeEbsInstanceBlockDeviceSpecification)),
metadata={AttrMeta.PROPERTY_NAME: "Ebs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-ebs"""
p_NoDevice: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "NoDevice"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-nodevice"""
p_VirtualName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VirtualName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html#cfn-imagebuilder-imagerecipe-instanceblockdevicemapping-virtualname"""
@attr.s
class InfrastructureConfigurationLogging(Property):
"""
AWS Object Type = "AWS::ImageBuilder::InfrastructureConfiguration.Logging"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-logging.html
Property Document:
- ``p_S3Logs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-logging.html#cfn-imagebuilder-infrastructureconfiguration-logging-s3logs
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::InfrastructureConfiguration.Logging"
p_S3Logs: typing.Union['InfrastructureConfigurationS3Logs', dict] = attr.ib(
default=None,
converter=InfrastructureConfigurationS3Logs.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(InfrastructureConfigurationS3Logs)),
metadata={AttrMeta.PROPERTY_NAME: "S3Logs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-logging.html#cfn-imagebuilder-infrastructureconfiguration-logging-s3logs"""
@attr.s
class DistributionConfigurationDistribution(Property):
"""
AWS Object Type = "AWS::ImageBuilder::DistributionConfiguration.Distribution"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html
Property Document:
- ``rp_Region``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-region
- ``p_AmiDistributionConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-amidistributionconfiguration
- ``p_ContainerDistributionConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-containerdistributionconfiguration
- ``p_LaunchTemplateConfigurations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-launchtemplateconfigurations
- ``p_LicenseConfigurationArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-licenseconfigurationarns
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::DistributionConfiguration.Distribution"
rp_Region: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Region"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-region"""
p_AmiDistributionConfiguration: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "AmiDistributionConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-amidistributionconfiguration"""
p_ContainerDistributionConfiguration: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerDistributionConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-containerdistributionconfiguration"""
p_LaunchTemplateConfigurations: typing.List[typing.Union['DistributionConfigurationLaunchTemplateConfiguration', dict]] = attr.ib(
default=None,
converter=DistributionConfigurationLaunchTemplateConfiguration.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(DistributionConfigurationLaunchTemplateConfiguration), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateConfigurations"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-launchtemplateconfigurations"""
p_LicenseConfigurationArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "LicenseConfigurationArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html#cfn-imagebuilder-distributionconfiguration-distribution-licenseconfigurationarns"""
#--- Resource declaration ---
@attr.s
class Component(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::Component"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-name
- ``rp_Platform``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-platform
- ``rp_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-version
- ``p_ChangeDescription``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-changedescription
- ``p_Data``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-data
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-description
- ``p_KmsKeyId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-kmskeyid
- ``p_SupportedOsVersions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-supportedosversions
- ``p_Uri``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-uri
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::Component"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-name"""
rp_Platform: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Platform"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-platform"""
rp_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-version"""
p_ChangeDescription: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ChangeDescription"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-changedescription"""
p_Data: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Data"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-data"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-description"""
p_KmsKeyId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "KmsKeyId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-kmskeyid"""
p_SupportedOsVersions: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "SupportedOsVersions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-supportedosversions"""
p_Uri: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Uri"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-uri"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#cfn-imagebuilder-component-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#aws-resource-imagebuilder-component-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#aws-resource-imagebuilder-component-return-values"""
return GetAtt(resource=self, attr_name="Name")
@property
def rv_Type(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#aws-resource-imagebuilder-component-return-values"""
return GetAtt(resource=self, attr_name="Type")
@property
def rv_Encrypted(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html#aws-resource-imagebuilder-component-return-values"""
return GetAtt(resource=self, attr_name="Encrypted")
@attr.s
class InfrastructureConfiguration(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::InfrastructureConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html
Property Document:
- ``rp_InstanceProfileName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-instanceprofilename
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-description
- ``p_InstanceTypes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-instancetypes
- ``p_KeyPair``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-keypair
- ``p_Logging``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-logging
- ``p_ResourceTags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-resourcetags
- ``p_SecurityGroupIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-securitygroupids
- ``p_SnsTopicArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-snstopicarn
- ``p_SubnetId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-subnetid
- ``p_TerminateInstanceOnFailure``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-terminateinstanceonfailure
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::InfrastructureConfiguration"
rp_InstanceProfileName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "InstanceProfileName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-instanceprofilename"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-description"""
p_InstanceTypes: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "InstanceTypes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-instancetypes"""
p_KeyPair: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "KeyPair"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-keypair"""
p_Logging: typing.Union['InfrastructureConfigurationLogging', dict] = attr.ib(
default=None,
converter=InfrastructureConfigurationLogging.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(InfrastructureConfigurationLogging)),
metadata={AttrMeta.PROPERTY_NAME: "Logging"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-logging"""
p_ResourceTags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "ResourceTags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-resourcetags"""
p_SecurityGroupIds: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "SecurityGroupIds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-securitygroupids"""
p_SnsTopicArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SnsTopicArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-snstopicarn"""
p_SubnetId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SubnetId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-subnetid"""
p_TerminateInstanceOnFailure: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "TerminateInstanceOnFailure"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-terminateinstanceonfailure"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#cfn-imagebuilder-infrastructureconfiguration-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#aws-resource-imagebuilder-infrastructureconfiguration-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html#aws-resource-imagebuilder-infrastructureconfiguration-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class ImagePipeline(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::ImagePipeline"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html
Property Document:
- ``rp_InfrastructureConfigurationArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-infrastructureconfigurationarn
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-name
- ``p_ContainerRecipeArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-containerrecipearn
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-description
- ``p_DistributionConfigurationArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-distributionconfigurationarn
- ``p_EnhancedImageMetadataEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-enhancedimagemetadataenabled
- ``p_ImageRecipeArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-imagerecipearn
- ``p_ImageTestsConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-imagetestsconfiguration
- ``p_Schedule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-schedule
- ``p_Status``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-status
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImagePipeline"
rp_InfrastructureConfigurationArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "InfrastructureConfigurationArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-infrastructureconfigurationarn"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-name"""
p_ContainerRecipeArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerRecipeArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-containerrecipearn"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-description"""
p_DistributionConfigurationArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DistributionConfigurationArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-distributionconfigurationarn"""
p_EnhancedImageMetadataEnabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "EnhancedImageMetadataEnabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-enhancedimagemetadataenabled"""
p_ImageRecipeArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageRecipeArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-imagerecipearn"""
p_ImageTestsConfiguration: typing.Union['ImagePipelineImageTestsConfiguration', dict] = attr.ib(
default=None,
converter=ImagePipelineImageTestsConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ImagePipelineImageTestsConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "ImageTestsConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-imagetestsconfiguration"""
p_Schedule: typing.Union['ImagePipelineSchedule', dict] = attr.ib(
default=None,
converter=ImagePipelineSchedule.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ImagePipelineSchedule)),
metadata={AttrMeta.PROPERTY_NAME: "Schedule"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-schedule"""
p_Status: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Status"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-status"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#cfn-imagebuilder-imagepipeline-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#aws-resource-imagebuilder-imagepipeline-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html#aws-resource-imagebuilder-imagepipeline-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class DistributionConfiguration(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::DistributionConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html
Property Document:
- ``rp_Distributions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-distributions
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-description
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::DistributionConfiguration"
rp_Distributions: typing.List[typing.Union['DistributionConfigurationDistribution', dict]] = attr.ib(
default=None,
converter=DistributionConfigurationDistribution.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(DistributionConfigurationDistribution), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Distributions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-distributions"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-description"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#cfn-imagebuilder-distributionconfiguration-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#aws-resource-imagebuilder-distributionconfiguration-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html#aws-resource-imagebuilder-distributionconfiguration-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class ContainerRecipe(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::ContainerRecipe"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html
Property Document:
- ``rp_Components``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-components
- ``rp_ContainerType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-containertype
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-name
- ``rp_ParentImage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-parentimage
- ``rp_TargetRepository``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-targetrepository
- ``rp_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-version
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-description
- ``p_DockerfileTemplateData``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-dockerfiletemplatedata
- ``p_DockerfileTemplateUri``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-dockerfiletemplateuri
- ``p_ImageOsVersionOverride``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-imageosversionoverride
- ``p_InstanceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-instanceconfiguration
- ``p_KmsKeyId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-kmskeyid
- ``p_PlatformOverride``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-platformoverride
- ``p_WorkingDirectory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-workingdirectory
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ContainerRecipe"
rp_Components: typing.List[typing.Union['ContainerRecipeComponentConfiguration', dict]] = attr.ib(
default=None,
converter=ContainerRecipeComponentConfiguration.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ContainerRecipeComponentConfiguration), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Components"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-components"""
rp_ContainerType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ContainerType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-containertype"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-name"""
rp_ParentImage: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ParentImage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-parentimage"""
rp_TargetRepository: typing.Union['ContainerRecipeTargetContainerRepository', dict] = attr.ib(
default=None,
converter=ContainerRecipeTargetContainerRepository.from_dict,
validator=attr.validators.instance_of(ContainerRecipeTargetContainerRepository),
metadata={AttrMeta.PROPERTY_NAME: "TargetRepository"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-targetrepository"""
rp_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-version"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-description"""
p_DockerfileTemplateData: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DockerfileTemplateData"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-dockerfiletemplatedata"""
p_DockerfileTemplateUri: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DockerfileTemplateUri"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-dockerfiletemplateuri"""
p_ImageOsVersionOverride: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageOsVersionOverride"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-imageosversionoverride"""
p_InstanceConfiguration: typing.Union['ContainerRecipeInstanceConfiguration', dict] = attr.ib(
default=None,
converter=ContainerRecipeInstanceConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ContainerRecipeInstanceConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-instanceconfiguration"""
p_KmsKeyId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "KmsKeyId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-kmskeyid"""
p_PlatformOverride: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformOverride"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-platformoverride"""
p_WorkingDirectory: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "WorkingDirectory"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-workingdirectory"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#cfn-imagebuilder-containerrecipe-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#aws-resource-imagebuilder-containerrecipe-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html#aws-resource-imagebuilder-containerrecipe-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class ImageRecipe(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::ImageRecipe"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html
Property Document:
- ``rp_Components``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-components
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-name
- ``rp_ParentImage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-parentimage
- ``rp_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-version
- ``p_BlockDeviceMappings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-blockdevicemappings
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-description
- ``p_WorkingDirectory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-workingdirectory
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::ImageRecipe"
rp_Components: typing.List[typing.Union['ImageRecipeComponentConfiguration', dict]] = attr.ib(
default=None,
converter=ImageRecipeComponentConfiguration.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ImageRecipeComponentConfiguration), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Components"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-components"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-name"""
rp_ParentImage: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ParentImage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-parentimage"""
rp_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-version"""
p_BlockDeviceMappings: typing.List[typing.Union['ImageRecipeInstanceBlockDeviceMapping', dict]] = attr.ib(
default=None,
converter=ImageRecipeInstanceBlockDeviceMapping.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ImageRecipeInstanceBlockDeviceMapping), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "BlockDeviceMappings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-blockdevicemappings"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-description"""
p_WorkingDirectory: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "WorkingDirectory"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-workingdirectory"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#cfn-imagebuilder-imagerecipe-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#aws-resource-imagebuilder-imagerecipe-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html#aws-resource-imagebuilder-imagerecipe-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class Image(Resource):
"""
AWS Object Type = "AWS::ImageBuilder::Image"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html
Property Document:
- ``rp_InfrastructureConfigurationArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-infrastructureconfigurationarn
- ``p_ContainerRecipeArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-containerrecipearn
- ``p_DistributionConfigurationArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-distributionconfigurationarn
- ``p_EnhancedImageMetadataEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-enhancedimagemetadataenabled
- ``p_ImageRecipeArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-imagerecipearn
- ``p_ImageTestsConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-imagetestsconfiguration
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-tags
"""
AWS_OBJECT_TYPE = "AWS::ImageBuilder::Image"
rp_InfrastructureConfigurationArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "InfrastructureConfigurationArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-infrastructureconfigurationarn"""
p_ContainerRecipeArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerRecipeArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-containerrecipearn"""
p_DistributionConfigurationArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DistributionConfigurationArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-distributionconfigurationarn"""
p_EnhancedImageMetadataEnabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "EnhancedImageMetadataEnabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-enhancedimagemetadataenabled"""
p_ImageRecipeArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageRecipeArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-imagerecipearn"""
p_ImageTestsConfiguration: typing.Union['ImageImageTestsConfiguration', dict] = attr.ib(
default=None,
converter=ImageImageTestsConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ImageImageTestsConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "ImageTestsConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-imagetestsconfiguration"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#cfn-imagebuilder-image-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#aws-resource-imagebuilder-image-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#aws-resource-imagebuilder-image-return-values"""
return GetAtt(resource=self, attr_name="Name")
@property
def rv_ImageId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html#aws-resource-imagebuilder-image-return-values"""
return GetAtt(resource=self, attr_name="ImageId")
| 1.984375
| 2
|
all_time_graph.py
|
JupyterJones/LBRYnomics
| 0
|
12784233
|
"""
Get the timestamps of all claims and plot the cumulative number vs. time!
"""
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import requests
import sqlite3
import time
def make_graph(mode, show=True):
"""
mode must be "claims" or "channels"
"""
if mode != "claims" and mode != "channels":
return
plt.close("all")
# Open the DB
db_file = "/home/brewer/local/lbry-sdk/lbry/lbryum-data/claims.db"
conn = sqlite3.connect(db_file)
c = conn.cursor()
# List for results
times = []
# Query
if mode == "claims":
x = "<>"
else:
x = "="
query = "SELECT creation_timestamp FROM claim\
WHERE claim_type {x} 2;".format(x=x)
# Iterate over query results
i = 0
for t in c.execute(query):
times.append(t)
i = i + 1
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
# Sort the times and convert to a numpy array
times = np.sort(np.array(times).flatten())
# Save some stats to JSON for Electron
now = time.time()
my_dict = {}
my_dict["unix_time"] = now
my_dict["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC"
my_dict["total_{mode}".format(mode=mode)] = int(\
len(times))
my_dict["new_{mode}_1_hour".format(mode=mode)] = int(\
np.sum(times > (now - 3600.0)))
my_dict["new_{mode}_24_hours".format(mode=mode)] = int(\
np.sum(times > (now - 86400.0)))
my_dict["new_{mode}_7_days".format(mode=mode)] = int(\
np.sum(times > (now - 7*86400.0)))
my_dict["new_{mode}_30_days".format(mode=mode)] = int(\
np.sum(times > (now - 30*86400.0)))
f = open("{mode}_stats.json".format(mode=mode), "w")
f.write(json.dumps(my_dict))
f.close()
# Count new claims this UTC day
count_today = np.sum(times > 86400.0*int(now/86400.0))
if mode == "claims":
string = "publications"
else:
string = "channels"
print("{K} {mode}, {n} from today so far (UTC). ".format(K=len(times), mode=string, n=count_today), end="", flush=True)
# Plotting stuff
plt.rcParams["font.family"] = "Liberation Sans"
plt.rcParams["font.size"] = 14
plt.style.use("dark_background")
plt.rcParams["axes.facecolor"] = "#3c3d3c"
plt.rcParams["savefig.facecolor"] = "#3c3d3c"
plt.figure(figsize=(15, 11))
plt.subplot(2, 1, 1)
times_in_days = (times - 1483228800)/86400.0
days = times_in_days.astype("int64")
plt.plot(times_in_days,
np.arange(len(times)), "w-", linewidth=1.5)
plt.ylabel("Cumulative number of {mode}".format(mode=string))
plt.title("Total number of {mode} = {n}.".format(n=len(times), mode=string))
plt.xlim([0.0, days.max() + 1])
plt.ylim(bottom=-100)
plt.gca().tick_params(labelright=True)
# Add vertical lines for new years (approximately)
new_years = np.arange(0, 5)*365.2425
for year in new_years:
plt.axvline(year, color="r", alpha=0.8, linestyle="--")
# Add text about years
year_names = [2017, 2018, 2019]
for i in range(len(year_names)):
year = new_years[i]
plt.text(year+5.0, 0.95*plt.gca().get_ylim()[1],
"{text} begins".format(text=year_names[i]),
fontsize=10)
# Add line and text about MH's video
plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g")
plt.text(890.0, 0.2*plt.gca().get_ylim()[1],
"@MH video\n\'Why I Left YouTube\'\ngoes viral",
fontsize=10)
plt.subplot(2, 1, 2)
bin_width = 1.0
# Bin edges including right edge of last bin
bins = np.arange(0, np.max(days)+2) - 0.5*bin_width
color = "#6b95ef"
counts = plt.hist(days, bins, alpha=0.9, color=color, label="Raw",
width=bin_width, align="mid")[0]
# Compute 10-day moving average
moving_average = np.zeros(len(bins)-1)
for i in range(len(moving_average)):
subset = counts[0:(i+1)]
if len(subset) >= 10:
subset = subset[-10:]
moving_average[i] = np.mean(subset)
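    # A vectorized equivalent of the loop above (a sketch, for reference only;
    # it divides each trailing-window sum by the number of days actually seen):
    #   window_sums = np.convolve(counts, np.ones(10))[:len(counts)]
    #   moving_average = window_sums / np.minimum(np.arange(len(counts)) + 1, 10.0)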
plt.plot(bins[0:-2] + 0.5*bin_width, moving_average[0:-1], "w-",
label="10-day moving average", linewidth=1.5)
plt.xlim([0.0, days.max() + 1])
plt.xlabel("Time (days since 2017-01-01)")
plt.ylabel("New {mode} added each day".format(mode=string))
subset = counts[-31:-1]
plt.title("Recent average rate (last 30 days) = {n} {mode} per day.".\
format(n=int(np.sum(time.time() - times <= 30.0*86400.0)/30.0),
mode=string))
plt.gca().tick_params(labelright=True)
# Year lines
for year in new_years:
plt.axvline(year, color="r", alpha=0.8, linestyle="--")
# MH line
plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g")
# plt.gca().set_yticks([1.0, 10.0, 100.0, 1000.0, 10000.0])
# plt.gca().set_yticklabels(["1", "10", "100", "1000", "10000"])
plt.legend()
plt.savefig("{mode}.svg".format(mode=mode), bbox_inches="tight")
plt.savefig("{mode}.png".format(mode=mode), bbox_inches="tight", dpi=70)
print("Figure saved to {mode}.svg and {mode}.png.".format(mode=mode))
if show:
plt.show()
def aggregate_tips():
"""
Calculate tips over past X amount of time and write JSON output
"""
# The SQL query to perform
now = time.time()
print("Computing tip stats...", end="", flush=True)
labels = ["30_days", "7_days", "24_hours", "1_hour"]
windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0]
result = {}
result["unix_time"] = now
result["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC"
# Agrees with old method, but should it be SUM(amount)?
query = "SELECT support_id, amount, time, claim_name, claim_id, is_nsfw, SUM(to_claim_address) tot FROM (SELECT support.id as support_id, support.support_amount amount,\
transaction.transaction_time time, claim.is_nsfw is_nsfw,\
claim.claim_id claim_id, claim.name claim_name,\
(CASE WHEN (output.address_list LIKE CONCAT('%25', claim_address, '%25')) THEN '1' ELSE '0' END) to_claim_address\
FROM claim\
INNER JOIN support ON support.supported_claim_id = claim.claim_id\
INNER JOIN transaction ON support.transaction_hash_id = transaction.hash\
INNER JOIN output ON transaction.hash = output.transaction_hash \
WHERE transaction.transaction_time > ({now} - {window})\
AND transaction.transaction_time <= {now}) AS result\
GROUP BY support_id, amount;".format(now=now, window=windows[0])
request = requests.get("https://chainquery.lbry.com/api/sql?query=" + query)
the_dict = request.json()
# Get tips into numpy array
times = []
tips = []
is_tip = []
links = []
is_nsfw = []
for row in the_dict["data"]:
times.append(float(row["time"]))
tips.append(float(row["amount"]))
links.append("https://open.lbry.com/" + str(row["claim_name"]) + ":"\
+ str(row["claim_id"]))
is_nsfw.append(row["is_nsfw"])
if row["tot"] > 0:
is_tip.append(True)
else:
is_tip.append(False)
times = np.array(times)
tips = np.array(tips)
is_tip = np.array(is_tip)
links = np.array(links)
is_nsfw = np.array(is_nsfw)
# Write tips
for i in range(len(labels)):
keep = (times > (now - windows[i])) & is_tip
_times = times[keep]
_tips = tips[keep]
_links = links[keep]
_is_nsfw = is_nsfw[keep]
result["num_tips_{label}".format(label=labels[i])] = len(_tips)
result["lbc_tipped_{label}".format(label=labels[i])] = float(_tips.sum())
maxtip = 0
maxtip_link = None
maxtip_is_nsfw = None
if len(_tips) > 0:
maxtip = float(_tips.max())
index = np.argmax(_tips)
maxtip_link = _links[index]
maxtip_is_nsfw = _is_nsfw[index]
result["biggest_tip_{label}".format(label=labels[i])] = maxtip
result["biggest_tip_{label}_link".format(label=labels[i])] = maxtip_link
result["biggest_tip_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw)
# Write supports
for i in range(len(labels)):
keep = (times > (now - windows[i])) & (~is_tip)
_times = times[keep]
_tips = tips[keep]
_links = links[keep]
_is_nsfw = is_nsfw[keep]
result["num_supports_{label}".format(label=labels[i])] = len(_tips)
result["lbc_supports_{label}".format(label=labels[i])] = float(_tips.sum())
maxtip = 0
maxtip_link = None
maxtip_is_nsfw = None
if len(_tips) > 0:
maxtip = float(_tips.max())
index = np.argmax(_tips)
maxtip_link = _links[index]
maxtip_is_nsfw = _is_nsfw[index]
result["biggest_support_{label}".format(label=labels[i])] = maxtip
result["biggest_support_{label}_link".format(label=labels[i])] = maxtip_link
result["biggest_support_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw)
f = open("tips_stats.json", "w")
f.write(json.dumps(result))
f.close()
print("done. ", flush=True, end="")
def publish_files():
"""
Publish files to somewhere on the internet.
"""
print("Publishing files to the internet...", end="", flush=True)
import subprocess
try:
subprocess.run("./upload.sh", timeout=120.0)
print("done.\n")
except:
print("failed.\n")
if __name__ == "__main__":
# Do it manually once then enter the infinite loop
now = time.time()
print("The time is " + str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC.")
make_graph("claims")
make_graph("channels")
try:
aggregate_tips()
except:
pass
import os
try:
publish_files()
except:
pass
import time
while True:
print("", flush=True)
time.sleep(530.0)
now = time.time()
print("The time is " + str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC.")
make_graph("claims", show=False)
make_graph("channels", show=False)
try:
aggregate_tips()
except:
pass
try:
publish_files()
except:
pass
| 2.609375
| 3
|
localutils/timetools.py
|
maxmouchet/rtt
| 5
|
12784234
|
<reponame>maxmouchet/rtt
"""
timetools.py provides functions that perform conversion among these types: second since epoch, string, datetime
"""
import datetime
import pytz
from dateutil.parser import parse
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
TIME_FORMAT = '%Y-%m-%d %H:%M:%S %z'
# TODO: take matplotlib mdates conversion into consideration
def string_to_datetime(str_):
""" translate a formatted string to a datetime object
Args:
        str_ (string): a formatted string for date and time
Return:
datetime, a datetime object with UTC as timezone
"""
dt = parse(str_)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=pytz.UTC)
return dt
def datetime_to_epoch(dt):
""" translate a python datetime object to seconds since epoch
Args:
dt (datetime): a datetime object
Returns:
int, seconds since epoch
"""
return int((dt-epoch).total_seconds())
def string_to_epoch(str_):
""" translate an UTC time string to epoch time
Args:
str_ (string): a string describing a UTC time in certain format
Returns:
int, seconds since the epoch
"""
return datetime_to_epoch(string_to_datetime(str_))
def datetime_to_string(dt):
""" translate a python datetime object into a readable string
Args:
dt (datetime): a datetime object
Returns:
string, a formatted string for date, time, and time zone
"""
return datetime.datetime.strftime(dt, TIME_FORMAT)
def epoch_to_datetime(epc):
""" translate seconds since epoch to a datetime object, UTC as timezone
Args:
epc (int) : seconds since epoch
Returns:
datetime, a datetime object with UTC as timezone
"""
return datetime.datetime.fromtimestamp(epc, pytz.utc)
def epoch_to_string(epc):
""" translate seconds since epoch to a formatted string
Args:
epc (int) : seconds since epoch
Returns:
string, a formatted string for date, time
"""
return datetime_to_string(epoch_to_datetime(epc))
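# Usage sketch (not part of the original module): a round trip through the
# helpers above. The constant is easy to check by hand: 2019-01-01 00:00:00 UTC
# is 1546300800 seconds since the epoch.
if __name__ == '__main__':
    seconds = string_to_epoch('2019-01-01 00:00:00 +0000')
    assert seconds == 1546300800
    print(epoch_to_string(seconds))  # '2019-01-01 00:00:00 +0000'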
| 3.015625
| 3
|
part2/test4.py
|
ultimus11/Carbon-Emission-Calculator
| 4
|
12784235
|
<reponame>ultimus11/Carbon-Emission-Calculator<filename>part2/test4.py
import re
import os
import cv2
import time
import pyautogui
import pytesseract
import numpy as np
from PIL import Image
from grabScreen import grab_screen
#Create Black window and show instructions on it
image=np.zeros(shape=[200,800,3],dtype=np.uint8)
textt = "Bring Cursor near to distance Then press P"
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,
textt,
(0, 30),
font, 0.5,
(0, 255, 255),
1,
cv2.LINE_4)
while True:
cv2.imshow('frame',image)
#See if user wants to check again pressing c will clear window
textt = "Press C to clear screen to check emision again if already checked"
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,
textt,
(0, 150),
font, 0.5,
(0, 255, 255),
1,
cv2.LINE_4)
k = cv2.waitKey(0) & 0xFF
print(k)
# see if user wants to check emissions
print(ord('p'))
if k == ord('p'):
x,y=pyautogui.position()
print("ok",x,y)
x=int(x)-35
y=int(y)-20
xx = x+60
yy = y+20
capture_screen, image_1=grab_screen(region=(int(x),int(y),xx,yy))
cv2.imwrite("{}.png".format("woww"),capture_screen)
text = pytesseract.image_to_string(Image.open("woww.png"))
#show carbon emissions if some value for distance is extracted through OCR
try:
print(text[:3])
distance = re.findall("\d+\.\d+", text)
print(distance[0])
distance = distance[0]
textt = str((float(distance)*150)/1000)+" Kg of CO2 is going to be emmited for petrol car"
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,
textt,
(0, 60),
font, 0.5,
(255, 255, 255),
1,
cv2.LINE_4)
            textt = str((float(distance)*130)/1000)+" Kg of CO2 is going to be emitted by a diesel car"
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,
textt,
(0, 95),
font, 0.5,
(255, 255, 255),
1,
cv2.LINE_4)
cv2.destroyAllWindows()
except IndexError:
pass
# Check if user wants to exit
elif k == ord('q'):
break
#Clear window if user wants
elif k == ord('c'):
image=np.zeros(shape=[200,800,3],dtype=np.uint8)
textt = "Bring Cursor near to distance Then press P"
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,
textt,
(0, 30),
font, 0.5,
(0, 255, 255),
1,
cv2.LINE_4)
| 2.984375
| 3
|
Taller 2/S.py
|
simondavilas/analisis-numerico
| 1
|
12784236
|
from matplotlib import pyplot as pl
def factorial(n):
if n==0:
return 1
else:
return n*factorial(n-1)
def C(n,i):
return factorial(n)/(factorial(n-i)*factorial(i))
def Spline(n,puntos):
coeficientesx = []
coeficientesy = []
for i in range(n+1):
Coef = C(n,i)
coeficientesx.append(Coef*puntos[i][0])
coeficientesy.append(Coef*puntos[i][1])
return [coeficientesx,coeficientesy]
def B(n,t,coef):
ans = 0
for i in range(n+1):
ans += coef[i]*((1-t)**(n-i))*(t**i)
return ans
def graficar(n,T,coeficientes):
x = []
y = []
for t in T:
x.append(B(n,t,coeficientes[0]))
y.append(B(n,t,coeficientes[1]))
pl.plot(x,y)
pl.show()
return None
T = []
for i in range(100):
T.append(i/100.0)
puntos = [[1.67,4.33],[0.96,4.33],[0.38,4.23],[-0.23,4.22],[-0.69,3.88],[-0.99,3.54],[-1,3],
          [-0.84,2.66],[-0.48,2.43],[-0.04,2.30],[0.49,2.56],[1.09,2.31],[1.67,2.25],[2.14,1.97],
          [2.41,1.56],[2.43,1.06],[2.14,0.72],[1.63,0.62],[1.07,0.60],[0.52,0.58],[0.07,0.54],
          [-0.32,0.54],[-0.79,0.55]]
n = len(puntos)-1
coeficientes = Spline(n,puntos)
graficar(n,T,coeficientes)
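# Sanity check (a sketch, not part of the original script): for the hypothetical
# control points (0,0), (1,2), (2,0), the quadratic Bezier curve defined by the
# Bernstein evaluation above passes through (1.0, 1.0) at t = 0.5.
coef_check = Spline(2, [[0, 0], [1, 2], [2, 0]])
assert abs(B(2, 0.5, coef_check[0]) - 1.0) < 1e-9
assert abs(B(2, 0.5, coef_check[1]) - 1.0) < 1e-9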
| 3.28125
| 3
|
wapps/migrations/0007_add_static_page.py
|
apihackers/wapps
| 7
|
12784237
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-12 00:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.wagtailcore.fields
from wapps.utils import get_image_model
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('taggit', '0002_auto_20150616_2121'),
('wagtailimages', '0013_make_rendition_upload_callable'),
('wapps', '0006_add_identity_logo_with_custom_image_model'),
]
operations = [
migrations.CreateModel(
name='StaticPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', models.TextField(blank=True, help_text='An optional introduction used as page heading and summary', null=True, verbose_name='Introduction')),
('body', wagtail.wagtailcore.fields.RichTextField(help_text='The main page content', verbose_name='Body')),
('image_full', models.BooleanField(default=False, help_text='Use the fully sized image', verbose_name='Fully sized image')),
('seo_type', models.CharField(choices=[('article', 'Article'), ('service', 'Service')], help_text='What does this page represents', max_length=10, verbose_name='Search engine type')),
('image', models.ForeignKey(blank=True, help_text='The main page image (seen when shared)', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=get_image_model())),
],
options={
'verbose_name': 'Static Page',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StaticPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='wapps.StaticPage')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wapps_staticpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='staticpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='wapps.StaticPageTag', to='taggit.Tag', verbose_name='Tags'),
),
]
| 1.726563
| 2
|
tests/test_qrcodes.py
|
dwisulfahnur/python-xendit-client
| 4
|
12784238
|
<filename>tests/test_qrcodes.py
from .context import XenditClient, QRCodesClient
def test_qrcode_class():
xendit_client = XenditClient(api_key='apikey')
qrcode = QRCodesClient(xendit_client)
assert qrcode
assert isinstance(qrcode, QRCodesClient)
assert isinstance(qrcode.client, XenditClient)
def test_qrcode_from_xendit_client():
xendit_client = XenditClient(api_key='apikey')
assert hasattr(xendit_client, 'qrcode')
assert isinstance(xendit_client.qrcode, QRCodesClient)
assert isinstance(xendit_client.qrcode.client, XenditClient)
def test_qrcode_client():
xendit_client = XenditClient(api_key='apikey')
qrcode = QRCodesClient(xendit_client)
assert hasattr(qrcode, 'get_url')
assert hasattr(qrcode, 'create')
assert hasattr(qrcode, 'get_payment_detail')
def test_create_wrong_qrcode_type():
    xendit_client = XenditClient(api_key='')
    err = None
    try:
        xendit_client.qrcode.create('nottype', 'INV1', 'http://callbackurl', 10000)
    except Exception as e:
        err = e
    assert err is not None
    assert err.__class__.__name__ == 'InvalidQrCodeType'
| 2.625
| 3
|
pyorient/ogm/vertex.py
|
spy7/pyorient
| 142
|
12784239
|
<reponame>spy7/pyorient
from .element import GraphElement
from .broker import VertexBroker
class Vertex(GraphElement):
Broker = VertexBroker
# TODO
# Edge information is carried in vertexes retrieved from database,
# as OrientBinaryObject. Can likely optimise these traversals
# when we know how to parse these.
def outE(self, *edge_classes):
g = self._graph
return g.outE(self._id, *edge_classes) if g else None
def inE(self, *edge_classes):
g = self._graph
return g.inE(self._id, *edge_classes) if g else None
def bothE(self, *edge_classes):
g = self._graph
return g.bothE(self._id, *edge_classes) if g else None
def out(self, *edge_classes):
g = self._graph
return g.out(self._id, *edge_classes) if g else None
def in_(self, *edge_classes):
g = self._graph
return g.in_(self._id, *edge_classes) if g else None
def both(self, *edge_classes):
g = self._graph
return g.both(self._id, *edge_classes) if g else None
def __call__(self, edge_or_broker):
"""Provides syntactic sugar for creating edges."""
if hasattr(edge_or_broker, 'broker'):
edge_or_broker = edge_or_broker.broker.element_cls
elif hasattr(edge_or_broker, 'element_cls'):
edge_or_broker = edge_or_broker.element_cls
if edge_or_broker.decl_type == 1:
return VertexVector(self, edge_or_broker.objects)
class VertexVector(object):
def __init__(self, origin, edge_broker, **kwargs):
self.origin = origin
self.edge_broker = edge_broker
self.kwargs = kwargs
def __gt__(self, target):
"""Syntactic sugar for creating an edge.
:param target: If a batch variable, return a command for creating an
edge to this vertex. Otherwise, create the edge.
"""
if hasattr(target, '_id'):
if target._id[0] == '$':
return self.edge_broker.create_command(
self.origin, target, **self.kwargs)
else:
return self.edge_broker.create(
self.origin, target, **self.kwargs)
return self
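# Usage sketch (not part of the module), under the following assumptions:
# ``alice`` and ``bob`` are Vertex instances bound to the same Graph, and
# ``Knows`` is a declared relationship class (``decl_type == 1``). The
# call/comparison chain below uses the syntactic sugar defined above to create
# a Knows edge from alice to bob, or to return a batch command when ``bob`` is
# a batch variable (an id starting with ``$``):
#
#     alice(Knows) > bob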
| 2.578125
| 3
|
embeddings.py
|
mertkosan/spectral-clustering
| 0
|
12784240
|
import numpy as np
import scipy.sparse as sp
import datasets
import utils
import argparse
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cora', help='Datasets: cora, email, ssets')
parser.add_argument('--version', default='1', help='version for ssets, default 1 for others')
args = parser.parse_args()
if __name__ == '__main__':
A, labels = datasets.load_graph(args.dataset, args.version)
# dense
if not isinstance(A, np.ndarray):
A = np.array(A.todense())
L = utils.laplacian(A)
N = utils.normalized_laplacian(A)
# sparse
A = sp.csr_matrix(A)
L = sp.csr_matrix(L)
N = sp.csr_matrix(N)
matrices = {
'A': A,
'L': L,
'N': N
}
for matrix_id in matrices:
matrix = matrices[matrix_id]
eig_val, eig_vec = np.linalg.eigh(matrix.todense())
path = f"{args.dataset}/embeddings/{matrix_id}_{args.dataset}_v{args.version}.npy"
np.save(path, eig_vec)
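    # Possible next step (a sketch, not part of the original script): spectral
    # clustering runs k-means on the eigenvectors saved above. The cluster
    # count below is an assumption and should match the dataset at hand.
    #
    #     from sklearn.cluster import KMeans
    #     k = 7
    #     embedding = np.asarray(eig_vec)[:, :k]   # smallest-eigenvalue eigenvectors (for L or N)
    #     labels_pred = KMeans(n_clusters=k).fit_predict(embedding)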
| 2.265625
| 2
|
PythonStarter/6_7_1_1.py
|
fsbd1285228/PythonCodes
| 0
|
12784241
|
<reponame>fsbd1285228/PythonCodes
'''Programme for Exercise 6.1 - Lists'''
'''write a small database and some code to query it using lists'''
#import numpy
#Construct the List
FriendList = ['Jack','Lucy','Beta','Sophie','Luella','Audrey','Allen','Will','Jason','Ashe']
FriendYear = [1992,1986,1974,1969,1965,1995,2001,1991,1980,1989]
i = 0
j = 0
Check = raw_input('Enter a name: ')
if Check in FriendList:
i = FriendList.index(Check)
j = i + 1
print 'You have 10 friends'
print '%s is number %d in your list' %(Check, j)
print '%s was born in %d' %(Check, FriendYear[i])
else:
print 'This person is not in the list'
#while j <= 9
# if FriendList[0,j] == Check:
# print 'You have 10 friends'
# print '%s is number %d in your list', (%Check, %j+1)
# print '%s was born in %d', (%Check, %FriendList[1,j])
# else:
| 3.828125
| 4
|
exec_replication.py
|
takutakahashi/z8r
| 0
|
12784242
|
<reponame>takutakahashi/z8r<filename>exec_replication.py<gh_stars>0
from lib.k8s import Replication
import subprocess
import os
repl = Replication()
print(repl.make_repl_dataset())
for replset in repl.make_repl_dataset():
src_host, src_pool = replset["master"].split(":")
dst_host, dst_pool = replset["replica"].split(":")
cmd = ["/replication.sh", src_host, src_pool, dst_host, dst_pool]
if os.environ.get("DEBUG") == "true":
cmd = ["echo", "/replication.sh", src_host, src_pool, dst_host, dst_pool]
print("sync started: {} to {}".format(
replset["master"], replset["replica"]))
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if proc.returncode != 0:
print("{} sync failed : {}".format(
src_pool, proc.stderr.decode("utf-8")))
else:
print("{} to {} sync succeeded".format(src_pool, dst_pool))
| 2.140625
| 2
|
external_push/admin.py
|
fossabot/fermentrack
| 114
|
12784243
|
<reponame>fossabot/fermentrack
from django.contrib import admin
from external_push.models import GenericPushTarget, BrewersFriendPushTarget, BrewfatherPushTarget, ThingSpeakPushTarget, GrainfatherPushTarget
@admin.register(GenericPushTarget)
class GenericPushTargetAdmin(admin.ModelAdmin):
list_display = ('name', 'status', 'target_host')
@admin.register(BrewersFriendPushTarget)
class BrewersFriendPushTargetAdmin(admin.ModelAdmin):
list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
@admin.register(BrewfatherPushTarget)
class BrewfatherPushTargetAdmin(admin.ModelAdmin):
list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
@admin.register(ThingSpeakPushTarget)
class ThingSpeakPushTargetAdmin(admin.ModelAdmin):
list_display = ('name', 'status')
@admin.register(GrainfatherPushTarget)
class GrainfatherPushTargetAdmin(admin.ModelAdmin):
list_display = ('gravity_sensor_to_push', 'status', 'push_frequency')
| 1.726563
| 2
|
train.py
|
swapnamoy17/EAST-DenseNet
| 0
|
12784244
|
<filename>train.py
import time
import os
import io
import shutil
import numpy as np
from PIL import Image
import tensorflow as tf
import argparse
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint, Callback
try:
from keras.utils.training_utils import multi_gpu_model
except ImportError:
from keras.utils.multi_gpu_utils import multi_gpu_model
from keras.utils import plot_model
from keras.optimizers import Adam, SGD
import keras.backend as K
from adamw import AdamW
from losses import dice_loss, rbox_loss
parser = argparse.ArgumentParser()
parser.add_argument('--input_size', type=int, default=512) # input size for training of the network
parser.add_argument('--batch_size', type=int, default=16) # batch size for training
parser.add_argument('--nb_workers', type=int, default=4) # number of processes to spin up when using process based threading, as defined in https://keras.io/models/model/#fit_generator
parser.add_argument('--n', type=int, default=1) # number of epochs the model has already been trained for
parser.add_argument('--init_learning_rate', type=float, default=0.0001) # initial learning rate
parser.add_argument('--lr_decay_rate', type=float, default=0.94) # decay rate for the learning rate
parser.add_argument('--lr_decay_steps', type=int, default=80) # number of steps after which the learning rate is decayed by decay rate
parser.add_argument('--max_epochs', type=int, default=800) # maximum number of epochs
parser.add_argument('--gpu_list', type=str, default='0') # list of gpus to use
parser.add_argument('--checkpoint_path', type=str, default='tmp/east_resnet_50_rbox') # path to a directory to save model checkpoints during training
parser.add_argument('--save_checkpoint_epochs', type=int, default=5) # period at which checkpoints are saved (defaults to every 5 epochs)
parser.add_argument('--restore_model', type=str, default='')
parser.add_argument('--training_data_path', type=str,nargs='+',required=True) # path to training data
parser.add_argument('--validation_data_path', type=str,required=True) # path to validation data
parser.add_argument('--max_image_large_side', type=int, default=1280) # maximum size of the large side of a training image before cropping a patch for training
parser.add_argument('--model_type', type=str, default='resnet50')
parser.add_argument('--max_text_size', type=int, default=800) # maximum size of a text instance in an image; image resized if this limit is exceeded
parser.add_argument('--min_text_size', type=int, default=10) # minimum size of a text instance; if smaller, then it is ignored during training
parser.add_argument('--min_crop_side_ratio', type=float, default=0.1) # the minimum ratio of min(H, W), the smaller side of the image, when taking a random crop from the input image
parser.add_argument('--geometry', type=str, default='RBOX') # geometry type to be used; only RBOX is implemented now, but the original paper also uses QUAD
parser.add_argument('--suppress_warnings_and_error_messages', type=bool, default=True) # whether to show error messages and warnings during training (some error messages during training are expected to appear because of the way patches for training are created)
parser.add_argument('--val_loss', type=float, default=0)
parser.add_argument('--previous_val_loss', type=float, default=0)
parser.add_argument('--validation_period', type=int, default=1)
parser.add_argument('--loss_file_name', type=str)
parser.add_argument('--train_val_ratio',type=float, default=0.875)
parser.add_argument('--dataset',type=str, required=True)
FLAGS = parser.parse_args()
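# Example invocation (a sketch; the paths and file name below are hypothetical):
#   python train.py --dataset icdar15 \
#       --training_data_path /data/icdar15/train \
#       --validation_data_path /data/icdar15/val \
#       --loss_file_name val_loss.txt --model_type densenet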
gpus = list(range(len(FLAGS.gpu_list.split(','))))
class CustomModelCheckpoint(Callback):
def __init__(self, model, path, period, save_weights_only):
super(CustomModelCheckpoint, self).__init__()
self.period = period
self.path = path
# We set the model (non multi gpu) under an other name
self.model_for_saving = model
self.epochs_since_last_save = 0
self.save_weights_only = save_weights_only
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
if ((FLAGS.n+epoch)%self.period)==0:
self.epochs_since_last_save = 0
print('previous_val_loss=',FLAGS.previous_val_loss)
print('val_loss=',FLAGS.val_loss)
if FLAGS.val_loss< FLAGS.previous_val_loss:
FLAGS.previous_val_loss=FLAGS.val_loss
loss_file_path=os.path.join(FLAGS.checkpoint_path,FLAGS.loss_file_name)
q=open(loss_file_path,'w')
q.write(str(FLAGS.previous_val_loss))
q.close()
print('The val_loss has reduced, so model saved.')
if self.save_weights_only:
self.model_for_saving.save_weights(self.path.format(epoch=epoch + FLAGS.n, **logs), overwrite=True)
else:
self.model_for_saving.save(self.path.format(epoch=epoch + FLAGS.n, **logs), overwrite=True)
elif ((FLAGS.n+epoch)%FLAGS.save_checkpoint_epochs)==0:
print('No val_loss reduction, but saving after {} epochs'.format(FLAGS.save_checkpoint_epochs))
if self.save_weights_only:
self.model_for_saving.save_weights(self.path.format(epoch=epoch + FLAGS.n, **logs), overwrite=True)
else:
self.model_for_saving.save(self.path.format(epoch=epoch + FLAGS.n, **logs), overwrite=True)
else:
print('THE val_loss has not reduced so model not saved.')
def make_image_summary(tensor):
"""
    Convert a numpy image representation to an Image protobuf.
Copied from https://github.com/lanpa/tensorboard-pytorch/
"""
if len(tensor.shape) == 2:
height, width = tensor.shape
channel = 1
else:
height, width, channel = tensor.shape
if channel == 1:
tensor = tensor[:, :, 0]
image = Image.fromarray(tensor)
output = io.BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return tf.Summary.Image(height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string)
class CustomTensorBoard(TensorBoard):
def __init__(self, log_dir, score_map_loss_weight, small_text_weight, data_generator, write_graph=False):
self.score_map_loss_weight = score_map_loss_weight
self.small_text_weight = small_text_weight
self.data_generator = data_generator
super(CustomTensorBoard, self).__init__(log_dir=log_dir, write_graph=write_graph)
def on_epoch_end(self, epoch, logs=None):
logs.update({'learning_rate': K.eval(self.model.optimizer.lr), 'small_text_weight': K.eval(self.small_text_weight)})
data = next(self.data_generator)
pred_score_maps, pred_geo_maps = self.model.predict([data[0][0], data[0][1], data[0][2], data[0][3]])
img_summaries = []
for i in range(3):
input_image_summary = make_image_summary(((data[0][0][i] + 1) * 127.5).astype('uint8'))
overly_small_text_region_training_mask_summary = make_image_summary((data[0][1][i] * 255).astype('uint8'))
text_region_boundary_training_mask_summary = make_image_summary((data[0][2][i] * 255).astype('uint8'))
target_score_map_summary = make_image_summary((data[1][0][i] * 255).astype('uint8'))
pred_score_map_summary = make_image_summary((pred_score_maps[i] * 255).astype('uint8'))
img_summaries.append(tf.Summary.Value(tag='input_image/%d' % i, image=input_image_summary))
img_summaries.append(tf.Summary.Value(tag='overly_small_text_region_training_mask/%d' % i, image=overly_small_text_region_training_mask_summary))
img_summaries.append(tf.Summary.Value(tag='text_region_boundary_training_mask/%d' % i, image=text_region_boundary_training_mask_summary))
img_summaries.append(tf.Summary.Value(tag='score_map_target/%d' % i, image=target_score_map_summary))
img_summaries.append(tf.Summary.Value(tag='score_map_pred/%d' % i, image=pred_score_map_summary))
for j in range(4):
target_geo_map_summary = make_image_summary((data[1][1][i, :, :, j] / FLAGS.input_size * 255).astype('uint8'))
pred_geo_map_summary = make_image_summary((pred_geo_maps[i, :, :, j] / FLAGS.input_size * 255).astype('uint8'))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_target/%d' % (j, i), image=target_geo_map_summary))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_pred/%d' % (j, i), image=pred_geo_map_summary))
target_geo_map_summary = make_image_summary(((data[1][1][i, :, :, 4] + 1) * 127.5).astype('uint8'))
pred_geo_map_summary = make_image_summary(((pred_geo_maps[i, :, :, 4] + 1) * 127.5).astype('uint8'))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_target/%d' % (4, i), image=target_geo_map_summary))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_pred/%d' % (4, i), image=pred_geo_map_summary))
tf_summary = tf.Summary(value=img_summaries)
self.writer.add_summary(tf_summary, epoch + FLAGS.n)
super(CustomTensorBoard, self).on_epoch_end(epoch + FLAGS.n, logs)
class SmallTextWeight(Callback):
def __init__(self, weight):
self.weight = weight
# TO BE CHANGED
def on_epoch_end(self, epoch, logs={}):
#K.set_value(self.weight, np.minimum(epoch / (0.5 * FLAGS.max_epochs), 1.))
K.set_value(self.weight, 0)
class ValidationEvaluator(Callback):
def __init__(self, validation_data, validation_log_dir, period=1):
super(Callback, self).__init__()
self.period = period
self.validation_data = validation_data
self.validation_log_dir = validation_log_dir
self.val_writer = tf.summary.FileWriter(self.validation_log_dir)
def on_epoch_end(self, epoch, logs={}):
if (epoch + FLAGS.n) % self.period == 0:
val_loss, val_score_map_loss, val_geo_map_loss = self.model.evaluate([self.validation_data[0], self.validation_data[1], self.validation_data[2], self.validation_data[3]],
[self.validation_data[3], self.validation_data[4]],
batch_size=FLAGS.batch_size)
FLAGS.val_loss=val_loss
            print('\nEpoch %d: val_loss: %.4f, val_score_map_loss: %.4f, val_geo_map_loss: %.4f' % (epoch + FLAGS.n, val_loss, val_score_map_loss, val_geo_map_loss))
val_loss_summary = tf.Summary()
val_loss_summary_value = val_loss_summary.value.add()
val_loss_summary_value.simple_value = val_loss
val_loss_summary_value.tag = 'loss'
self.val_writer.add_summary(val_loss_summary, epoch + FLAGS.n)
val_score_map_loss_summary = tf.Summary()
val_score_map_loss_summary_value = val_score_map_loss_summary.value.add()
val_score_map_loss_summary_value.simple_value = val_score_map_loss
val_score_map_loss_summary_value.tag = 'pred_score_map_loss'
self.val_writer.add_summary(val_score_map_loss_summary, epoch + FLAGS.n)
val_geo_map_loss_summary = tf.Summary()
val_geo_map_loss_summary_value = val_geo_map_loss_summary.value.add()
val_geo_map_loss_summary_value.simple_value = val_geo_map_loss
val_geo_map_loss_summary_value.tag = 'pred_geo_map_loss'
self.val_writer.add_summary(val_geo_map_loss_summary, epoch + FLAGS.n)
pred_score_maps, pred_geo_maps = self.model.predict([self.validation_data[0][0:3], self.validation_data[1][0:3], self.validation_data[2][0:3], self.validation_data[3][0:3]])
img_summaries = []
for i in range(3):
input_image_summary = make_image_summary(((self.validation_data[0][i] + 1) * 127.5).astype('uint8'))
overly_small_text_region_training_mask_summary = make_image_summary((self.validation_data[1][i] * 255).astype('uint8'))
text_region_boundary_training_mask_summary = make_image_summary((self.validation_data[2][i] * 255).astype('uint8'))
target_score_map_summary = make_image_summary((self.validation_data[3][i] * 255).astype('uint8'))
pred_score_map_summary = make_image_summary((pred_score_maps[i] * 255).astype('uint8'))
img_summaries.append(tf.Summary.Value(tag='input_image/%d' % i, image=input_image_summary))
img_summaries.append(tf.Summary.Value(tag='overly_small_text_region_training_mask/%d' % i, image=overly_small_text_region_training_mask_summary))
img_summaries.append(tf.Summary.Value(tag='text_region_boundary_training_mask/%d' % i, image=text_region_boundary_training_mask_summary))
img_summaries.append(tf.Summary.Value(tag='score_map_target/%d' % i, image=target_score_map_summary))
img_summaries.append(tf.Summary.Value(tag='score_map_pred/%d' % i, image=pred_score_map_summary))
for j in range(4):
target_geo_map_summary = make_image_summary((self.validation_data[4][i, :, :, j] / FLAGS.input_size * 255).astype('uint8'))
pred_geo_map_summary = make_image_summary((pred_geo_maps[i, :, :, j] / FLAGS.input_size * 255).astype('uint8'))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_target/%d' % (j, i), image=target_geo_map_summary))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_pred/%d' % (j, i), image=pred_geo_map_summary))
target_geo_map_summary = make_image_summary(((self.validation_data[4][i, :, :, 4] + 1) * 127.5).astype('uint8'))
pred_geo_map_summary = make_image_summary(((pred_geo_maps[i, :, :, 4] + 1) * 127.5).astype('uint8'))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_target/%d' % (4, i), image=target_geo_map_summary))
img_summaries.append(tf.Summary.Value(tag='geo_map_%d_pred/%d' % (4, i), image=pred_geo_map_summary))
tf_summary = tf.Summary(value=img_summaries)
self.val_writer.add_summary(tf_summary, epoch + FLAGS.n)
self.val_writer.flush()
def lr_decay(epoch):
    return FLAGS.init_learning_rate * np.power(FLAGS.lr_decay_rate, (epoch + FLAGS.n) // FLAGS.lr_decay_steps)
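# Hypothetical illustration of the step decay above (assumed flag values, not the
# project's defaults): with init_learning_rate=1e-3, lr_decay_rate=0.94 and
# lr_decay_steps=10, a fresh run (FLAGS.n=0) trains epochs 0-9 at 1e-3,
# epochs 10-19 at 0.94e-3, epochs 20-29 at 0.94**2 * 1e-3, and so on;
# FLAGS.n simply offsets the epoch count when training is resumed.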
def main(argv=None):
print("change5")
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
# check if checkpoint path exists
if not os.path.exists(FLAGS.checkpoint_path):
os.mkdir(FLAGS.checkpoint_path)
if FLAGS.dataset=='icdar15':
import data_processor_icdar15 as data_processor
elif FLAGS.dataset=='icdar13':
import data_processor_icdar13 as data_processor
elif FLAGS.dataset=='mlt':
import data_processor_mlt as data_processor
    else:
        raise ValueError('Unknown dataset: {} (expected icdar15, icdar13 or mlt)'.format(FLAGS.dataset))
print('training_data_path=',FLAGS.training_data_path)
train_data_generator = data_processor.generator(FLAGS)
train_samples_count = data_processor.count_samples(FLAGS)
print('-------------------No. of samples in training: ',train_samples_count)
val_data = data_processor.load_data(FLAGS)
    # Select the backbone implementation once, so both the single-GPU and
    # multi-GPU paths below can construct the model.
    if FLAGS.model_type == 'resnet':
        print('Importing ResNet50')
        from model import EAST_model
    elif FLAGS.model_type == 'inceptionv3':
        print('Importing Inception V3')
        from model2 import EAST_model
    elif FLAGS.model_type == 'densenet':
        print('Importing DenseNet 201')
        from model3 import EAST_model
    elif FLAGS.model_type == 'vit':
        print('Importing Visual Transformer')
        from ViT import EAST_model
    else:
        raise ValueError('Unknown model_type: {}'.format(FLAGS.model_type))
    if len(gpus) <= 1:
        print('Training with 1 GPU')
        east = EAST_model(FLAGS.input_size)
        if FLAGS.restore_model != '':
            print('Loading weights from {}'.format(FLAGS.restore_model))
            east.model.load_weights(FLAGS.restore_model)
        parallel_model = east.model
    else:
        print('Training with %d GPUs' % len(gpus))
        with tf.device("/cpu:0"):
            east = EAST_model(FLAGS.input_size)
            if FLAGS.restore_model != '':
                print('Loading weights from {}'.format(FLAGS.restore_model))
                east.model.load_weights(FLAGS.restore_model)
        parallel_model = multi_gpu_model(east.model, gpus=len(gpus))
loss_file_path=os.path.join(FLAGS.checkpoint_path,FLAGS.loss_file_name)
    with open(loss_file_path, 'r') as q:
        FLAGS.previous_val_loss = float(q.readline())
print('previous val loss:',FLAGS.previous_val_loss)
score_map_loss_weight = K.variable(0.01, name='score_map_loss_weight')
small_text_weight = K.variable(0., name='small_text_weight')
lr_scheduler = LearningRateScheduler(lr_decay,verbose=1)
ckpt = CustomModelCheckpoint(model=east.model, path=FLAGS.checkpoint_path + '/model-{epoch:02d}.h5', period=FLAGS.validation_period, save_weights_only=True)
tb = CustomTensorBoard(log_dir=FLAGS.checkpoint_path + '/train', score_map_loss_weight=score_map_loss_weight, small_text_weight=small_text_weight, data_generator=train_data_generator, write_graph=True)
small_text_weight_callback = SmallTextWeight(small_text_weight)
validation_evaluator = ValidationEvaluator(val_data, validation_log_dir=FLAGS.checkpoint_path + '/val',period=FLAGS.validation_period)
callbacks = [lr_scheduler, tb, small_text_weight_callback, validation_evaluator, ckpt]
#callbacks2 = ModelCheckpoint('/content/drive/My Drive/EAST-Master/checkpoint2/model-{epoch:02d}-{loss:.2f}.h5', monitor='loss', verbose=1, save_best_only=True, mode='min',period=5)
print('Initial learning rate:{}'.format(FLAGS.init_learning_rate))
opt = AdamW(FLAGS.init_learning_rate)
parallel_model.compile(loss=[dice_loss(east.overly_small_text_region_training_mask, east.text_region_boundary_training_mask, score_map_loss_weight, small_text_weight),
rbox_loss(east.overly_small_text_region_training_mask, east.text_region_boundary_training_mask, small_text_weight, east.target_score_map)],
loss_weights=[1., 1.],
optimizer=opt)
east.model.summary()
model_json = east.model.to_json()
with open(FLAGS.checkpoint_path + '/model.json', 'w') as json_file:
json_file.write(model_json)
# print('Saving model diagram!')
# tf.keras.utils.plot_model(
# east.model,
# to_file="model1.png",
# show_shapes=True,
# show_layer_names=True,
# rankdir="TB",
# expand_nested=False,
# dpi=96,)
    history = parallel_model.fit_generator(train_data_generator, epochs=FLAGS.max_epochs, steps_per_epoch=train_samples_count // FLAGS.batch_size, workers=FLAGS.nb_workers, use_multiprocessing=False, max_queue_size=10, callbacks=callbacks, verbose=1)
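    # Example launch (hedged sketch; the script name "train.py" and the flag values
    # are hypothetical, but the flag names are the FLAGS attributes used above):
    #   python train.py --gpu_list=0 --dataset=icdar15 --model_type=resnet \
    #       --checkpoint_path=checkpoints --batch_size=8 --max_epochs=800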
if __name__ == '__main__':
main()
| 2.171875
| 2
|
tasks/__phantom_web_app.py
|
Dan6erbond/anti-crypto-scammer
| 1
|
12784245
|
<gh_stars>1-10
import requests
from lib.exponential_runner import run_exponential
from lib.scam_logger import get_logger, get_test_logger
from lib.seed_phrase import generate_curse_seed_phrase
# Strip the leading "tasks." package prefix from the module name; str.lstrip()
# removes a set of characters rather than a prefix, so split on the first dot instead.
logger = get_logger(__name__.split(".", 1)[-1])
root = "https://cloudrun.vercel.app/"
@run_exponential()
def main(runs):
# Send a POST request imitating form data
try:
passphrase = generate_curse_seed_phrase()
r = requests.post(
root + "order",
json={
"passphrase": passphrase,
"provider": "Phantom",
},
timeout=(10, 200),
)
if r.status_code == 200:
logger.info(
f"[Request Nr. {runs}] Successfully sent request to {root} with seed phrase: {passphrase}")
else:
logger.error(
f"[Request Nr. {runs}] Failed to send request to {root}:\n" +
str(r.status_code) + "\n" + r.text)
except Exception as e:
logger.error(f"[Request Nr. {runs}] Failed to send request to {root}:\n" + str(e))
raise e
if __name__ == "__main__":
logger = get_test_logger(__name__)
main(1)
| 2.390625
| 2
|
conclusion_of_the_squares_in_a_spiral.py
|
FoxProklya/Step-Python
| 0
|
12784246
|
<filename>conclusion_of_the_squares_in_a_spiral.py
n = int(input())
max_n = n*n
a = [[0 for j in range(n)] for i in range(n)]
step = 1
i, j = 0, 0
i_min, j_min = 0, 0
i_max, j_max = n, n
while step <= max_n:
    while j < j_max:  # move right
        a[i][j] = step
        j += 1
        step += 1
    j -= 1
    i += 1
    while i < i_max:  # move down
        a[i][j] = step
        i += 1
        step += 1
    i -= 1
    i_max -= 1
    j_max -= 1
    while j > j_min:  # move left
        j -= 1
        a[i][j] = step
        step += 1
    i -= 1
    i_min += 1
    while i > i_min:  # move up
        a[i][j] = step
        step += 1
        i -= 1
    j_min += 1
for i in range(n):
    for j in range(n):
        print(a[i][j], end=' ')
    print()
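# Sanity check (obtained by tracing the loops above): for n = 3 the printed grid is
#   1 2 3
#   8 9 4
#   7 6 5
# i.e. the numbers 1..n*n laid out clockwise starting from the top-left corner.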
| 3.484375
| 3
|
_sadm/plugin/skel/__init__.py
|
jrmsdev/pysadm
| 1
|
12784247
|
# Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
### from _sadm.configure import register
### register(__name__, __file__)
| 1.195313
| 1
|
stores/api/urls.py
|
ryan-blunden/restful-coffee
| 0
|
12784248
|
<gh_stars>0
from django.conf.urls import url
from stores.api.views import ClosestStoresList
urlpatterns = [
url(r'^closest/$', ClosestStoresList.as_view(), name='api-closest-stores'),
]
| 1.46875
| 1
|
soup_follow_scraper.py
|
RonKbS/web_scraping
| 0
|
12784249
|
# https://teamtreehouse.com/library/everyone-loves-charlotte
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
site_links = []
def internal_links(linkURL):
html = urlopen('https://treehouse-projects.github.io/horse-land/{}'.format(linkURL))
soup = BeautifulSoup(html, 'html.parser')
return soup.find('a', href=re.compile('(.html)$'))
if __name__ == '__main__':
urls = internal_links('index.html')
while len(urls) > 0:
page = urls.attrs['href']
if page not in site_links:
site_links.append(page)
print(page)
print('\n==============\n')
urls = internal_links(page)
else:
break
| 3.15625
| 3
|
word2vec.py
|
MirunaPislar/Word2vec
| 13
|
12784250
|
import glob
import random
import numpy as np
import os.path as op
import cPickle as pickle
from utils.treebank import StanfordSentiment
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import time
# Softmax function, optimized such that larger inputs are still feasible
# softmax(x + c) = softmax(x)
def softmax(x):
orig_shape = x.shape
if len(x.shape) > 1:
x = x - np.max(x, axis = 1, keepdims = True)
exp_x = np.exp(x)
x = exp_x / np.sum(exp_x, axis = 1, keepdims = True)
else:
x = x - np.max(x, axis = 0)
exp_x = np.exp(x)
x = exp_x / np.sum(exp_x, axis = 0)
assert x.shape == orig_shape
return x
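# Worked example of the shift invariance used above: softmax([1001, 1002]) is
# evaluated as softmax([-1, 0]) after subtracting the row maximum, giving roughly
# [0.269, 0.731] without overflowing np.exp (the same values checked in test_softmax below).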
# Implementation for the sigmoid function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Derivative of sigmoid function
def sigmoid_grad(sigmoid):
return sigmoid * (1 - sigmoid)
# Gradient checker for a function f
# f is a function that takes a single argument and outputs the cost and its gradients
# x is the point to check the gradient at
def gradient_checker(f, x):
rndstate = random.getstate()
random.setstate(rndstate)
cost, grad = f(x) # Evaluate function value at original point
epsilon = 1e-4 # Tiny shift to the input to compute approximated gradient with formula
# Iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
i = it.multi_index
# Calculate J(theta_minus)
x_minus = np.copy(x)
x_minus[i] = x[i] - epsilon
random.setstate(rndstate)
f_minus = f(x_minus)[0]
# Calculate J(theta_plus)
x_plus = np.copy(x)
x_plus[i] = x[i] + epsilon
random.setstate(rndstate)
f_plus = f(x_plus)[0]
numgrad = (f_plus - f_minus) / (2 * epsilon)
# Compare gradients
reldiff = abs(numgrad - grad[i]) / max(1, abs(numgrad), abs(grad[i]))
if reldiff > 1e-5:
print "Gradient check failed."
print "First gradient error found at index %s" % str(i)
print "Your gradient: %f \t Numerical gradient: %f" % (grad[i], numgrad)
return
it.iternext() # Step to next dimension
print "Gradient check passed!"
# Normalize each row of a matrix to have unit length
def normalizeRows(a):
a = a / np.sqrt(np.sum(a ** 2, axis = 1, keepdims = True))
return a
# Softmax cost and gradients for word2vec models
def softmaxCostAndGradient(predicted, target, outputVectors, dataset):
""" Arguments:
predicted -- numpy ndarray, predicted word vector
target -- the index of the target word
outputVectors -- "output" vectors (as rows) for all tokens
dataset -- needed for negative sampling, unused here.
"""
eachWordProb = softmax(np.dot(predicted, outputVectors.T))
# Cross entropy cost for the softmax word prediction
cost = -np.log(eachWordProb[target])
    # y^ - y (softmax prediction over words minus the one-hot label representation)
eachWordProb[target] -= 1
# The gradient with respect to the predicted word vector
gradPred = np.dot(eachWordProb, outputVectors)
# The gradient with respect to all the other word vectors
grad = eachWordProb[:, np.newaxis] * predicted[np.newaxis, :]
return cost, gradPred, grad
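# In formulas (v_c = predicted, U = outputVectors, y = one-hot target, y_hat = softmax(U v_c)):
#   cost      J        = -log y_hat[target]
#   gradPred  dJ/dv_c  = U^T (y_hat - y)
#   grad      dJ/dU    = (y_hat - y) v_c^T   (outer product, one row per output vector)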
# Sample K indexes which are not the target
def getNegativeSamples(target, dataset, K):
indices = [None] * K
for k in xrange(K):
newidx = dataset.sampleTokenIdx()
while newidx == target:
newidx = dataset.sampleTokenIdx()
indices[k] = newidx
return indices
# Negative sampling cost function for word2vec models
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset, K = 10):
# Arguments: same as softmaxCostAndGradient. K is the sample size
# Sampling of indices
indices = [target]
indices.extend(getNegativeSamples(target, dataset, K))
eachWordProb = np.dot(outputVectors, predicted)
cost = -np.log(sigmoid(eachWordProb[target])) - np.sum(np.log(sigmoid(-eachWordProb[indices[1:]])))
opposite_sign = (1 - sigmoid(-eachWordProb[indices[1:]]))
gradPred = (sigmoid(eachWordProb[target]) - 1) * outputVectors[target] + sum(opposite_sign[:, np.newaxis] * outputVectors[indices[1:]])
grad = np.zeros_like(outputVectors)
grad[target] = (sigmoid(eachWordProb[target]) - 1) * predicted
for k in indices[1:]:
grad[k] += (1.0 - sigmoid(-np.dot(outputVectors[k], predicted))) * predicted
return cost, gradPred, grad
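# In formulas, with u_o the target output vector and u_k the K sampled negatives:
#   J = -log sigmoid(u_o . v_c) - sum_k log sigmoid(-u_k . v_c)
#   dJ/dv_c = (sigmoid(u_o . v_c) - 1) u_o + sum_k (1 - sigmoid(-u_k . v_c)) u_k
# which is what the gradPred / grad computations above implement.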
# Implementation for the skip-gram model in word2vec
def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient=softmaxCostAndGradient):
""" Arguments:
    currentWord -- a string of the current center word
C -- integer, context size
contextWords -- list of no more than 2 * C strings, the context words
tokens -- a dictionary that maps words to their indices in the word vector list
inputVectors -- "input" word vectors (as rows) for all tokens
outputVectors -- "output" word vectors (as rows) for all tokens
word2vecCostAndGradient -- the cost and gradient function for a prediction vector given the target word vectors
"""
# The cost function value for the skip-gram model
cost = 0.0
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
centerWord = tokens[currentWord]
for contextWord in contextWords:
target = tokens[contextWord]
newCost, newGradPred, newGrad = word2vecCostAndGradient(inputVectors[centerWord], target, outputVectors, dataset)
cost += newCost
gradIn[centerWord] += newGradPred
gradOut += newGrad
return cost, gradIn, gradOut
# Implementation for the CBOW model in word2vec
def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient=softmaxCostAndGradient):
# Arguments: same as the skip-gram model
cost = 0.0
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
target = tokens[currentWord]
    # CBOW predicts from the sum of the context input vectors (list + axis=0 avoids the deprecated np.sum over a generator)
    centerWord = np.sum([inputVectors[tokens[contextWord]] for contextWord in contextWords], axis=0)
cost, gradPred, gradOut = word2vecCostAndGradient(centerWord, target, outputVectors, dataset)
gradIn = np.zeros_like(inputVectors)
for contextWord in contextWords:
gradIn[tokens[contextWord]] += gradPred
return cost, gradIn, gradOut
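# Note: because CBOW predicts the center word from the *sum* of the context input
# vectors, the same gradPred is added to gradIn for every context word (duplicates included).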
# Helper function - loads previously saved parameters and resets iteration start
def load_saved_params():
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
with open("saved_params_%d.npy" % st, "r") as f:
params = pickle.load(f)
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
with open("saved_params_%d.npy" % iter, "w") as f:
pickle.dump(params, f)
pickle.dump(random.getstate(), f)
# Save parameters every a few SGD iterations as fail-safe
SAVE_PARAMS_EVERY = 5000
# Implementation for stochastic gradient descent
def sgd(f, x0, learning_rate, iterations, postprocessing = None, useSaved = False, PRINT_EVERY = 10):
""" Arguments:
f -- the function to optimize, it should take a single
argument and yield two outputs, a cost and the gradient with respect to the arguments
x0 -- the initial point to start SGD from
learning_rate -- the step size for SGD
iterations -- total iterations to run SGD for
postprocessing -- postprocessing function for the parameters
if necessary. In the case of word2vec we will need to
normalize the word vectors to have unit length.
PRINT_EVERY -- specifies how many iterations to output loss
"""
# Anneal learning rate every several iterations
ANNEAL_EVERY = 20000
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
x0 = oldx
learning_rate *= 0.5 ** (start_iter / ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x = x0
if not postprocessing:
postprocessing = lambda x: x
expcost = None
for iter in xrange(start_iter + 1, iterations + 1):
cost = None
cost, grad = f(x)
x = x - learning_rate * grad
if(postprocessing):
x = postprocessing(x)
if iter % PRINT_EVERY == 0:
if not expcost:
expcost = cost
else:
expcost = .95 * expcost + .05 * cost
print "iter %d: %f" % (iter, expcost)
if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
learning_rate *= 0.5
return x
# ************** IMPLEMENTATION TESTS **************
def test_softmax():
print "Running softmax tests..."
test1 = softmax(np.array([[1,2]]))
ans1 = np.array([0.26894142, 0.73105858])
assert np.allclose(test1, ans1, rtol=1e-05, atol=1e-06)
test2 = softmax(np.array([[1001,1002],[3,4]]))
ans2 = np.array([
[0.26894142, 0.73105858],
[0.26894142, 0.73105858]])
assert np.allclose(test2, ans2, rtol=1e-05, atol=1e-06)
test3 = softmax(np.array([[-1001,-1002]]))
ans3 = np.array([0.73105858, 0.26894142])
assert np.allclose(test3, ans3, rtol=1e-05, atol=1e-06)
print "Passed!\n"
def test_sigmoid():
print "Running sigmoid tests..."
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
f_ans = np.array([
[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])
assert np.allclose(f, f_ans, rtol=1e-05, atol=1e-06)
g_ans = np.array([
[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])
assert np.allclose(g, g_ans, rtol=1e-05, atol=1e-06)
print "Passed!\n"
def test_gradient_descent_checker():
# Test square function x^2, grad is 2 * x
quad = lambda x: (np.sum(x ** 2), x * 2)
print "Running gradient checker for quad function..."
gradient_checker(quad, np.array(123.456))
gradient_checker(quad, np.random.randn(3,))
gradient_checker(quad, np.random.randn(4,5))
print "Passed!\n"
# Test cube function x^3, grad is 3 * x^2
cube = lambda x: (np.sum(x ** 3), 3 * (x ** 2))
print "Running gradient checker for cube function..."
gradient_checker(cube, np.array(123.456))
gradient_checker(cube, np.random.randn(3,))
gradient_checker(cube, np.random.randn(4,5))
print "Passed!\n"
def test_normalize_rows():
print "Running rows normalization check..."
x = normalizeRows(np.array([[3.0,4.0],[1, 2]]))
ans = np.array([[0.6,0.8],[0.4472136,0.89442719]])
assert np.allclose(x, ans, rtol=1e-05, atol=1e-06)
print "Passed!\n"
def test_word2vec_sgd_wrapper(word2vecModel, tokens, wordVectors, dataset, C,
word2vecCostAndGradient=softmaxCostAndGradient):
batchsize = 50
cost = 0.0
grad = np.zeros(wordVectors.shape)
N = wordVectors.shape[0]
inputVectors = wordVectors[:N/2,:]
outputVectors = wordVectors[N/2:,:]
for i in xrange(batchsize):
C1 = random.randint(1,C)
centerword, context = dataset.getRandomContext(C1)
if word2vecModel == skipgram:
denom = 1
else:
denom = 1
c, gin, gout = word2vecModel(
centerword, C1, context, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient)
cost += c / batchsize / denom
grad[:N/2, :] += gin / batchsize / denom
grad[N/2:, :] += gout / batchsize / denom
return cost, grad
def test_word2vec():
""" Interface to the dataset for negative sampling """
dataset = type('dummy', (), {})()
def dummySampleTokenIdx():
return random.randint(0, 4)
def getRandomContext(C):
tokens = ["a", "b", "c", "d", "e"]
return tokens[random.randint(0,4)], \
[tokens[random.randint(0,4)] for i in xrange(2*C)]
dataset.sampleTokenIdx = dummySampleTokenIdx
dataset.getRandomContext = getRandomContext
random.seed(31415)
np.random.seed(9265)
dummy_vectors = normalizeRows(np.random.randn(10,3))
dummy_tokens = dict([("a",0), ("b",1), ("c",2),("d",3),("e",4)])
print "==== Gradient check for skip-gram ===="
gradient_checker(lambda vec: test_word2vec_sgd_wrapper(
skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),
dummy_vectors)
gradient_checker(lambda vec: test_word2vec_sgd_wrapper(
skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),
dummy_vectors)
print "\n==== Gradient check for CBOW ===="
gradient_checker(lambda vec: test_word2vec_sgd_wrapper(
cbow, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),
dummy_vectors)
gradient_checker(lambda vec: test_word2vec_sgd_wrapper(
cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),
dummy_vectors)
print "\n=== Results ==="
print skipgram("c", 3, ["a", "b", "e", "d", "b", "c"],
dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)
print skipgram("c", 1, ["a", "b"],
dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,
negSamplingCostAndGradient)
print cbow("a", 2, ["a", "b", "c", "a"],
dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)
print cbow("a", 2, ["a", "b", "a", "c"],
dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,
negSamplingCostAndGradient)
def sgd_check():
quad = lambda x: (np.sum(x ** 2), x * 2)
print "Running SGD sanity checks..."
t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY = 100)
print "\nTest 1 result:", t1
assert abs(t1) <= 1e-6
t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY = 100)
print "\nTest 2 result:", t2
assert abs(t2) <= 1e-6
t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY = 100)
print "\nTest 3 result:", t3
assert abs(t3) <= 1e-6
print "SGD tests passed!\n"
# Run method - train word vectors with everything implemented
# Use Stanford Sentiment Treebank (SST)
# To fetch the datasets run: sh get_datasets.sh
def run():
random.seed(314)
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
# Train 10-dimensional vectors
dimVectors = 10
# Context size
C = 5
random.seed(31415)
np.random.seed(9265)
startTime = time.time()
wordVectors = np.concatenate(((np.random.rand(nWords, dimVectors) - 0.5) / dimVectors, np.zeros((nWords, dimVectors))), axis=0)
wordVectors = sgd(lambda vec: test_word2vec_sgd_wrapper(skipgram, tokens, vec, dataset, C, negSamplingCostAndGradient), wordVectors, 0.3, 40000, None, True, PRINT_EVERY=10)
print "Sanity check: cost at convergence should be around or below 10"
print "Training took %d seconds" % (time.time() - startTime)
# Concatenate the input and output word vectors
wordVectors = np.concatenate((wordVectors[:nWords,:], wordVectors[nWords:,:]), axis=0)
# wordVectors = wordVectors[:nWords,:] + wordVectors[nWords:,:]
visualizeWords = [
"the", "a", "an", ",", ".", "?", "!", "``", "''", "--",
"good", "great", "cool", "brilliant", "wonderful", "well", "amazing",
"worth", "sweet", "enjoyable", "boring", "bad", "waste", "dumb",
"annoying"]
visualizeIdx = [tokens[word] for word in visualizeWords]
visualizeVecs = wordVectors[visualizeIdx, :]
temp = (visualizeVecs - np.mean(visualizeVecs, axis=0))
covariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)
U,S,V = np.linalg.svd(covariance)
coord = temp.dot(U[:,0:2])
for i in xrange(len(visualizeWords)):
plt.text(coord[i,0], coord[i,1], visualizeWords[i], bbox=dict(facecolor='green', alpha=0.1))
plt.xlim((np.min(coord[:,0]), np.max(coord[:,0])))
plt.ylim((np.min(coord[:,1]), np.max(coord[:,1])))
plt.savefig('q3_word_vectors.png') # Save a visualization for the word vectors
if __name__ == "__main__":
test_softmax()
test_sigmoid()
test_gradient_descent_checker()
test_normalize_rows()
test_word2vec()
sgd_check()
run()
| 2.546875
| 3
|