| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k–686M ⌀) | star_events_count (int64 0–209k) | fork_events_count (int64 0–110k) | gha_license_id (stringclasses 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 213 values) | src_encoding (stringclasses 30 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 2–10.3M) | extension (stringclasses 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9eeb4be1cb93ab85fd14e38c367ec1ba4dc52f74
|
a4e8849dfcbb64cb6b56b9eb45fb7e431c9cfdc0
|
/s061-repaso/p03.py
|
b8bb2e0aeace2f6522ad3a560ec74647955b7d7a
|
[] |
no_license
|
marvjaramillo/ulima-intro210-clases
|
96b546eb79fbe34dbfa3e5726b1b8ed57523e110
|
fef2d2ef487ef386196e0b9dd2fa66338de141bf
|
refs/heads/main
| 2023-04-27T13:56:57.898602
| 2023-04-19T13:17:06
| 2023-04-19T13:17:06
| 344,644,221
| 2
| 0
| null | 2021-03-05T00:08:57
| 2021-03-05T00:08:56
| null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
'''
The late-arrival minutes of a group of employees are stored in a dictionary whose keys are
employee codes and whose values are lists with the minutes of lateness per day.
Implement a program that receives this dictionary and a list of employee codes, and
displays the employee from the list who accumulated the most minutes of lateness.
Example:
dicc = {"E001": [5, 10, 3, 4], "E002": {}, "E003": [30, 10]}
lista = ["E001", "E003"]
E001 --> [5, 10, 3, 4] --> 22
E003 --> [30, 10] --> 40
Comparing the minutes of lateness, the employee with the most minutes of lateness is "E003".
'''
def sumar_tardanzas(lista):
suma = 0
for i in range(len(lista)):
suma = suma + lista[i]
return suma
def mostrar_mayor_tardanza(dic_tardanzas, lista_empleados):
cod_elegido = ""
total_elegido = 0
for i in range(len(lista_empleados)):
cod_emp = lista_empleados[i]
tardanzas_emp = dic_tardanzas[cod_emp]
total_minutos = sumar_tardanzas(tardanzas_emp)
if(total_minutos > total_elegido):
total_elegido = total_minutos
cod_elegido = cod_emp
print("Empleado con mas minutos de tardanza:", cod_elegido)
print("Minutos de tardanza: ", total_elegido)
if __name__ == "__main__":
dicc = {"E001": [50, 10, 3, 4], "E002": {}, "E003":[30, 10] }
lista = ["E001", "E003"]
mostrar_mayor_tardanza(dicc, lista)
|
[
"usuario@correo.com"
] |
usuario@correo.com
|
ffd52c187b40075684ae17e912ffaad85f787083
|
82260f32dcf1597ddf4902b0b88b11c9d82ac1ae
|
/A6/6.1.py
|
1dbdc6f1e148660aba65b0ae4a6d80eface54fb9
|
[] |
no_license
|
jorgeacosta19/BrandTech_WebDev
|
ac0ff9c0ee024353b9f9c046b6104a2db3bcc7fc
|
1fd573ea1b0f67c6d654c9dbfe71c273b26a391e
|
refs/heads/main
| 2023-01-14T13:22:12.235950
| 2020-11-24T20:31:42
| 2020-11-24T20:31:42
| 301,190,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
# 1- Write a program that prints ‘Hello World’ to the screen.
print("Hello World")
|
[
"noreply@github.com"
] |
jorgeacosta19.noreply@github.com
|
2d5ccf17197699d50e0b2fa57a4243eb7ca907aa
|
c609730a43596a2d3303f072fc97d9cf681fac7b
|
/cagey/carbuisness/main_currency_supply.py
|
ed84e5c37083ff51e2afabd4f2216adcf44c254f
|
[] |
no_license
|
sinnettluo/ChenProject
|
5403311c0c7b78c484145e16d692abff00d2a110
|
0e33ecf1683afb22f1deb4bd54294c41aed8a46b
|
refs/heads/master
| 2023-03-22T23:48:08.430178
| 2020-09-02T15:05:02
| 2020-09-02T15:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from scrapy.cmdline import execute
import sys
import os
website = "currency_supply"
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", website])
|
[
"1316446041@qq.com"
] |
1316446041@qq.com
|
a2c1d5da1c0a0a81f541829e0fa78e83503a4b56
|
7177274b29e5daece1c00585ec92090571b5cd28
|
/__init__.py
|
72734e593d1390178430c23e0923102259ae01af
|
[
"MIT"
] |
permissive
|
tmizu23/SlideShow_plugin
|
cdd76a973269fa016f95a1b02f0b090b63a61db8
|
8634728fe497d11cd81467dc5aa29aee101887af
|
refs/heads/master
| 2021-01-10T21:20:01.755222
| 2014-10-25T14:48:48
| 2014-10-25T14:48:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
SlideShow
A QGIS plugin
This Plugin is SlideShow
-------------------
begin : 2014-09-20
copyright : (C) 2014 by Takayuki Mizutani
email : mizutani.takayuki+slideshow@gmai.com
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load SlideShow class from file SlideShow.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .slide_show import SlideShow
return SlideShow(iface)
|
[
"mizutani.takayuki@gmail.com"
] |
mizutani.takayuki@gmail.com
|
8df3b3f50a43565b98eb313b84920ee53a5850e9
|
c86b2d4e8431e35681e9725f6174042ad7411d5f
|
/Exercise_02/Shop/SH_10.py
|
ecfd62cbe230b3c2f2c659b55a98e198083c89a9
|
[] |
no_license
|
nadung65/Assignment_10
|
a44a04cd47838abf37634791e4aa4e67b93561d4
|
03faa49cba5a105475cc980001e60a88e8ff3dd8
|
refs/heads/main
| 2023-04-22T12:53:10.754476
| 2021-05-13T14:26:17
| 2021-05-13T14:26:17
| 367,067,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
import unittest
import time
from selenium import webdriver
PATH = "C:\Program Files\chromedriver_win32\chromedriver.exe"
class SH_10(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(PATH)
def testSH_10(self):
driver = self.driver
driver.get('http://practice.automationtesting.in/')
driver.find_element_by_link_text('Shop').click()
# Check Add to cart button
driver.find_element_by_class_name('add_to_cart_button').click()
time.sleep(1)
cart_content = driver.find_element_by_xpath('//*[@id="wpmenucartli"]/a/span[1]').text
self.assertEqual('1 Item', cart_content, 'User can not view that book in menu!')
# Test clicking View Basket link
driver.find_element_by_link_text('View Basket').click()
current_url = driver.current_url
self.assertEqual('http://practice.automationtesting.in/basket/', current_url, 'Can not click View basket link!')
time.sleep(1)
# Check if subtotal < total
subtotal = float(driver.find_element_by_css_selector('.cart-subtotal td span').text[1:])
total = float(driver.find_element_by_css_selector('.order-total td span').text[1:])
self.assertTrue(subtotal < total, "Subtotal is not less than total!")
# Test Check out button
driver.find_element_by_class_name('checkout-button').click()
current_url = driver.current_url
self.assertEqual('http://practice.automationtesting.in/checkout/', current_url, "Can not navigate to check out page!")
# Fill details in check out page
driver.find_element_by_id('billing_first_name').send_keys('AD')
driver.find_element_by_id('billing_last_name').send_keys('Nguyen')
driver.find_element_by_id('billing_email').send_keys('nadung@gmail.com')
driver.find_element_by_id('billing_phone').send_keys('0123456789')
driver.find_element_by_id('select2-chosen-1').click()
driver.find_element_by_id('s2id_autogen1_search').send_keys('Vietnam')
driver.find_element_by_class_name('select2-match').click()
driver.find_element_by_id('billing_address_1').send_keys('Nam Ky Khoi Nghia')
driver.find_element_by_id('billing_city').send_keys('Danang')
driver.find_element_by_id('payment_method_cod').click()
# Test Place order button
driver.find_element_by_id('place_order').click()
time.sleep(3)
message = driver.find_element_by_class_name('woocommerce-thankyou-order-received').text
self.assertEqual('Thank you. Your order has been received.', message, "Fail to check out!")
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
[
"nadung.18it1@vku.udn.vn"
] |
nadung.18it1@vku.udn.vn
|
d69370d7a2f4e7087b2969610f4b97703dddf151
|
2f5e406579e965acb535183f4c4cb0e889db2ecd
|
/ExtraDataset.py
|
557cddf77b561247ca30c66f56771cc0edc5b273
|
[] |
no_license
|
rm3028/Deep-Generative-Model
|
7504296de65739e842274cec824ec045526a59d2
|
b7587c5f2f6aac0530d460e76e6c2614360bd570
|
refs/heads/master
| 2023-02-25T13:19:44.853641
| 2021-01-29T17:48:04
| 2021-01-29T17:48:04
| 329,917,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import pandas as pd
from skimage import io
import torch
from torch.utils.data import Dataset
class ExtraDataset(Dataset):
def __init__(self, dataset_dir):
self.dataset_dir = dataset_dir
self.dataset_df = pd.read_csv(dataset_dir + '/tags.csv', names=['id', 'tag'])
def __len__(self):
return len(self.dataset_df)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = self.dataset_dir + '/images/' + str(self.dataset_df['id'][idx]) + '.jpg'
image = io.imread(img_name)
img_tag = self.dataset_df['tag'][idx]
return { 'image': image, 'tag': img_tag }
|
[
"rm3028@hotmail.com.tw"
] |
rm3028@hotmail.com.tw
|
244c6743b325be89e3cda486203303f568032386
|
8ea28a828b808acedb405670fa1be13f3ce1b463
|
/pyqtdeploy/sysroot/packages/pyqt3d.py
|
aba52d3b28fdd883d1c52b50b4988d66d839de32
|
[
"BSD-3-Clause"
] |
permissive
|
GreatFruitAndy/pyqtdeploy
|
bed2c784e9ce554ac448ae9355bf3ffb802b885a
|
ea1ade32f8f5bff203ae24400381f6697da2221e
|
refs/heads/master
| 2021-05-07T03:05:51.241234
| 2017-11-10T17:02:57
| 2017-11-10T17:02:57
| 110,604,244
| 1
| 0
| null | 2017-11-16T23:12:52
| 2017-11-13T21:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
# Copyright (c) 2017, Riverbank Computing Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from ... import AbstractPackage, PackageOption
class PyQt3DPackage(AbstractPackage):
""" The PyQt3D package. """
# The package-specific options.
options = [
PackageOption('source', str, required=True,
help="The archive containing the PyQt3D source code."),
]
def build(self, sysroot):
""" Build PyQt3D for the target. """
sysroot.progress("Building PyQt3D")
# Unpack the source.
archive = sysroot.find_file(self.source)
sysroot.unpack_archive(archive)
# Create a configuration file.
cfg = '''py_platform = {0}
py_inc_dir = {1}
py_pylib_dir = {2}
py_pylib_lib = {3}
py_sip_dir = {4}
[PyQt 5]
module_dir = {5}
'''.format(sysroot.target_py_platform, sysroot.target_py_include_dir,
sysroot.target_lib_dir, sysroot.target_py_lib,
sysroot.target_sip_dir,
os.path.join(sysroot.target_sitepackages_dir, 'PyQt5'))
disabled_features = sysroot.find_package('pyqt5').disabled_features
if disabled_features:
cfg += 'pyqt_disabled_features = {0}\n'.format(
' '.join(disabled_features))
cfg_name = 'pyqt3d-' + sysroot.target_arch_name + '.cfg'
with open(cfg_name, 'wt') as cfg_file:
cfg_file.write(cfg)
# Configure, build and install.
args = [sysroot.host_python, 'configure.py', '--static', '--qmake',
sysroot.host_qmake, '--sysroot', sysroot.sysroot_dir,
'--no-qsci-api', '--no-sip-files', '--no-stubs', '--configuration',
cfg_name, '--sip', sysroot.host_sip, '-c']
if sysroot.verbose_enabled:
args.append('--verbose')
sysroot.run(*args)
sysroot.run(sysroot.host_make)
sysroot.run(sysroot.host_make, 'install')
|
[
"phil@riverbankcomputing.com"
] |
phil@riverbankcomputing.com
|
5fc764e2fc52a3262e04593a0fbc5a6b954f383e
|
89f3ba8905ce2ebad1a9605f683024dcd9ae1f7f
|
/api/models.py
|
8ff6448a8317132d187dd5c7b219dbd43e49f6fc
|
[] |
no_license
|
vishnualapra/carservice
|
1d26efb355ff54cb942ea6f36e96590e41df88d1
|
69aba53576aad96c169f64b5384ebe7b49a73234
|
refs/heads/master
| 2020-08-22T16:06:48.903210
| 2019-10-23T21:07:17
| 2019-10-23T21:07:17
| 216,432,482
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
from django.db import models
# Create your models here.
#manufacturer
class Manufacturer(models.Model):
manufacturer_code = models.IntegerField(primary_key=True)
manufacturer_name = models.CharField(max_length=100)
manufacturer_detail = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.manufacturer_name
class Model(models.Model):
model_code = models.IntegerField(primary_key=True)
daily_hire_rate = models.IntegerField()
model_name = models.CharField(max_length=100)
manufacturer = models.ForeignKey(Manufacturer,on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.model_name
class Mechanic(models.Model):
mechanic_id = models.AutoField(primary_key=True)
mechanic_name = models.CharField(max_length=100)
other_mechanic_details = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.mechanic_name
class Customer(models.Model):
customer_id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
title = models.CharField(max_length=20)
gender = models.CharField(max_length=10)
email_address = models.EmailField()
phone_number = models.CharField(max_length=15)
address_line_1 = models.CharField(max_length=500)
address_line_2 = models.CharField(max_length=500)
address_line_3 = models.CharField(max_length=500)
city = models.CharField(max_length=200)
state = models.CharField(max_length=100)
other_customer_details = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.last_name
class Car(models.Model):
license_number = models.IntegerField(primary_key=True)
current_milage = models.CharField(max_length=50)
engine_size = models.CharField(max_length=50)
other_car_details = models.TextField()
model = models.ForeignKey(Model,on_delete=models.PROTECT)
customer = models.ForeignKey(Customer,on_delete=models.PROTECT)
on_service = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return str(self.license_number)
class Booking(models.Model):
booking_id = models.AutoField(primary_key=True)
datetime_of_service = models.DateTimeField(null=True)
payment_received_yn = models.BooleanField(default=False)
completed = models.BooleanField(default=False)
other_bookin_details = models.TextField()
service_date = models.DateField()
day_position = models.IntegerField()
car = models.ForeignKey(Car,on_delete=models.PROTECT)
customer = models.ForeignKey(Customer,on_delete=models.PROTECT)
mechanic = models.ForeignKey(Mechanic,on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
[
"vishnualapra@gmail.com"
] |
vishnualapra@gmail.com
|
434f059f47cc43ee8c54755a5358bb465f552f55
|
36466c39d3ae94c2f936d4fdfe0fd4b034bbfa80
|
/3rdparty/tvm/python/tvm/relay/ir_pass.py
|
6de6437b9eb9aad573e7603f12fc20fde1da7c86
|
[
"Apache-2.0",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"MIT",
"BSD-2-Clause",
"Zlib",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause-Views"
] |
permissive
|
zhouhuaman/dgt
|
ccc674dc6abb055eeb5b88eaa0177de3a051b362
|
a1df50efa3b635c20ddaa6bc5068e5f7bb863b5e
|
refs/heads/master
| 2022-11-27T21:53:05.980980
| 2020-01-13T09:33:14
| 2020-01-13T09:33:14
| 233,558,790
| 1
| 2
|
Apache-2.0
| 2022-11-23T15:05:17
| 2020-01-13T09:29:56
|
C++
|
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
# pylint: disable=no-else-return,
# pylint: disable=unidiomatic-typecheck
"""The set of passes for Relay.
Exposes an interface for configuring the passes and scripting
them in Python.
"""
from . import _ir_pass
from . import _make
# pylint: disable=invalid-name
def infer_type(env, expr):
"""Infer the type of expr under the context of env.
Parameters
----------
env : relay.Environment
The global environment.
expr : relay.Expr
The input expression.
Returns
-------
checked_expr : relay.Expr
The checked expression.
"""
return _ir_pass.infer_type(env, expr)
well_formed = _ir_pass.well_formed
check_kind = _ir_pass.check_kind
free_vars = _ir_pass.free_vars
free_type_vars = _ir_pass.free_type_vars
def dead_code_elimination(e):
""" Remove expressions which does not effect the program result (dead code).
Parameters
----------
e: relay.Expr
The input Expression
Returns
-------
result: relay.Expr
An expression which is semantically equal to the input expression,
but with dead code removed.
"""
return _ir_pass.dead_code_elimination(e)
def alpha_equal(lhs, rhs):
"""Compare two Relay expr for structural equivalence (alpha equivalence).
Parameters
----------
lhs: relay.Expr
One of the input Expression.
rhs: relay.Expr
One of the input Expression.
Returns
-------
result: bool
True iff lhs is alpha equal to rhs.
"""
return bool(_make._alpha_equal(lhs, rhs))
|
[
"zhouhuman@163.com"
] |
zhouhuman@163.com
|
38968e8d9f98d633ef3f2e85e0e1b808a3a42451
|
be3f8597b2d3224c7a6d9d64eba54b382f3e5936
|
/WebApp/TextRank.py
|
798e266b8092c584de82cc4b02a3b9fb45e010e9
|
[] |
no_license
|
ya2366/unilever_nlp_capstone
|
a979e7717af1e97a83a36dbb30f89be5cfe23cff
|
5df3d094765ae01874fe66b8b3579aca02648e99
|
refs/heads/master
| 2021-09-02T10:44:28.980591
| 2018-01-02T01:37:56
| 2018-01-02T01:37:56
| 113,112,355
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,973
|
py
|
"""
From this paper: https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf
External dependencies: nltk, numpy, networkx
Based on https://gist.github.com/voidfiles/1646117
"""
import io
import nltk
import itertools
from operator import itemgetter
import networkx as nx
import os
# apply syntactic filters based on POS tags
def filter_for_tags(tagged, tags=['NN', 'JJ', 'NNP']):
return [item for item in tagged if item[1] in tags]
def normalize(tagged):
return [(item[0].replace('.', ''), item[1]) for item in tagged]
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def lDistance(firstString, secondString):
"Function to find the Levenshtein distance between two words/sentences - gotten from http://rosettacode.org/wiki/Levenshtein_distance#Python"
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1 + 1], newDistances[-1])))
distances = newDistances
return distances[-1]
def buildGraph(nodes):
"nodes - list of hashables that represents the nodes of the graph"
gr = nx.Graph() # initialize an undirected graph
gr.add_nodes_from(nodes)
nodePairs = list(itertools.combinations(nodes, 2))
# add edges to the graph (weighted by Levenshtein distance)
for pair in nodePairs:
firstString = pair[0]
secondString = pair[1]
levDistance = lDistance(firstString, secondString)
gr.add_edge(firstString, secondString, weight=levDistance)
return gr
def extractKeyphrases(text,top_n):
# tokenize the text using nltk
wordTokens = nltk.word_tokenize(text)
print("Tokenized Words")
# assign POS tags to the words in the text
tagged = nltk.pos_tag(wordTokens)
textlist = [x[0] for x in tagged]
print("Pos Tagging")
tagged = filter_for_tags(tagged)
tagged = normalize(tagged)
unique_word_set = unique_everseen([x[0] for x in tagged])
word_set_list = list(unique_word_set)
# this will be used to determine adjacent words in order to construct keyphrases with two words
graph = buildGraph(word_set_list)
print("Graph Builded")
# pageRank - initial value of 1.0, error tolerance of 0,0001,
calculated_page_rank = nx.pagerank(graph, weight='weight')
print("")
# most important words in ascending order of importance
keyphrases = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
# the number of keyphrases returned will be relative to the size of the text (a third of the number of vertices)
aThird = int(len(word_set_list) / 3)
keyphrases = keyphrases[0:aThird + 1]
# take keyphrases with multiple words into consideration as done in the paper - if two words are adjacent in the text and are selected as keywords, join them
# together
modifiedKeyphrases = set([])
dealtWith = set([]) # keeps track of individual keywords that have been joined to form a keyphrase
i = 0
j = 1
while j < len(textlist):
firstWord = textlist[i]
secondWord = textlist[j]
if firstWord in keyphrases and secondWord in keyphrases:
keyphrase = firstWord + ' ' + secondWord
modifiedKeyphrases.add(keyphrase)
dealtWith.add(firstWord)
dealtWith.add(secondWord)
else:
if firstWord in keyphrases and firstWord not in dealtWith:
modifiedKeyphrases.add(firstWord)
# if this is the last word in the text, and it is a keyword,
# it definitely has no chance of being a keyphrase at this point
if j == len(textlist) - 1 and secondWord in keyphrases and secondWord not in dealtWith:
modifiedKeyphrases.add(secondWord)
i = i + 1
j = j + 1
result=list(modifiedKeyphrases)
if top_n>len(result):
return_result=result
else:
return_result=result[0:top_n]
return return_result
def extractSentences(text):
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sentenceTokens = sent_detector.tokenize(text.strip())
graph = buildGraph(sentenceTokens)
calculated_page_rank = nx.pagerank(graph, weight='weight')
# most important sentences in ascending order of importance
sentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
# return a 100 word summary
summary = ' '.join(sentences)
summaryWords = summary.split()
summaryWords = summaryWords[0:101]
summary = ' '.join(summaryWords)
return summary
def writeFiles(summary, keyphrases, fileName):
"outputs the keyphrases and summaries to appropriate files"
print("Generating output to " + 'keywords/' + fileName)
keyphraseFile = io.open('keywords/' + fileName, 'w')
for keyphrase in keyphrases:
keyphraseFile.write(keyphrase + '\n')
keyphraseFile.close()
print("Generating output to " + 'summaries/') + fileName
summaryFile = io.open('summaries/' + fileName, 'w')
summaryFile.write(summary)
summaryFile.close()
print("-")
|
[
"yutingan@graphen.ai"
] |
yutingan@graphen.ai
|
28ae56610dcda85516ba0f5cbeda86fcbdc07548
|
862c806d1d277ad4444af13b05f0d364f1c24b83
|
/examples/operator_v1.py
|
85a5ba5aa1f47f2f57e738add72e9c953fbd2a2f
|
[] |
no_license
|
irvinlim/pymesos-0.3.4-bugrepro
|
babc1f057093f3e291c780e337b856d67b3e581e
|
38909cad4f1feb7d7b996ac701340f305e364905
|
refs/heads/master
| 2020-03-24T07:43:13.893083
| 2018-07-27T12:11:28
| 2018-07-27T12:11:28
| 142,572,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
#!/usr/bin/env python2.7
from __future__ import print_function
import logging
import sys
from pymesos import MesosOperatorMasterDriver, OperatorMaster
class MinimalOperator(OperatorMaster):
def __init__(self):
pass
def taskAdded(self, task_info):
logging.debug('Task added')
logging.debug(task_info)
def taskUpdated(self, task_info):
logging.debug('Task updated')
logging.debug(task_info)
def frameworkAdded(self, framework_info):
logging.debug('Framework added')
logging.debug(framework_info)
def frameworkUpdated(self, framework_info):
logging.debug('Framework updated')
logging.debug(framework_info)
def frameworkRemoved(self, framework_info):
logging.debug('Framework removed')
logging.debug(framework_info)
def agentAdded(self, agent_info):
logging.debug('Agent added')
logging.debug(agent_info)
def agentRemoved(self, agent_info):
logging.debug('Agent removed')
logging.debug(agent_info)
def main(master):
driver = MesosOperatorMasterDriver(master, MinimalOperator())
res = driver.getHealth()
logging.debug(res)
driver.run()
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) != 2:
logging.error('Usage: {} <mesos_master>'.format(sys.argv[0]))
sys.exit(1)
else:
main(sys.argv[1])
|
[
"limir@seagroup.com"
] |
limir@seagroup.com
|
69bef76ac68fc60f87f5f5e549027b0bcfae66f7
|
91a2ecfaf5dc6c917ec2fda31f56291103f68ceb
|
/tests/protos/test_ctc_loss.py
|
6da44120062bdda6381ed74e2c0f8225fffc8ae4
|
[
"BSD-3-Clause"
] |
permissive
|
MyrtleSoftware/myrtlespeech
|
635d1d16d1bd60fb07a4d30edbf9acb61786c13f
|
8522048fd37744ffa06827a0cbd202b839a15453
|
refs/heads/master
| 2021-07-16T14:55:00.479967
| 2020-03-20T14:33:15
| 2020-03-20T14:33:15
| 192,501,300
| 12
| 1
|
NOASSERTION
| 2020-03-20T14:33:17
| 2019-06-18T08:44:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import hypothesis.strategies as st
from myrtlespeech.protos import ctc_loss_pb2
from tests.protos.utils import all_fields_set
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def ctc_losses(
draw, return_kwargs: bool = False, alphabet_len: Optional[int] = None
) -> Union[
st.SearchStrategy[ctc_loss_pb2.CTCLoss],
st.SearchStrategy[Tuple[ctc_loss_pb2.CTCLoss, Dict]],
]:
"""Returns a SearchStrategy for CTCLoss plus maybe the kwargs."""
kwargs = {}
end = 1000
if alphabet_len is not None:
end = max(0, alphabet_len - 1)
kwargs["blank_index"] = draw(st.integers(0, end))
kwargs["reduction"] = draw(
st.sampled_from(ctc_loss_pb2.CTCLoss.REDUCTION.values())
)
all_fields_set(ctc_loss_pb2.CTCLoss, kwargs)
ctc_loss = ctc_loss_pb2.CTCLoss(**kwargs)
if not return_kwargs:
return ctc_loss
return ctc_loss, kwargs
|
[
"sam@samgd.com"
] |
sam@samgd.com
|
641393e4ba73eb019ef8abc5d60bcf52802b1b08
|
b82efae8184e01630e0befb2be675cbcec254758
|
/src/GraphGP.py
|
1a3daddddffb4d1351f884553595eff014a03f1b
|
[] |
no_license
|
tankred-saanum/Cognitive-maps-for-rewards
|
9ba16e3252c1c4698b719d017cc4d4e9a262802b
|
1ebb133af8e3a37bec4863ee38b233f1c15c4edd
|
refs/heads/main
| 2023-04-07T03:28:04.269511
| 2023-01-16T20:29:54
| 2023-01-16T20:29:54
| 371,415,219
| 4
| 3
| null | 2023-01-16T20:29:30
| 2021-05-27T15:08:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,842
|
py
|
import matplotlib
from matplotlib import pyplot as plt
import networkx as nx
import numpy as np
import copy
import scipy
from scipy.optimize import minimize
#from scipy import minimize
from MonsterPrior import MonsterPrior
import pickle
class LaplacianGP():
''' A GP model which computes the kernel function over a graph based on the graph Laplacian. However,
you can also pass this object a covariance matrix, accompanied by a set of training indices and rewards,
and it will use those observations to condition its predictions when calling the mean function.
Example:
gp = LaplacianGP()
gp.set_training_data(training_idx, y)
gp.set_covariance(K)
mu = gp.mean()
Here K is the kernel matrix for all output points
This object also contains methods for maximizing the marginal likelihood of the data using gradient descent (scipy.optimize integration).
This works both for the RBF kernel, as well as the diffusion kernel, if the object is given a graph Laplacian.
'''
def train(self, graph, observed_nodes, y, alpha = 1):
'''
graph: This is a networkx graph object, or something that inherits from it.
observed_nodes: an array of integers indexing the nodes whose values were observed
y: an array of outcome values
alpha: the lengthscale parameter
'''
self.L = nx.normalized_laplacian_matrix(graph).todense()
self.training_idx = observed_nodes
self.y = y
self.alpha = alpha
self.sigma = 0.01
self.__K(self.L, self.alpha)
def __K(self, L, alpha):
''' A method which creates the 3 kernel matrices needed to compute the posterior mean and
covariance using the exponential of the graph laplacian weighted by negative alpha. Note that
it is assumed that the conditioning points are included in the set of evaluation points (self.K)'''
# the full covariance matrix
self.K = scipy.linalg.expm(-alpha * L)
# the matrix which will contain the covariance between all training points
self.K_obs = np.zeros((len(self.training_idx), len(self.training_idx)))
# first get the rows of the observed points
K_obs_rows = self.K[self.training_idx]
# fill in with the corresponding values at the indices of the observed points
for i, arr in enumerate(K_obs_rows):
self.K_obs[i] = arr[self.training_idx]
# create matrix containing covariance between all input points and all observed points
self.K_input_obs = np.zeros((len(self.K), len(self.training_idx)))
# fill in with the values of indices of observations
for i in range(len(self.K)):
self.K_input_obs[i] = self.K[i][self.training_idx]
def mean(self, sigma=0.01, jitter = 0.0000001):
''' computes the posterior mean function '''
self.inv_K = np.linalg.inv(self.K_obs + (sigma*np.eye(len(self.K_obs))))
return self.K_input_obs @ (self.inv_K) @ self.y
def covariance(self, sigma = 0.1):
''' computes the posterior covariance '''
return self.K - (self.K_input_obs @ np.linalg.inv(self.K_obs + sigma * np.eye(len(self.K_obs))) @ self.K_input_obs.T)
def get_prior_covariance(self):
''' Getter for the kernel matrix'''
return self.K
def set_training_data(self, training_idx, y):
''' Set training data for the GP'''
self.training_idx = training_idx
self.y = y
def set_covariance(self, covariance_matrix):
''' This method allows one to set the full covariance matrix needed to arbitrary matrices
(i.e. the matrix isn't computed from the graph Laplacian). This is useful if the covariance
one wishes to use is already known for instance'''
self.K = covariance_matrix
# the matrix which will contain the covariance between all training points
self.K_obs = np.zeros((len(self.training_idx), len(self.training_idx)))
# first get the rows of the observed points
K_obs_rows = self.K[self.training_idx]
# fill in with the corresponding values at the indices of the observed points
for i, arr in enumerate(K_obs_rows):
self.K_obs[i] = arr[self.training_idx]
self.K_input_obs = np.zeros((len(self.K), len(self.training_idx)))
# fill in with the values of indices of observations
for i in range(len(self.K)):
self.K_input_obs[i] = self.K[i][self.training_idx]
def RBF(self, X1, X2, var = 1, l = 1):
''' Computes the RBF similarity between two n x m matrices, where n is
the number of observations, and m is the number of feature dimensions'''
sqdist = np.sum(X1**2, 1).reshape(-1, 1) + np.sum(X2**2, 1) - 2 * np.dot(X1, X2.T)
return var**2 * np.exp(-0.5 / l**2 * sqdist)
def assign_inputs(self, X):
'''Convenience function for nll minimization'''
if len(list(X.shape)) == 1:
self.X = X.reshape(-1, 1)
else:
self.X = X
def nll(self, theta):
''' This function is adapted from Martin Krasser's tutorial on GP regression,
using a Cholesky decomposition as a more numerically stable method for getting
the negative log likelihood, introduced in Rasmussen and Williams'''
l = theta[0]
noise = theta[1]
K = self.RBF(self.X, self.X, var=noise, l=l)
K = K + ((noise**2) *np.eye(len(self.y)))
L = np.linalg.cholesky(K)
S1 = scipy.linalg.solve_triangular(L, self.y, lower=True)
S2 = scipy.linalg.solve_triangular(L.T, self.y, lower=False)
return np.sum(np.log(np.diagonal(L))) + \
0.5 * self.y.dot(S2) + \
0.5 * len(self.training_idx) * np.log(2*np.pi)
def set_laplacian_matrix(self, L):
self.L = L
def nll_diffusion_kernel(self, theta):
''' Performs nll minimization with scipy on a diffusion kernel'''
l = theta[0]
noise = 0.01 ## add jitter
self.__K(self.L, l)
K_ = self.K_obs.copy()
K_ = K_ + ((noise**2)*np.eye(len(self.y)))
try:
L = np.linalg.cholesky(K_)
# L = scipy.linalg.cholesky(K_)
except np.linalg.LinAlgError as err:
print("Warning: Cholesky didn't work - trying to remove negative eigenvalues and reconstruct using Eigendecomposition")
# print(l)
eig_v, eig_vec = np.linalg.eig(K_)
eig_v[eig_v < 0] = -eig_v[eig_v < 0]
lam = np.eye(len(K_))
np.fill_diagonal(lam, eig_v)
K_ = eig_vec @ lam @ np.linalg.inv(eig_vec + (np.eye(len(eig_vec))*0.000000001))
try:
L = np.linalg.cholesky(K_)
except np.linalg.LinAlgError:
raise np.linalg.LinAlgError("Could not compute Cholesky decomposition after removing negative eigenvalues")
S1 = scipy.linalg.solve_triangular(L, self.y, lower=True)
S2 = scipy.linalg.solve_triangular(L.T, self.y, lower=False)
return np.sum(np.log(np.diagonal(L))) + \
0.5 * self.y.dot(S2) + \
0.5 * len(self.training_idx) * np.log(2*np.pi)
def evaluate_nll(self, noise=0.01):
''' This one is better suited if you just want the nll of the GP's kernel,
assuming a small fixed noise (default 0.01).'''
K_ = self.K_obs.copy()
K_ += ((noise**2)*np.eye(len(self.y)))
L = np.linalg.cholesky(K_)
S1 = scipy.linalg.solve_triangular(L, self.y, lower=True)
S2 = scipy.linalg.solve_triangular(L.T, self.y, lower=False)
return np.sum(np.log(np.diagonal(L))) + \
0.5 * self.y.dot(S2) + \
0.5 * len(self.training_idx) * np.log(2*np.pi)
def minimize_nll(self, X, X_train):
''' Minimize nll function to be called when the kernel is RBF'''
self.assign_inputs(X_train)
l = np.random.uniform(0.01, 4)
n = np.random.uniform(0.0001, 1)
output = minimize(self.nll, [l, n], bounds=((1e-5, None), (1e-5, None)),
method='L-BFGS-B')
l, n = output.x
if len(list(X.shape)) == 1:
X = X.reshape(-1, 1)
else:
X = X
return self.RBF(X, X, var=n, l=l), l, n
def minimize_nll_diffusion(self):
''' Minimize nll function to be called when the kernel is a diffusion kernel'''
l = np.random.uniform(0.01, 4)
try:
output = minimize(self.nll_diffusion_kernel, [l], bounds=((1e-5, None), ),
method='L-BFGS-B')
except np.linalg.LinAlgError:
print("Could not compute cholesky - lengthscale is set to 1")
return 1
l = output.x
return l
|
[
"tankred.saanum@gmail.com"
] |
tankred.saanum@gmail.com
|
35614a4b8e4a335c54fd174d3cf65ff29c823483
|
db9ff8accaa4d8d4a96d3f9122c0fdc5e83ea2a5
|
/test/test_price_quantity.py
|
12635c2d23b1dcacf3ca517e059fcaba37c32bd5
|
[] |
no_license
|
agtt/ebay-openapi-inventory
|
4754cdc8b6765acdb34f6b8f89b017ccbc6b1d2b
|
d990c26f16e811431892ac6401c73c4599c2d414
|
refs/heads/master
| 2023-06-17T10:53:43.204075
| 2021-07-14T18:32:38
| 2021-07-14T18:32:38
| 386,039,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
"""
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
The version of the OpenAPI document: 1.13.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.offer_price_quantity import OfferPriceQuantity
from openapi_client.model.ship_to_location_availability import ShipToLocationAvailability
globals()['OfferPriceQuantity'] = OfferPriceQuantity
globals()['ShipToLocationAvailability'] = ShipToLocationAvailability
from openapi_client.model.price_quantity import PriceQuantity
class TestPriceQuantity(unittest.TestCase):
"""PriceQuantity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPriceQuantity(self):
"""Test PriceQuantity"""
# FIXME: construct object with mandatory attributes with example values
# model = PriceQuantity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"csd@hotmail.com.tr"
] |
csd@hotmail.com.tr
|
73a212ad058bfe0804c7b0bca1a93042ce35c082
|
8783d015169267c27062a231c33aa7450fc7153d
|
/hackers_rank/euler/0013_large_sum.py
|
c36466ed1a90eb344d6aadd42097768775c0189f
|
[] |
no_license
|
thangarajan8/misc_python
|
51619e932ffd972be78a23b62ad69b34f84f035d
|
b00ad259e240a3897348bc80fb9040a257db208f
|
refs/heads/master
| 2021-06-26T02:14:13.613212
| 2021-02-05T04:35:25
| 2021-02-05T04:35:25
| 209,036,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 17:16:29 2019
@author: Thanga
"""
a = [37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676]
str(sum(a))[:10]
|
[
"Thangarajan.P@tvscredit.com"
] |
Thangarajan.P@tvscredit.com
|
5d314b91eab30ca0734edabfe18f84b0b0ac2a17
|
9aab31e0a55d1f56c5e4eff383760f93cf7445ca
|
/RamseyNumber/classification/irrep_preloaded.py
|
fff97eaf5329ea2031f367a9a5aa6fecd051f6be
|
[] |
no_license
|
chngr/kakko
|
d6ecbe252dfed19e62e221116aea9e2ec696a1f6
|
92ab05ccda63d92a0f8c81df82b1f7d624dc03f6
|
refs/heads/master
| 2020-12-03T05:10:43.592407
| 2017-08-02T17:21:53
| 2017-08-02T17:21:53
| 95,740,495
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,491
|
py
|
# irrep.py
# weight_space_gen(): generates root spaces
# Input: cartan_basis -- list with Cartan basis set
# diag_mat_list -- list of diagonal matrices corresponding to Cartan basis
# (with corresponding indices)
# alg_dim -- dimension of overall Lie algebra
# Output: weight_space_list -- ker((rho(H_i) - a{ij} * id)^{dim V}) for all i and j
def weight_space_gen(cartan_basis, diag_mat_list, alg_dim):
weight_space_list = []
mat_size = cartan_basis[0].ncols()
# for each element in Cartan basis
for i in range(len(cartan_basis)):
elem = cartan_basis[i]
cur_diag = diag_mat_list[i].diagonal()
sub_list = []
# for each eigenvalue
for eigenvalue in cur_diag:
cur_space = ((elem - eigenvalue * matrix.identity(mat_size))^alg_dim).kernel()
# add to list for given i and j
sub_list.append(cur_space)
# add sublist for given i to overall list
weight_space_list.append(sub_list)
return weight_space_list
# weight_space_decomp(): calculates root space decomposition
# Input: weight_space_list -- list with sublists: each sublist has root spaces for
# given element in Cartan basis
# Output: decomp_list -- list with spaces in root space decomposition
def weight_space_decomp(weight_space_list):
# max_index for tuple set of indices
max_index = len(weight_space_list[0]) - 1
# length of each tuple in tuple set of indices
basis_size = len(weight_space_list)
index_set = get_tuples(max_index,basis_size)
# direct_sum stores all of the intersections
to_direct_sum = []
# for each index
for index in index_set:
list_to_intersect = []
# pair index with each sublist
for i in range(len(index)):
cur_index = index[i]
list_to_intersect.append(weight_space_list[i][cur_index])
cur_intersection = intersect_spaces(list_to_intersect)
to_direct_sum.append(cur_intersection)
to_direct_sum = list(set(to_direct_sum))
for elem in to_direct_sum:
if elem.dimension() == 0:
to_direct_sum.remove(elem)
return to_direct_sum
# get_tuples(): generates all possible tuples from 0 to max_val, inclusive
# Input: max_val -- maximum value in tuple
# list_len -- length of each tuple
# Output: tuple_list -- list of all possible tuples within range
def get_tuples(max_val, list_len):
tuple_list = []
# perform recursion
if list_len > 1:
return tuple_helper(get_tuples(max_val,list_len-1),max_val)
# base case
else:
for i in range(max_val+1):
tuple_list.append([i])
return tuple_list
# tuple_helper(): helper function to perform recursion for get_tuples()
# Input: old_list -- list before current step of the recursion
# max_val -- maximum value in tuple
# Output: new_list -- list after current step of the recursion
def tuple_helper(old_list, max_val):
new_list = []
for i in range(len(old_list)):
cur_tuple = old_list[i]
for j in range(max_val+1):
new_cur_tuple = []
new_cur_tuple = cur_tuple + [j]
new_list.append(new_cur_tuple)
return new_list
# adjoint_rep(): computes adjoint representation matrices of
# Lie algebra
# Input: input_elems -- set of matrices to compute adjoint rep of
# basis -- compute with respect to this basis
# Output: ad -- list of adjoint representation matrices
def adjoint_rep(input_elems, basis):
basis_vec = []
ad = []
# find matrix of basis
for b in basis:
basis_vec.append(b.transpose().list())
basis_mat = matrix(QQ,basis_vec).transpose()
# find adjoint rep matrices
for mat_elem in input_elems:
mat_list = []
for basis_elem in basis:
bracket_vec = vector(QQ,bracket(mat_elem,basis_elem).transpose().list())
coords = basis_mat.solve_right(bracket_vec)
mat_list.append(coords.list())
adj_mat = matrix(QQ,mat_list).transpose()
ad.append(adj_mat)
return ad
# ------------------------------------------------------------------------------------------
from random import randint
# simultaneous_diag(): simultaneously diagonalizes a commuting basis set
# Input: basis -- commuting basis
# Output: P -- matrix P of D = P^{-1} * A * P that simultaneously diagonalizes
# diag_mat_list -- list of diagonalized matrices
def simultaneous_diag(basis):
valid_elem = False
# common P and unique D for each element in Cartan
P = None
diag_mat_list = []
# find element that diagonalizes the Cartan basis
while not valid_elem:
diag_mat_list = []
# compute a random element of the Cartan subalgebra
cartan_elem = compute_random_element(basis)
# diagonalize random element
D, P = cartan_elem.eigenmatrix_right()
# assume the diagonalization works
valid_elem = True
# check if diagonalizes all elements
for elem in basis:
cur_diag_mat = P.inverse() * elem * P
diag_mat_list.append(cur_diag_mat)
# check if each element is diagonalized
if not gap.IsDiagonalMat(cur_diag_mat):
valid_elem = False
break
return P, diag_mat_list
# compute_random_element(): computes random matrix element, random linear
# combination of basis vectors
# Input: basis -- basis of Lie algebra
# Output: random_elem -- random element of Lie algebra
def compute_random_element(basis):
mat_size = basis[0].ncols()
# choose coefficients from 1 to 100 inclusive
scaling = [randint(1,100) for p in range(len(basis))]
random_elem = matrix(QQ,mat_size)
for i in range(len(basis)):
random_elem = random_elem + scaling[i] * basis[i]
return random_elem
# extract_weights(): determines a list of weights
# Input: diag_mat_list -- set of diagonal matrices after simultaneously
# diagonalizing basis for the Cartan
# Output: weight_vec_list -- list of weights
def extract_weights(diag_mat_list):
# extract the diagonals from the diagonalized matrices
diag_vec_list = []
for elem in diag_mat_list:
diag_vec_list.append(elem.diagonal())
# dim_H is the dimension of Cartan subalgebra
# dim_V is the dimension of the entire space
dim_H = len(diag_vec_list)
dim_V = len(diag_vec_list[0])
weight_vec_list = []
# for ith index in each diagonal
for i in range(dim_V):
# for jth diagonal vector, create a vector across a common index
cur_vec = []
for j in range(dim_H):
cur_vec.append(diag_vec_list[j][i])
weight_vec_list.append(cur_vec)
return weight_vec_list
# highest_weight_gen(): determines direct sum of highest weight spaces
# Input: pos_root_vec -- set of positive root vectors
# Output: highest_weight_intersection -- direct sum of highest weight spaces
def highest_weight_gen(pos_root_vec):
spaces_to_intersect = []
for elem in pos_root_vec:
spaces_to_intersect.append(elem.right_kernel())
highest_weight_intersection = intersect_spaces(spaces_to_intersect)
return highest_weight_intersection
# intersect_spaces(): computes intersection of vector spaces in space_list
# Input: space_list -- list of vector spaces over common base ring
# Output: inter_space -- intersection of spaces
def intersect_spaces(space_list):
inter_space = space_list[0]
for space in space_list:
inter_space = inter_space.intersection(space)
return inter_space
# find_highest_weights(): finds the weights in weight_list which are highest weights
# Input: highest_weight_intersection -- intersection of the highest weight spaces
# weight_list -- list of all weights
# P -- matrix of simultaneous eigenvectors
# Output: highest_weights -- weights in weight_list which are highest weights
def find_highest_weights(highest_weight_intersection, weight_list, P):
highest_weights = []
col_list = P.columns()
for i in range(len(col_list)):
cur_weight_space = span([col_list[i]],QQ)
if highest_weight_intersection.intersection(cur_weight_space).dimension() != 0:
highest_weights.append(weight_list[i])
return highest_weights
# find_irreps(): finds the multiplicities of irreps
# Input: simple_roots -- list of simple roots
# highest_weights -- list of highest weights
# Output: irrep_dict -- dictionary mapping irrep identifier to frequency
def find_irreps(simple_roots, highest_weights):
# map from tuple to frequency
irrep_dict = {}
# build matrix of simple roots
simple_root_mat = matrix(QQ,simple_roots).transpose()
# solve for int coordinates of highest_weights wrt simple_root_mat
for elem in highest_weights:
coords = tuple(simple_root_mat.solve_right(vector(QQ,elem)))
if coords not in irrep_dict:
irrep_dict[coords] = 1
else:
irrep_dict[coords] += 1
return irrep_dict
# --------------------- MAIN SCRIPT ---------------------
# SL_3 Test
# e_1 = matrix([[0,1,0],[0,0,0],[0,0,0]])
# e_2 = matrix([[0,0,0],[1,0,0],[0,0,0]])
# e_3 = matrix([[0,0,0],[0,0,1],[0,0,0]])
# e_4 = matrix([[0,0,0],[0,0,0],[0,1,0]])
# gens = [e_1,e_2,e_3,e_4]
# SO_4 Test
# e_1 = matrix([[0,0,1,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]])
# e_2 = matrix([[0,0,0,0],[0,0,0,1],[0,0,0,0],[0,0,0,0]])
# e_3 = matrix([[0,0,0,0],[0,0,0,0],[1,0,0,0],[0,0,0,0]])
# e_4 = matrix([[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,1,0,0]])
# gens = [e_1,e_2,e_3,e_4]
# # P+1, P=6
# e = matrix([[0, 1, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 0, 2, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 7],[0, 0, 0, 0, 0, 0]])
# f = matrix([[0, 0, 0, 0, 0, 0],[1, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 1, 0, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 1, 0]])
# gens = [e,f]
# In GAP -- Compute:
# Lie algebra
# dimension of Lie algebra
# Cartan subalgebra
# basis for Cartan subalgebra
# root System for Lie algebra
# simple roots of Lie algebra
# positive root vectors of Lie algebra
# gens = [E,F]
# lie_alg = gap.LieAlgebra('Rationals',gens)
# alg_dim = gap.Dimension(lie_alg)
# cartan_alg = gap.CartanSubalgebra(lie_alg)
# cartan_basis = gap.BasisVectors(gap.Basis(cartan_alg))
# root_sys = gap.RootSystem(lie_alg)
# simple_roots = gap.SimpleSystem(root_sys)
# pos_root_vec = gap.PositiveRootVectors(root_sys)
# # convert from GAP to Sage format: cartan_basis
# sage_cartan_basis = []
# for elem in cartan_basis:
# sage_cartan_basis.append(matrix(QQ,elem))
# # convert from GAP to Sage format: pos_root_vec
# sage_pos_root_vec = []
# for elem in pos_root_vec:
# sage_pos_root_vec.append(matrix(QQ,elem))
# # convert from GAP to Sage format: simple_roots
# sage_simple_roots = []
# for elem in simple_roots:
# sage_simple_roots.append(list(elem))
# simultaneously diagonalize the Cartan basis
P, diag_mat_list = simultaneous_diag(sage_cartan_basis)
# extract the weights from the diagonalized matrices
weight_list = extract_weights(diag_mat_list)
# find the intersection of highest weight spaces
highest_weight_intersection = highest_weight_gen(sage_pos_root_vec)
# find the highest weights
highest_weights = find_highest_weights(highest_weight_intersection, weight_list, P)
# find coordinates of highest weights wrt simple roots
irrep_dict = find_irreps(sage_simple_roots, highest_weights)
|
[
"alb2281@columbia.edu"
] |
alb2281@columbia.edu
|
1d50b61828a456cb2f62f40d2b4df66539beed6a
|
262867f5676720d60387d39028079ba564bb0d87
|
/bot_news/ml_news/ml_news/ml.py
|
9110b160ffc7066ad520b72b573909cc937ae916
|
[] |
no_license
|
carlosb1/projects-rust
|
665da7a98a3c73bb6d23208f63718deb888e4f6b
|
43415681cd15a5a3745f135173654eba79fe6908
|
refs/heads/master
| 2023-09-03T15:46:34.422455
| 2023-08-18T20:53:24
| 2023-08-18T20:53:24
| 163,627,222
| 5
| 0
| null | 2023-03-24T23:41:54
| 2018-12-31T00:26:47
|
Rust
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
from transformers import AutoTokenizer, AutoConfig
from transformers import AutoModelForSequenceClassification
from transformers import TextClassificationPipeline
def model_fn(name_model):
tokenizer = AutoTokenizer.from_pretrained(name_model)
model = AutoModelForSequenceClassification.from_pretrained(name_model)
return model, tokenizer
def predict_fn(input_data, model):
trained_model, tokenizer = model
pipe = TextClassificationPipeline(model=trained_model, tokenizer=tokenizer)
output = pipe(input_data)
return output
SENTIMENT_MODEL = 'nlptown/bert-base-multilingual-uncased-sentiment'
class MyBertTransformerSentimentAnalysis():
def __init__(self, name_model: str = SENTIMENT_MODEL):
self.model_tuple = model_fn(name_model)
def run(self, input_data: str) -> dict:
return predict_fn(input_data, self.model_tuple)
|
[
"carlos.baezruiz@gmail.com"
] |
carlos.baezruiz@gmail.com
|
b03d463ca4f81654c0ca10f1a8a910e295f5ae85
|
8a6bac97182629f426e442308f6db53ee932e537
|
/venv/Lib/site-packages/django/contrib/gis/db/backends/oracle/adapter.py
|
40989df765a8ea953c4834167ea168d8fd853b8e
|
[] |
no_license
|
AmalioF96/DashBoard
|
8b8af75e7db7ab095c0cd05acb8b2b2764ab5fd5
|
4500a84a934fd5c24199d1864f0667c0d90e6174
|
refs/heads/master
| 2023-01-08T02:03:05.168925
| 2020-11-07T12:19:53
| 2020-11-07T12:19:53
| 230,789,973
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
"""Fix single polygon orientation as described in __init__()."""
if poly.empty:
return poly
if not poly.exterior_ring.is_counterclockwise:
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if poly[i].is_counterclockwise:
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
"""
Fix polygon orientations in geometry collections as described in
__init__().
"""
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
|
[
"amaliocabeza.16@gmail.com"
] |
amaliocabeza.16@gmail.com
|
5804b448d279b66e3077be6b2016ef4e6230d463
|
46279163a543cd8820bdc38133404d79e787c5d2
|
/benchmarks/tensorexpr/reduction.py
|
bc3e4e158a1750a0c9732c91297461f01ff5126b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
erwincoumans/pytorch
|
31738b65e7b998bfdc28d0e8afa7dadeeda81a08
|
ae9f39eb580c4d92157236d64548b055f71cf14b
|
refs/heads/master
| 2023-01-23T10:27:33.628897
| 2020-12-06T01:22:00
| 2020-12-06T01:23:40
| 318,930,000
| 5
| 1
|
NOASSERTION
| 2020-12-06T01:58:57
| 2020-12-06T01:58:56
| null |
UTF-8
|
Python
| false
| false
| 5,706
|
py
|
from . import benchmark
class ReduceBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, case, M, N, K):
super().__init__(mode, device, dtype)
self.case = case
self.M = M
self.N = N
self.K = K
self.inputs = [self.randn(
[M, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
if case == "row":
self.dims = [1, 2]
elif case == "mid":
self.dims = [0, 2]
elif case == "col":
self.dims = [0, 1]
else:
raise ValueError("invalid case: %s" % case)
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.sum(x, self.dims)
return y
def config(self):
return [self.M, self.N, self.K]
@staticmethod
def default_configs():
return [
# [512, 512, 512],
[512, 64, 512],
]
@staticmethod
def module():
return "reduce"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1
algorithmic_count = 1
else:
sol_count = (1) + (1)
algorithmic_count = 1 + 1
buffer_size = self.M * self.N * self.K
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
class ReduceRowBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K):
super(ReduceRowBench, self).__init__(mode, device, dtype, "row", M, N, K)
@staticmethod
def module():
return "reduce_row"
class ReduceMidBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K):
super(ReduceMidBench, self).__init__(mode, device, dtype, "mid", M, N, K)
@staticmethod
def module():
return "reduce_mid"
class ReduceColBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K):
super(ReduceColBench, self).__init__(mode, device, dtype, "col", M, N, K)
@staticmethod
def module():
return "reduce_col"
class Reduce2DBench(benchmark.Benchmark):
'''
A benchmark class to validate 2 dimensional reduction performance.
Only a simple add is fused to induce the fuser and isolate reduction perf.
'''
def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
super().__init__(mode, device, dtype)
self.red_dim = red_dim
self.dim0 = dim0
self.dim1 = dim1
self.inputs = [self.randn(
[dim0, dim1], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
if red_dim != 0 and red_dim != 1 :
raise ValueError("invalid reduction dimension: {}".format(red_dim))
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.sum(x, [self.red_dim])
return y
def config(self):
return [self.red_dim, self.dim0, self.dim1]
@staticmethod
def default_configs():
return [
[1, 640, 524288],
]
@staticmethod
def module():
return "reduce2d"
@staticmethod
def input_iterable() :
return True
def memory_workload(self):
assert self.mode == "fwd", "Only the forward operation is modeled!"
buffer_size = self.dim0 * self.dim1
if self.red_dim == 0 :
buffer_size += self.dim1
else :
buffer_size += self.dim0
return {
"sol": buffer_size,
"algorithmic": buffer_size,
}
class Reduce2DInnerBench(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super(Reduce2DInnerBench, self).__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def module():
return "reduce2d_inner"
class Reduce2DOuterBench(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super(Reduce2DOuterBench, self).__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def module():
return "reduce2d_outer"
benchmark.register_benchmark_class(ReduceRowBench)
benchmark.register_benchmark_class(ReduceMidBench)
benchmark.register_benchmark_class(ReduceColBench)
benchmark.register_benchmark_class(Reduce2DInnerBench)
benchmark.register_benchmark_class(Reduce2DOuterBench)
class DynamicReduce2DBench(benchmark.DynamicShape, Reduce2DBench):
'''
A benchmark class to validate 2 dimensional reduction performance.
Only a simple add is fused to induce the fuser and isolate reduction perf.
'''
def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
benchmark.DynamicShape.__init__(self)
Reduce2DBench.__init__(self, mode, device, dtype, red_dim, dim0, dim1)
def instantiate_input(self):
dim0, dim1 = self.rand_shape([self.dim0, self.dim1])
self.inputs = [self.randn(
[dim0, dim1], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)]
@staticmethod
def module():
return "dynamicreduce2d"
class DynamicReduce2DInnerBench(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def module():
return "reduce2d_dynamic_inner"
class DynamicReduce2DOuterBench(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def module():
return "reduce2d_dynamic_outer"
benchmark.register_benchmark_class(DynamicReduce2DInnerBench)
benchmark.register_benchmark_class(DynamicReduce2DOuterBench)
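# Illustrative sketch (an assumption about the harness, not part of the original file):
# the Dynamic* variants re-randomize their input shape each time the harness calls
# instantiate_input(), drawing a fresh (dim0, dim1) pair via
# benchmark.DynamicShape.rand_shape, so the generated kernel is exercised with changing
# shapes rather than one fixed size.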
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
084d8ca89f293bf5398b5ab07d7076af43a5fb8d
|
590a0c3a7254b8dac85ab18072dbf766aca7af93
|
/Python-Exercise-100/python-exercise-example07.py
|
01777ba168c7f8e9c5ee7615fd7642d9f407aaf6
|
[
"MIT"
] |
permissive
|
MiracleWong/PythonPractice
|
90c66d29a9cdf0200d3dbac946d05f12dd856e91
|
40aecd84045ad18f6aff95d5b8be8e352ca0a726
|
refs/heads/master
| 2021-08-15T17:19:51.543013
| 2021-06-15T03:59:51
| 2021-06-15T03:59:51
| 98,256,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Source: http://www.runoob.com/python/python-exercise-example07.html
# Exercise 7: copy the data from list a into a new, independent list b.
a = [1, 2, 4, 5, 5, 6, 7, 7]
b = a[:]  # slicing the full list returns a shallow copy
print(b)
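# Illustrative addition (not part of the original exercise): because b is a slice copy,
# mutating it leaves a untouched.
b.append(99)
print(a)  # still [1, 2, 4, 5, 5, 6, 7, 7]
print(b)  # [1, 2, 4, 5, 5, 6, 7, 7, 99]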
|
[
"cfwr1991@126.com"
] |
cfwr1991@126.com
|
49ad24efef53d23c86760ee96c78f87e3dbe2cf5
|
7200d065030f2daf00a5249e9e4fe569438c78c7
|
/scrapers/dizilab_scraper.py
|
76713de8e84af6b17220f3eaed0295e7b7a714f8
|
[] |
no_license
|
matt2005/salts
|
c765b037be1a2bb0e486ae9b30eceaf2b7c3bf14
|
5f71bc71e7b0b480f40d948d5568604dd181b6ad
|
refs/heads/master
| 2020-12-31T04:16:45.574380
| 2015-12-07T22:57:31
| 2015-12-07T22:57:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,957
|
py
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import urllib
from salts_lib import kodi
from salts_lib import dom_parser
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
BASE_URL = 'http://dizilab.com'
class Dizilab_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'Dizilab'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s ' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
            # each playable stream appears in the page markup as a file: "<url>" entry
            for match in re.finditer('{\s*file\s*:\s*"([^"]+)', html):
stream_url = match.group(1)
if 'dizlab' in stream_url.lower():
continue
hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'quality': self._gv_get_quality(stream_url), 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(Dizilab_Scraper, self)._default_get_url(video)
def _get_episode_url(self, show_url, video):
episode_pattern = 'class="episode"\s+href="([^"]+/sezon-%s/bolum-%s)"' % (video.season, video.episode)
title_pattern = 'class="episode-name"\s+href="(?P<url>[^"]+)">(?P<title>[^<]+)'
return super(Dizilab_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/arsiv?limit=&tur=&orderby=&ulke=&order=&yil=&dizi_adi=')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=8)
results = []
for item in dom_parser.parse_dom(html, 'div', {'class': 'tv-series-single'}):
try:
url = re.search('href="([^"]+)', item).group(1)
            except Exception:
url = ''
try:
match_year = re.search('<span>\s*(\d{4})\s*</span>', item).group(1)
            except Exception:
match_year = ''
try:
match_title = dom_parser.parse_dom(item, 'a', {'class': 'title'})
match_title = re.search('([^>]+)$', match_title[0]).group(1)
match_title = match_title.strip()
            except Exception:
match_title = ''
if url and match_title and (not year or not match_year or year == match_year):
result = {'url': self._pathify_url(url), 'title': match_title, 'year': ''}
results.append(result)
return results
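# Illustrative notes (not part of the original scraper):
# - _get_episode_url builds its regex by substitution; for season 2, episode 5 the
#   episode_pattern becomes: class="episode"\s+href="([^"]+/sezon-2/bolum-5)"
# - search() URL-encodes the title with urllib.quote_plus, so an example query like
#   "kara para" is appended to the /arsiv search URL as "kara+para".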
|
[
"tknorris@gmail.com"
] |
tknorris@gmail.com
|
6b51b24a86d97f35f69a59c8dbc0e913bf0876c9
|
cdf9ba7b329d66a1b664d505332d4a441f6bf075
|
/benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_pinned/cmp_mcf/power.py
|
ba961d5f8f3483e208416648d0c7e4f2c4795df5
|
[
"MIT"
] |
permissive
|
TugberkArkose/MLScheduler
|
3247c0bbc11c09261a3bad777f3940a465e5f15a
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
refs/heads/master
| 2021-03-27T19:11:44.207818
| 2020-03-19T11:32:08
| 2020-03-19T11:32:08
| 92,518,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68,592
|
py
|
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115405,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.19984,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.114614,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.429859,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.114073,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.08077,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00418352,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.030252,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0309397,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.030252,
'Execution Unit/Register Files/Runtime Dynamic': 0.0351232,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0731013,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.213101,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.28615,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000506958,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000506958,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000440908,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000170326,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000444452,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00189928,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00488396,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0297431,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.89192,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0581824,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.101021,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.20366,
'Instruction Fetch Unit/Runtime Dynamic': 0.19573,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0379509,
'L2/Runtime Dynamic': 0.00918222,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.39798,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.571277,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0375566,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0375566,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.57605,
'Load Store Unit/Runtime Dynamic': 0.79405,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0926082,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.185217,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0328669,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0334364,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.117632,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00953991,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.332951,
'Memory Management Unit/Runtime Dynamic': 0.0429763,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 16.7931,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00590118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0622644,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.0681656,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 2.39625,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0870089,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140342,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.07084,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.298191,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0995127,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01747,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364955,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0263907,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269906,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0263907,
'Execution Unit/Register Files/Runtime Dynamic': 0.0306402,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0555979,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.162075,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09897,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000458365,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000458365,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402941,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000158012,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387723,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170739,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00426236,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259468,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.65044,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.050756,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0881269,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.94905,
'Instruction Fetch Unit/Runtime Dynamic': 0.170799,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321542,
'L2/Runtime Dynamic': 0.007576,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24982,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.497683,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327632,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327632,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40453,
'Load Store Unit/Runtime Dynamic': 0.692023,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0807884,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161577,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286721,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0291546,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102618,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00832216,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307981,
'Memory Management Unit/Runtime Dynamic': 0.0374767,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.3007,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0039256,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0458316,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0497572,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.0566,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0869202,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140199,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0707678,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.297887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0994127,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01728,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364582,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0263642,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269631,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0263642,
'Execution Unit/Register Files/Runtime Dynamic': 0.0306089,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.055542,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.16191,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09847,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000457936,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000457936,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402566,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000157866,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387327,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170576,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00425829,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259203,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.64875,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0507027,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0880371,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.94729,
'Instruction Fetch Unit/Runtime Dynamic': 0.170624,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321237,
'L2/Runtime Dynamic': 0.00756408,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24879,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.497168,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327299,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327298,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40335,
'Load Store Unit/Runtime Dynamic': 0.691309,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0807063,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161412,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286429,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0291248,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102513,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00831343,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307826,
'Memory Management Unit/Runtime Dynamic': 0.0374383,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2973,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0039216,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0457848,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0497064,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.05511,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0868907,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140151,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0707437,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.297786,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0993778,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01721,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364458,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.026355,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269539,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.026355,
'Execution Unit/Register Files/Runtime Dynamic': 0.0305985,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0555225,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.161855,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09831,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000457793,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000457793,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402441,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000157818,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387195,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170522,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00425693,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259115,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.64819,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0506849,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0880071,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.9467,
'Instruction Fetch Unit/Runtime Dynamic': 0.170566,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321135,
'L2/Runtime Dynamic': 0.00756057,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24844,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.496997,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327187,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327186,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40295,
'Load Store Unit/Runtime Dynamic': 0.691073,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0806787,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161357,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286331,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.029115,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102479,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00831051,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307774,
'Memory Management Unit/Runtime Dynamic': 0.0374255,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2962,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00392027,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0457692,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0496895,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.05462,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.739548837198542,
'Runtime Dynamic': 5.739548837198542,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.280118,
'Runtime Dynamic': 0.0738874,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 59.9674,
'Peak Power': 93.0796,
'Runtime Dynamic': 8.63648,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 59.6873,
'Total Cores/Runtime Dynamic': 8.56259,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.280118,
'Total L3s/Runtime Dynamic': 0.0738874,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
|
[
"dn@fabre.ac.upc.es"
] |
dn@fabre.ac.upc.es
|
2c21c9fdf85b8db3d86708de109471dd19577441
|
3ed216ddff0ce7c303c33cfb54c0153518ee26d6
|
/2_Last Position & Math Table.py
|
594b4079ef607f75ec526eb8776c3f43f911e3bb
|
[] |
no_license
|
Tuseeq1/PythonPractice
|
9d289e49b71b00701100e22120d37f76d0bba8f7
|
c1b3f9e1844be11b1211add17dcdffaeaf0820c1
|
refs/heads/master
| 2020-03-26T11:13:28.165390
| 2018-08-15T09:42:47
| 2018-08-15T09:42:47
| 144,834,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Define a procedure, print_multiplication_table,
# that takes as input a positive whole number, and prints out a multiplication,
# table showing all the whole number multiplications up to and including the
# input number. The order in which the equations are printed matters.
def print_multiplication_table( n ):
    # Loop over every pair (i, j) with 1 <= i, j <= n and print one equation per line.
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            print("%d * %d = %d" % (i, j, i * j))
#print_multiplication_table(2)
#>>> 1 * 1 = 1
#>>> 1 * 2 = 2
#>>> 2 * 1 = 2
#>>> 2 * 2 = 4
#print_multiplication_table(3)
#>>> 1 * 1 = 1
#>>> 1 * 2 = 2
#>>> 1 * 3 = 3
#>>> 2 * 1 = 2
#>>> 2 * 2 = 4
#>>> 2 * 3 = 6
#>>> 3 * 1 = 3
#>>> 3 * 2 = 6
#>>> 3 * 3 = 9
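# A minimal driver sketch for the completed procedure above (the call below is an
# assumption for illustration; the original exercise file defines no driver code):
if __name__ == "__main__":
    print_multiplication_table(3)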
|
[
"noreply@github.com"
] |
Tuseeq1.noreply@github.com
|
af928c4a421a6a4199fcdf6c6e6f13a037405bf3
|
4870cf316c69e6c404915318839b9bffd19233ba
|
/haystack/pipeline.py
|
bbad3380406c5891a4e24ae9272fa5f263f8dc7d
|
[
"Apache-2.0"
] |
permissive
|
marjanhs/haystack
|
bdf16e3f7365772462efd199ceb3f9654e1c3715
|
2a226daac4ceec3eb9707fa6618500e247929684
|
refs/heads/master
| 2023-07-12T06:42:30.266327
| 2021-08-20T15:01:55
| 2021-08-20T15:01:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58,675
|
py
|
import copy
import inspect
import logging
import os
import traceback
from abc import ABC
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, Dict, Union, Any
import pickle
import urllib
from functools import wraps
try:
from ray import serve
import ray
except:
ray = None
serve = None
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline
import networkx as nx
import yaml
from networkx import DiGraph
from networkx.drawing.nx_agraph import to_agraph
from haystack import BaseComponent
from haystack.generator.base import BaseGenerator
from haystack.reader.base import BaseReader
from haystack.retriever.base import BaseRetriever
from haystack.summarizer.base import BaseSummarizer
from haystack.translator.base import BaseTranslator
from haystack.knowledge_graph.base import BaseKnowledgeGraph
from haystack.graph_retriever.base import BaseGraphRetriever
logger = logging.getLogger(__name__)
class BasePipeline:
def run(self, **kwargs):
raise NotImplementedError
@classmethod
def load_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None, overwrite_with_env_variables: bool = True):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.8'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
pipeline_config = cls._get_pipeline_config_from_yaml(path=path, pipeline_name=pipeline_name)
if pipeline_config["type"] == "Pipeline":
return Pipeline.load_from_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
elif pipeline_config["type"] == "RayPipeline":
return RayPipeline.load_from_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
else:
raise KeyError(f"Pipeline Type '{pipeline_config['type']}' is not a valid. The available types are"
f"'Pipeline' and 'RayPipeline'.")
@classmethod
def _get_pipeline_config_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None):
"""
Get the definition of Pipeline from a given YAML. If the YAML contains more than one Pipeline,
then the pipeline_name must be supplied.
:param path: Path of Pipeline YAML file.
:param pipeline_name: name of the Pipeline.
"""
with open(path, "r", encoding='utf-8') as stream:
data = yaml.safe_load(stream)
if pipeline_name is None:
if len(data["pipelines"]) == 1:
pipeline_config = data["pipelines"][0]
else:
raise Exception("The YAML contains multiple pipelines. Please specify the pipeline name to load.")
else:
pipelines_in_yaml = list(filter(lambda p: p["name"] == pipeline_name, data["pipelines"]))
if not pipelines_in_yaml:
raise KeyError(f"Cannot find any pipeline with name '{pipeline_name}' declared in the YAML file.")
pipeline_config = pipelines_in_yaml[0]
return pipeline_config
@classmethod
def _read_yaml(cls, path: Path, pipeline_name: Optional[str], overwrite_with_env_variables: bool):
"""
Parse the YAML and return the full YAML config, pipeline_config, and definitions of all components.
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
with open(path, "r", encoding="utf-8") as stream:
data = yaml.safe_load(stream)
pipeline_config = cls._get_pipeline_config_from_yaml(path=path, pipeline_name=pipeline_name)
definitions = {} # definitions of each component from the YAML.
component_definitions = copy.deepcopy(data["components"])
for definition in component_definitions:
if overwrite_with_env_variables:
cls._overwrite_with_env_variables(definition)
name = definition.pop("name")
definitions[name] = definition
return data, pipeline_config, definitions
@classmethod
def _overwrite_with_env_variables(cls, definition: dict):
"""
Overwrite the YAML configuration with environment variables. For example, to change index name param for an
ElasticsearchDocumentStore, an env variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
:param definition: a dictionary containing the YAML definition of a component.
"""
env_prefix = f"{definition['name']}_params_".upper()
for key, value in os.environ.items():
if key.startswith(env_prefix):
param_name = key.replace(env_prefix, "").lower()
definition["params"][param_name] = value
class Pipeline(BasePipeline):
"""
Pipeline brings together building blocks to build a complex search pipeline with Haystack & user-defined components.
Under-the-hood, a pipeline is represented as a directed acyclic graph of component nodes. It enables custom query
    flows with options to branch queries (e.g., extractive QA vs. keyword match query), merge candidate documents for a
Reader from multiple Retrievers, or re-ranking of candidate documents.
"""
def __init__(self):
self.graph = DiGraph()
self.root_node = None
self.components: dict = {}
def add_node(self, component, name: str, inputs: List[str]):
"""
Add a new node to the pipeline.
:param component: The object to be called when the data is passed to the node. It can be a Haystack component
(like Retriever, Reader, or Generator) or a user-defined object that implements a run()
method to process incoming data from predecessor node.
:param name: The name for the node. It must not contain any dots.
        :param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
                       of the node is sufficient. For instance, an 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
if self.root_node is None:
root_node = inputs[0]
if root_node in ["Query", "File"]:
self.root_node = root_node
self.graph.add_node(root_node, component=RootNode())
else:
raise KeyError(f"Root node '{root_node}' is invalid. Available options are 'Query' and 'File'.")
self.graph.add_node(name, component=component, inputs=inputs)
if len(self.graph.nodes) == 2: # first node added; connect with Root
assert len(inputs) == 1 and inputs[0].split(".")[0] == self.root_node, \
f"The '{name}' node can only input from {self.root_node}. " \
f"Set the 'inputs' parameter to ['{self.root_node}']"
self.graph.add_edge(self.root_node, name, label="output_1")
return
for i in inputs:
if "." in i:
[input_node_name, input_edge_name] = i.split(".")
assert "output_" in input_edge_name, f"'{input_edge_name}' is not a valid edge name."
outgoing_edges_input_node = self.graph.nodes[input_node_name]["component"].outgoing_edges
assert int(input_edge_name.split("_")[1]) <= outgoing_edges_input_node, (
f"Cannot connect '{input_edge_name}' from '{input_node_name}' as it only has "
f"{outgoing_edges_input_node} outgoing edge(s)."
)
else:
outgoing_edges_input_node = self.graph.nodes[i]["component"].outgoing_edges
assert outgoing_edges_input_node == 1, (
f"Adding an edge from {i} to {name} is ambiguous as {i} has {outgoing_edges_input_node} edges. "
f"Please specify the output explicitly."
)
input_node_name = i
input_edge_name = "output_1"
self.graph.add_edge(input_node_name, name, label=input_edge_name)
def get_node(self, name: str) -> Optional[BaseComponent]:
"""
Get a node from the Pipeline.
:param name: The name of the node.
"""
graph_node = self.graph.nodes.get(name)
component = graph_node["component"] if graph_node else None
return component
def set_node(self, name: str, component):
"""
Set the component for a node in the Pipeline.
:param name: The name of the node.
:param component: The component object to be set at the node.
"""
self.graph.nodes[name]["component"] = component
def run(self, **kwargs):
node_output = None
queue = {
self.root_node: {"root_node": self.root_node, **kwargs}
} # ordered dict with "node_id" -> "input" mapping that acts as a FIFO queue
i = 0 # the first item is popped off the queue unless it is a "join" node with unprocessed predecessors
while queue:
node_id = list(queue.keys())[i]
node_input = queue[node_id]
node_input["node_id"] = node_id
predecessors = set(nx.ancestors(self.graph, node_id))
if predecessors.isdisjoint(set(queue.keys())): # only execute if predecessor nodes are executed
try:
logger.debug(f"Running node `{node_id}` with input `{node_input}`")
node_output, stream_id = self.graph.nodes[node_id]["component"].run(**node_input)
except Exception as e:
tb = traceback.format_exc()
raise Exception(f"Exception while running node `{node_id}` with input `{node_input}`: {e}, full stack trace: {tb}")
queue.pop(node_id)
next_nodes = self.get_next_nodes(node_id, stream_id)
for n in next_nodes: # add successor nodes with corresponding inputs to the queue
if queue.get(n): # concatenate inputs if it's a join node
existing_input = queue[n]
if "inputs" not in existing_input.keys():
updated_input = {"inputs": [existing_input, node_output]}
else:
existing_input["inputs"].append(node_output)
updated_input = existing_input
queue[n] = updated_input
else:
queue[n] = node_output
i = 0
else:
i += 1 # attempt executing next node in the queue as current `node_id` has unprocessed predecessors
return node_output
def get_next_nodes(self, node_id: str, stream_id: str):
current_node_edges = self.graph.edges(node_id, data=True)
next_nodes = [
next_node
for _, next_node, data in current_node_edges
if not stream_id or data["label"] == stream_id or stream_id == "output_all"
]
return next_nodes
def draw(self, path: Path = Path("pipeline.png")):
"""
Create a Graphviz visualization of the pipeline.
:param path: the path to save the image.
"""
try:
import pygraphviz
except ImportError:
raise ImportError(f"Could not import `pygraphviz`. Please install via: \n"
f"pip install pygraphviz\n"
f"(You might need to run this first: apt install libgraphviz-dev graphviz )")
graphviz = to_agraph(self.graph)
graphviz.layout("dot")
graphviz.draw(path)
@classmethod
def load_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None, overwrite_with_env_variables: bool = True):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.8'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
data, pipeline_config, definitions = cls._read_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
pipeline = cls()
components: dict = {} # instances of component objects.
for node_config in pipeline_config["nodes"]:
name = node_config["name"]
component = cls._load_or_get_component(name=name, definitions=definitions, components=components)
pipeline.add_node(component=component, name=node_config["name"], inputs=node_config.get("inputs", []))
return pipeline
@classmethod
def _load_or_get_component(cls, name: str, definitions: dict, components: dict):
"""
Load a component from the definition or return if component object already present in `components` dict.
:param name: name of the component to load or get.
:param definitions: dict containing definitions of all components retrieved from the YAML.
:param components: dict containing component objects.
"""
try:
if name in components.keys(): # check if component is already loaded.
return components[name]
component_params = definitions[name].get("params", {})
component_type = definitions[name]["type"]
logger.debug(f"Loading component `{name}` of type `{definitions[name]['type']}`")
for key, value in component_params.items():
                # Component params can reference other components. For instance, a Retriever can reference a
# DocumentStore defined in the YAML. All references should be recursively resolved.
if isinstance(value, str) and value in definitions.keys(): # check if the param value is a reference to another component.
if value not in components.keys(): # check if the referenced component is already loaded.
cls._load_or_get_component(name=value, definitions=definitions, components=components)
component_params[key] = components[value] # substitute reference (string) with the component object.
instance = BaseComponent.load_from_args(component_type=component_type, **component_params)
components[name] = instance
except Exception as e:
raise Exception(f"Failed loading pipeline component '{name}': {e}")
return instance
def save_to_yaml(self, path: Path, return_defaults: bool = False):
"""
Save a YAML configuration for the Pipeline that can be used with `Pipeline.load_from_yaml()`.
:param path: path of the output YAML file.
:param return_defaults: whether to output parameters that have the default values.
"""
nodes = self.graph.nodes
pipeline_name = self.root_node.lower()
pipelines: dict = {pipeline_name: {"name": pipeline_name, "type": "Pipeline", "nodes": []}}
components = {}
for node in nodes:
if node == self.root_node:
continue
component_instance = self.graph.nodes.get(node)["component"]
component_type = component_instance.pipeline_config["type"]
component_params = component_instance.pipeline_config["params"]
components[node] = {"name": node, "type": component_type, "params": {}}
component_signature = inspect.signature(type(component_instance)).parameters
for key, value in component_params.items():
# A parameter for a Component could be another Component. For instance, a Retriever has
# the DocumentStore as a parameter.
                # Component configs must be a dict with a "type" key. The "type" key distinguishes them from
                # other dict parameters like "custom_mapping".
                # This currently only handles the single-level nesting case, wherein "a Component has another
                # Component as a parameter". For deeper nesting, this function should be made recursive.
if isinstance(value, dict) and "type" in value.keys(): # the parameter is a Component
components[node]["params"][key] = value["type"]
sub_component_signature = inspect.signature(BaseComponent.subclasses[value["type"]]).parameters
params = {
k: v for k, v in value["params"].items()
if sub_component_signature[k].default != v or return_defaults is True
}
components[value["type"]] = {"name": value["type"], "type": value["type"], "params": params}
else:
if component_signature[key].default != value or return_defaults is True:
components[node]["params"][key] = value
# create the Pipeline definition with how the Component are connected
pipelines[pipeline_name]["nodes"].append({"name": node, "inputs": list(self.graph.predecessors(node))})
config = {"components": list(components.values()), "pipelines": list(pipelines.values()), "version": "0.8"}
with open(path, 'w') as outfile:
yaml.dump(config, outfile, default_flow_style=False)
class BaseStandardPipeline(ABC):
pipeline: Pipeline
def add_node(self, component, name: str, inputs: List[str]):
"""
Add a new node to the pipeline.
:param component: The object to be called when the data is passed to the node. It can be a Haystack component
(like Retriever, Reader, or Generator) or a user-defined object that implements a run()
method to process incoming data from predecessor node.
:param name: The name for the node. It must not contain any dots.
        :param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
                       of the node is sufficient. For instance, an 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.pipeline.add_node(component=component, name=name, inputs=inputs)
def get_node(self, name: str):
"""
Get a node from the Pipeline.
:param name: The name of the node.
"""
component = self.pipeline.get_node(name)
return component
def set_node(self, name: str, component):
"""
Set the component for a node in the Pipeline.
:param name: The name of the node.
:param component: The component object to be set at the node.
"""
self.pipeline.set_node(name, component)
def draw(self, path: Path = Path("pipeline.png")):
"""
Create a Graphviz visualization of the pipeline.
:param path: the path to save the image.
"""
self.pipeline.draw(path)
class ExtractiveQAPipeline(BaseStandardPipeline):
def __init__(self, reader: BaseReader, retriever: BaseRetriever):
"""
Initialize a Pipeline for Extractive Question Answering.
:param reader: Reader instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=reader, name="Reader", inputs=["Retriever"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: int = 10, top_k_reader: int = 10):
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, top_k_reader=top_k_reader
)
return output
class DocumentSearchPipeline(BaseStandardPipeline):
def __init__(self, retriever: BaseRetriever):
"""
Initialize a Pipeline for semantic document search.
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: Optional[int] = None):
output = self.pipeline.run(query=query, filters=filters, top_k_retriever=top_k_retriever)
document_dicts = [doc.to_dict() for doc in output["documents"]]
output["documents"] = document_dicts
return output
class GenerativeQAPipeline(BaseStandardPipeline):
def __init__(self, generator: BaseGenerator, retriever: BaseRetriever):
"""
Initialize a Pipeline for Generative Question Answering.
:param generator: Generator instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=generator, name="Generator", inputs=["Retriever"])
def run(
self,
query: str,
filters: Optional[Dict] = None,
top_k_retriever: Optional[int] = None,
top_k_generator: Optional[int] = None
):
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, top_k_generator=top_k_generator
)
return output
class SearchSummarizationPipeline(BaseStandardPipeline):
def __init__(self, summarizer: BaseSummarizer, retriever: BaseRetriever):
"""
Initialize a Pipeline that retrieves documents for a query and then summarizes those documents.
:param summarizer: Summarizer instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=summarizer, name="Summarizer", inputs=["Retriever"])
def run(
self,
query: str,
filters: Optional[Dict] = None,
top_k_retriever: Optional[int] = None,
generate_single_summary: Optional[bool] = None,
return_in_answer_format: bool = False,
):
"""
:param query: Your search query
:param filters:
:param top_k_retriever: Number of top docs the retriever should pass to the summarizer.
The higher this value, the slower your pipeline.
:param generate_single_summary: Whether to generate single summary from all retrieved docs (True) or one per doc (False).
:param return_in_answer_format: Whether the results should be returned as documents (False) or in the answer format used in other QA pipelines (True).
With the latter, you can use this pipeline as a "drop-in replacement" for other QA pipelines.
"""
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, generate_single_summary=generate_single_summary
)
# Convert to answer format to allow "drop-in replacement" for other QA pipelines
if return_in_answer_format:
results: Dict = {"query": query, "answers": []}
docs = deepcopy(output["documents"])
for doc in docs:
cur_answer = {
"query": query,
"answer": doc.text,
"document_id": doc.id,
"context": doc.meta.pop("context"),
"score": None,
"offset_start": None,
"offset_end": None,
"meta": doc.meta,
}
results["answers"].append(cur_answer)
else:
results = output
return results
class FAQPipeline(BaseStandardPipeline):
def __init__(self, retriever: BaseRetriever):
"""
Initialize a Pipeline for finding similar FAQs using semantic document search.
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: Optional[int] = None):
output = self.pipeline.run(query=query, filters=filters, top_k_retriever=top_k_retriever)
documents = output["documents"]
results: Dict = {"query": query, "answers": []}
for doc in documents:
# TODO proper calibration of pseudo probabilities
cur_answer = {
"query": doc.text,
"answer": doc.meta["answer"],
"document_id": doc.id,
"context": doc.meta["answer"],
"score": doc.score,
"offset_start": 0,
"offset_end": len(doc.meta["answer"]),
"meta": doc.meta,
}
results["answers"].append(cur_answer)
return results
class TranslationWrapperPipeline(BaseStandardPipeline):
"""
Takes an existing search pipeline and adds one "input translation node" after the Query and one
"output translation" node just before returning the results
"""
def __init__(
self,
input_translator: BaseTranslator,
output_translator: BaseTranslator,
pipeline: BaseStandardPipeline
):
"""
Wrap a given `pipeline` with the `input_translator` and `output_translator`.
:param input_translator: A Translator node that shall translate the input query from language A to B
:param output_translator: A Translator node that shall translate the pipeline results from language B to A
:param pipeline: The pipeline object (e.g. ExtractiveQAPipeline) you want to "wrap".
Note that pipelines with split or merge nodes are currently not supported.
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=input_translator, name="InputTranslator", inputs=["Query"])
graph = pipeline.pipeline.graph
previous_node_name = ["InputTranslator"]
# Traverse in BFS
for node in graph.nodes:
if node == "Query":
continue
            # TODO: Does not work properly for Join Nodes and the Answer format
if graph.nodes[node]["inputs"] and len(graph.nodes[node]["inputs"]) > 1:
raise AttributeError("Split and merge nodes are not supported currently")
self.pipeline.add_node(name=node, component=graph.nodes[node]["component"], inputs=previous_node_name)
previous_node_name = [node]
self.pipeline.add_node(component=output_translator, name="OutputTranslator", inputs=previous_node_name)
def run(self, **kwargs):
output = self.pipeline.run(**kwargs)
return output
class QuestionGenerationPipeline(BaseStandardPipeline):
"""
A simple pipeline that takes documents as input and generates
questions that it thinks can be answered by the documents.
"""
def __init__(self, question_generator):
self.pipeline = Pipeline()
self.pipeline.add_node(component=question_generator, name="QuestionGenerator", inputs=["Query"])
def run(self, documents, **kwargs):
kwargs["documents"] = documents
output = self.pipeline.run(**kwargs)
return output
class RetrieverQuestionGenerationPipeline(BaseStandardPipeline):
"""
A simple pipeline that takes a query as input, performs retrieval, and then generates
questions that it thinks can be answered by the retrieved documents.
"""
def __init__(self, retriever, question_generator):
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=question_generator, name="Question Generator", inputs=["Retriever"])
def run(self, query, **kwargs):
kwargs["query"] = query
output = self.pipeline.run(**kwargs)
return output
class QuestionAnswerGenerationPipeline(BaseStandardPipeline):
"""
This is a pipeline which takes a document as input, generates questions that the model thinks can be answered by
    this document, and then performs question answering on these questions using that single document.
"""
def __init__(self, question_generator, reader):
question_generator.run = self.formatting_wrapper(question_generator.run)
# Overwrite reader.run function so it can handle a batch of questions being passed on by the QuestionGenerator
reader.run = reader.run_batch
self.pipeline = Pipeline()
self.pipeline.add_node(component=question_generator, name="QuestionGenerator", inputs=["Query"])
self.pipeline.add_node(component=reader, name="Reader", inputs=["QuestionGenerator"])
# This is used to format the output of the QuestionGenerator so that its questions are ready to be answered by the reader
def formatting_wrapper(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
output, output_stream = fn(*args, **kwargs)
questions = output["generated_questions"][0]["questions"]
documents = output["documents"]
query_doc_list = []
for q in questions:
query_doc_list.append({"queries": q, "docs": documents})
kwargs["query_doc_list"] = query_doc_list
return kwargs, output_stream
return wrapper
def run(self, document, **kwargs):
kwargs["documents"] = [document]
output = self.pipeline.run(**kwargs)
return output
class RootNode(BaseComponent):
"""
RootNode feeds inputs(`query` or `file`) together with corresponding parameters to a Pipeline.
"""
outgoing_edges = 1
def run(self, **kwargs):
return kwargs, "output_1"
class SklearnQueryClassifier(BaseComponent):
"""
A node to classify an incoming query into one of two categories using a lightweight sklearn model. Depending on the result, the query flows to a different branch in your pipeline
and the further processing can be customized. You can define this by connecting the further pipeline to either `output_1` or `output_2` from this node.
Example:
```python
|{
|pipe = Pipeline()
|pipe.add_node(component=SklearnQueryClassifier(), name="QueryClassifier", inputs=["Query"])
|pipe.add_node(component=elastic_retriever, name="ElasticRetriever", inputs=["QueryClassifier.output_2"])
|pipe.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
|# Keyword queries will use the ElasticRetriever
|pipe.run("kubernetes aws")
|# Semantic queries (questions, statements, sentences ...) will leverage the DPR retriever
|pipe.run("How to manage kubernetes on aws")
```
Models:
Pass your own `Sklearn` binary classification model or use one of the following pretrained ones:
1) Keywords vs. Questions/Statements (Default)
query_classifier can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/model.pickle)
query_vectorizer can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/vectorizer.pickle)
output_1 => question/statement
output_2 => keyword query
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/readme.txt)
2) Questions vs. Statements
query_classifier can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/model.pickle)
query_vectorizer can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/vectorizer.pickle)
output_1 => question
output_2 => statement
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/readme.txt)
See also the [tutorial](https://haystack.deepset.ai/docs/latest/tutorial11md) on pipelines.
"""
outgoing_edges = 2
def __init__(
self,
model_name_or_path: Union[
str, Any
] = "https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/model.pickle",
vectorizer_name_or_path: Union[
str, Any
] = "https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/vectorizer.pickle"
):
"""
:param model_name_or_path: Gradient boosting based binary classifier to classify between keyword vs statement/question
queries or statement vs question queries.
:param vectorizer_name_or_path: A ngram based Tfidf vectorizer for extracting features from query.
"""
if (
(not isinstance(model_name_or_path, Path))
and (not isinstance(model_name_or_path, str))
) or (
(not isinstance(vectorizer_name_or_path, Path))
and (not isinstance(vectorizer_name_or_path, str))
):
raise TypeError(
"model_name_or_path and vectorizer_name_or_path must either be of type Path or str"
)
# save init parameters to enable export of component config as YAML
self.set_config(model_name_or_path=model_name_or_path, vectorizer_name_or_path=vectorizer_name_or_path)
if isinstance(model_name_or_path, Path):
file_url = urllib.request.pathname2url(r"{}".format(model_name_or_path))
model_name_or_path = f"file:{file_url}"
if isinstance(vectorizer_name_or_path, Path):
file_url = urllib.request.pathname2url(r"{}".format(vectorizer_name_or_path))
vectorizer_name_or_path = f"file:{file_url}"
self.model = pickle.load(urllib.request.urlopen(model_name_or_path))
self.vectorizer = pickle.load(urllib.request.urlopen(vectorizer_name_or_path))
def run(self, **kwargs):
query_vector = self.vectorizer.transform([kwargs["query"]])
is_question: bool = self.model.predict(query_vector)[0]
if is_question:
return (kwargs, "output_1")
else:
return (kwargs, "output_2")
class TransformersQueryClassifier(BaseComponent):
"""
A node to classify an incoming query into one of two categories using a (small) BERT transformer model. Depending on the result, the query flows to a different branch in your pipeline
and the further processing can be customized. You can define this by connecting the further pipeline to either `output_1` or `output_2` from this node.
Example:
```python
|{
|pipe = Pipeline()
|pipe.add_node(component=TransformersQueryClassifier(), name="QueryClassifier", inputs=["Query"])
|pipe.add_node(component=elastic_retriever, name="ElasticRetriever", inputs=["QueryClassifier.output_2"])
|pipe.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
|# Keyword queries will use the ElasticRetriever
|pipe.run("kubernetes aws")
|# Semantic queries (questions, statements, sentences ...) will leverage the DPR retriever
|pipe.run("How to manage kubernetes on aws")
```
Models:
Pass your own `Transformer` binary classification model from file/huggingface or use one of the following pretrained ones hosted on Huggingface:
1) Keywords vs. Questions/Statements (Default)
model_name_or_path="shahrukhx01/bert-mini-finetune-question-detection"
output_1 => question/statement
output_2 => keyword query
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/readme.txt)
2) Questions vs. Statements
`model_name_or_path`="shahrukhx01/question-vs-statement-classifier"
output_1 => question
output_2 => statement
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/readme.txt)
See also the [tutorial](https://haystack.deepset.ai/docs/latest/tutorial11md) on pipelines.
"""
outgoing_edges = 2
def __init__(
self,
model_name_or_path: Union[
Path, str
] = "shahrukhx01/bert-mini-finetune-question-detection"
):
"""
:param model_name_or_path: Transformer based fine tuned mini bert model for query classification
"""
# save init parameters to enable export of component config as YAML
self.set_config(model_name_or_path=model_name_or_path)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
self.query_classification_pipeline = TextClassificationPipeline(
model=model, tokenizer=tokenizer
)
def run(self, **kwargs):
is_question: bool = (
self.query_classification_pipeline(kwargs["query"])[0]["label"] == "LABEL_1"
)
if is_question:
return (kwargs, "output_1")
else:
return (kwargs, "output_2")
class JoinDocuments(BaseComponent):
"""
A node to join documents outputted by multiple retriever nodes.
The node allows multiple join modes:
* concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
* merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
`weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
"""
outgoing_edges = 1
def __init__(
self, join_mode: str = "concatenate", weights: Optional[List[float]] = None, top_k_join: Optional[int] = None
):
"""
:param join_mode: `concatenate` to combine documents from multiple retrievers or `merge` to aggregate scores of
individual documents.
        :param weights: A node-wise list (its length must equal the number of input nodes) of weights for
adjusting document scores when using the `merge` join_mode. By default, equal weight is given
to each retriever score. This param is not compatible with the `concatenate` join_mode.
:param top_k_join: Limit documents to top_k based on the resulting scores of the join.
"""
assert join_mode in ["concatenate", "merge"], f"JoinDocuments node does not support '{join_mode}' join_mode."
assert not (
weights is not None and join_mode == "concatenate"
), "Weights are not compatible with 'concatenate' join_mode."
# save init parameters to enable export of component config as YAML
self.set_config(join_mode=join_mode, weights=weights, top_k_join=top_k_join)
self.join_mode = join_mode
self.weights = [float(i)/sum(weights) for i in weights] if weights else None
self.top_k_join = top_k_join
def run(self, **kwargs):
inputs = kwargs["inputs"]
if self.join_mode == "concatenate":
document_map = {}
for input_from_node in inputs:
for doc in input_from_node["documents"]:
document_map[doc.id] = doc
elif self.join_mode == "merge":
document_map = {}
if self.weights:
weights = self.weights
else:
weights = [1/len(inputs)] * len(inputs)
for input_from_node, weight in zip(inputs, weights):
for doc in input_from_node["documents"]:
if document_map.get(doc.id): # document already exists; update score
document_map[doc.id].score += doc.score * weight
else: # add the document in map
document_map[doc.id] = deepcopy(doc)
document_map[doc.id].score *= weight
else:
raise Exception(f"Invalid join_mode: {self.join_mode}")
documents = sorted(document_map.values(), key=lambda d: d.score, reverse=True)
if self.top_k_join:
documents = documents[: self.top_k_join]
output = {"query": inputs[0]["query"], "documents": documents, "labels": inputs[0].get("labels", None)}
return output, "output_1"
class RayPipeline(Pipeline):
"""
Ray (https://ray.io) is a framework for distributed computing.
With Ray, the Pipeline nodes can be distributed across a cluster of machine(s).
    This allows scaling individual nodes independently. For instance, an extractive QA Pipeline can run multiple replicas
    of the Reader while keeping a single instance of the Retriever. It also enables efficient resource
utilization as load could be split across GPU vs CPU machines.
In the current implementation, a Ray Pipeline can only be created with a YAML Pipeline config.
>>> from haystack.pipeline import RayPipeline
>>> pipeline = RayPipeline.load_from_yaml(path="my_pipelines.yaml", pipeline_name="my_query_pipeline")
>>> pipeline.run(query="What is the capital of Germany?")
By default, RayPipelines creates an instance of RayServe locally. To connect to an existing Ray instance,
set the `address` parameter when creating RayPipeline instance.
"""
def __init__(self, address: str = None, **kwargs):
"""
:param address: The IP address for the Ray cluster. If set to None, a local Ray instance is started.
:param kwargs: Optional parameters for initializing Ray.
"""
ray.init(address=address, **kwargs)
serve.start()
super().__init__()
@classmethod
def load_from_yaml(
cls,
path: Path, pipeline_name: Optional[str] = None,
overwrite_with_env_variables: bool = True,
address: Optional[str] = None,
**kwargs,
):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.8'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
:param address: The IP address for the Ray cluster. If set to None, a local Ray instance is started.
"""
data, pipeline_config, definitions = cls._read_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
pipeline = cls(address=address, **kwargs)
for node_config in pipeline_config["nodes"]:
if pipeline.root_node is None:
root_node = node_config["inputs"][0]
if root_node in ["Query", "File"]:
pipeline.root_node = root_node
handle = cls._create_ray_deployment(component_name=root_node, pipeline_config=data)
pipeline._add_ray_deployment_in_graph(handle=handle, name=root_node, outgoing_edges=1, inputs=[])
else:
raise KeyError(f"Root node '{root_node}' is invalid. Available options are 'Query' and 'File'.")
name = node_config["name"]
component_type = definitions[name]["type"]
component_class = BaseComponent.get_subclass(component_type)
replicas = next(comp for comp in data["components"] if comp["name"] == name).get("replicas", 1)
handle = cls._create_ray_deployment(component_name=name, pipeline_config=data, replicas=replicas)
pipeline._add_ray_deployment_in_graph(
handle=handle,
name=name,
outgoing_edges=component_class.outgoing_edges,
inputs=node_config.get("inputs", []),
)
return pipeline
@classmethod
def _create_ray_deployment(cls, component_name: str, pipeline_config: dict, replicas: int = 1):
"""
Create a Ray Deployment for the Component.
:param component_name: Class name of the Haystack Component.
:param pipeline_config: The Pipeline config YAML parsed as a dict.
:param replicas: By default, a single replica of the component is created. It can be
configured by setting `replicas` parameter in the Pipeline YAML.
"""
RayDeployment = serve.deployment(_RayDeploymentWrapper, name=component_name, num_replicas=replicas)
RayDeployment.deploy(pipeline_config, component_name)
handle = RayDeployment.get_handle()
return handle
def run(self, **kwargs):
has_next_node = True
current_node_id = self.root_node
input_dict = {"root_node": self.root_node, **kwargs}
output_dict = None
while has_next_node:
output_dict, stream_id = ray.get(self.graph.nodes[current_node_id]["component"].remote(**input_dict))
input_dict = output_dict
next_nodes = self.get_next_nodes(current_node_id, stream_id)
if len(next_nodes) > 1:
join_node_id = list(nx.neighbors(self.graph, next_nodes[0]))[0]
if set(self.graph.predecessors(join_node_id)) != set(next_nodes):
raise NotImplementedError(
"The current pipeline does not support multiple levels of parallel nodes."
)
inputs_for_join_node = {"inputs": []}
for n_id in next_nodes:
output = self.graph.nodes[n_id]["component"].run(**input_dict)
inputs_for_join_node["inputs"].append(output)
input_dict = inputs_for_join_node
current_node_id = join_node_id
elif len(next_nodes) == 1:
current_node_id = next_nodes[0]
else:
has_next_node = False
return output_dict
def add_node(self, component, name: str, inputs: List[str]):
raise NotImplementedError(
"The current implementation of RayPipeline only supports loading Pipelines from a YAML file."
)
def _add_ray_deployment_in_graph(self, handle, name: str, outgoing_edges: int, inputs: List[str]):
"""
Add the Ray deployment handle in the Pipeline Graph.
        :param handle: Ray deployment `handle` to add to the Pipeline Graph. The handle allows calling a Ray deployment
from Python: https://docs.ray.io/en/master/serve/package-ref.html#servehandle-api.
:param name: The name for the node. It must not contain any dots.
        :param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
                       of the node is sufficient. For instance, an 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.graph.add_node(name, component=handle, inputs=inputs, outgoing_edges=outgoing_edges)
if len(self.graph.nodes) == 2: # first node added; connect with Root
self.graph.add_edge(self.root_node, name, label="output_1")
return
for i in inputs:
if "." in i:
[input_node_name, input_edge_name] = i.split(".")
assert "output_" in input_edge_name, f"'{input_edge_name}' is not a valid edge name."
outgoing_edges_input_node = self.graph.nodes[input_node_name]["component"].outgoing_edges
assert int(input_edge_name.split("_")[1]) <= outgoing_edges_input_node, (
f"Cannot connect '{input_edge_name}' from '{input_node_name}' as it only has "
f"{outgoing_edges_input_node} outgoing edge(s)."
)
else:
outgoing_edges_input_node = self.graph.nodes[i]["outgoing_edges"]
assert outgoing_edges_input_node == 1, (
f"Adding an edge from {i} to {name} is ambiguous as {i} has {outgoing_edges_input_node} edges. "
f"Please specify the output explicitly."
)
input_node_name = i
input_edge_name = "output_1"
self.graph.add_edge(input_node_name, name, label=input_edge_name)
class _RayDeploymentWrapper:
"""
    Ray Serve supports calling the __init__ method of a class to create "deployment" instances.
    In the case of Haystack, some Components like Retrievers have complex init methods that need objects
    like Document Stores.
This wrapper class encapsulates the initialization of Components. Given a Component Class
name, it creates an instance using the YAML Pipeline config.
"""
node: BaseComponent
def __init__(self, pipeline_config: dict, component_name: str):
"""
Create an instance of Component.
:param pipeline_config: Pipeline YAML parsed as a dict.
:param component_name: Component Class name.
"""
if component_name in ["Query", "File"]:
self.node = RootNode()
else:
self.node = BaseComponent.load_from_pipeline_config(pipeline_config, component_name)
def __call__(self, *args, **kwargs):
"""
Ray calls this method which is then re-directed to the corresponding component's run().
"""
return self.node.run(*args, **kwargs)
class Docs2Answers(BaseComponent):
outgoing_edges = 1
def __init__(self):
self.set_config()
def run(self, query, documents, **kwargs):
# conversion from Document -> Answer
answers = []
for doc in documents:
# For FAQ style QA use cases
if "answer" in doc.meta:
cur_answer = {
"query": doc.text,
"answer": doc.meta["answer"],
"document_id": doc.id,
"context": doc.meta["answer"],
"score": doc.score,
"offset_start": 0,
"offset_end": len(doc.meta["answer"]),
"meta": doc.meta,
}
else:
# Regular docs
cur_answer = {
"query": None,
"answer": None,
"document_id": doc.id,
"context": doc.text,
"score": doc.score,
"offset_start": None,
"offset_end": None,
"meta": doc.meta,
}
answers.append(cur_answer)
output = {"query": query, "answers": answers}
# Pass also the other incoming kwargs so that future nodes still have access to it
output.update(**kwargs)
return output, "output_1"
|
[
"noreply@github.com"
] |
marjanhs.noreply@github.com
|
26cb0c372639eca1917f3f89ff693d0b6ea8e6c8
|
c6c0ed7585ee7dbdb328e23ffd6f9f8e007b3356
|
/python/everything_app/trainer.py
|
cc842a06dc85bcf616831906fc6132a791114daf
|
[] |
no_license
|
yoavilovich/Everything_Test_App
|
51fe18d8a35d0899b109cae307292b4c7030973a
|
4d8c73c415fcfbed852ab57ff7efa0b332e5eb0b
|
refs/heads/master
| 2021-01-18T14:10:38.728437
| 2013-02-25T20:02:09
| 2013-02-25T20:02:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,752
|
py
|
'''
Created on Feb 25, 2013
@author: yoav
'''
import json
import nltk
import math
import urllib
import os, sys
### Trainer extracts a relevant dictionary from the training set and creates the occurrence matrix of the words in the movie plots
def get_training_set(): #extracts the training set from file into a python list
data = []
dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
path=os.path.join(dirname, "movies_train.json")
with open(path) as f:
for line in f:
data.append(json.loads(line))
return data
def get_dictionary(data):
# finds the most common words from combining all plots together,
# and creates a dictionary. Returns a list of all plots in training
# set and a list of all words (tokens) in all the plots
plots=[]
tokens=[]
for movie in data:
plots.append(movie["plot"])
#tokenized_movie_plot=nltk.word_tokenize(movie["plot"])
tokens=nltk.word_tokenize("".join(plots))
    # Rebinding the loop variable would not lowercase the list, so build a lowercased list instead.
    tokens = [t.lower() for t in tokens]
    #tokens.append(tokenized_movie_plot)
token_dist = nltk.FreqDist(tokens)
dictionary = token_dist.keys()[50:500]
#dictionary_values = token_dist.values()[50:500]
return (plots,tokens,dictionary)
def get_genre_dictionary (data): #returns a genre dictionary, i.e., all the possible genres
all_generes=[]
for movie in data:
movie_generes=movie["genres"]
for genre in movie_generes:
all_generes.append(genre)
#get unique categories
genre_dist = nltk.FreqDist(all_generes)
return genre_dist.keys()
#gets the indexes of the movies in genre c
def get_genre_indexes(c,dictionary,genre_dictionary):
selected_movie_genre=genre_dictionary[c]
genre_indexes=[]
for index,movie in enumerate(data):
movie_generes=movie["genres"]
for genre in movie_generes:
if genre==selected_movie_genre:
genre_indexes.append(index)
return genre_indexes
#the distribution of genres in train corpus, as probability
def get_genre_probability(c,dictionary,genre_dictionary):
return float(len(get_genre_indexes(c,dictionary,genre_dictionary)))/float(len(data))
#helper function for arithmetic: counts movies in genre c whose plot contains dictionary word i
def Nic(i,c,dictionary,genre_dictionary):
Nic=0
indexes = get_genre_indexes(c,dictionary,genre_dictionary)
for j in range(len(indexes)):
if dictionary[i] in plots[indexes[j]]:
Nic+=1
return Nic
#helper function for arithmetic: number of movies in genre c
def Nc(c,dictionary,genre_dictionary):
number_of_movies_in_genre=len(get_genre_indexes(c,dictionary,genre_dictionary))
return number_of_movies_in_genre
#helper function for arithmetic: Laplace-smoothed estimate of P(word i | genre c)
def Tetaic(i,c,dictionary,genre_dictionary):
teta=float(Nic(i,c,dictionary,genre_dictionary)+1)/float(Nc(c,dictionary,genre_dictionary)+2)
return teta
#calculates teta matrix with helper function
def getTeta(dictionary,genre_dictionary):
teta=[]
for c in range(len(genre_dictionary)):
teta_c=[]
for i in range(len(dictionary)):
teta_c.append(Tetaic(i,c,dictionary,genre_dictionary))
teta.append(teta_c)
return teta
data=get_training_set()
#sets initial data as global params
results=get_dictionary(data)
plots=results[0]
tokens=results[1]
dictionary=results[2]
genre_dictionary=get_genre_dictionary(data)
#produces the teta matrix and passes params to classifier
def main():
genre_probability=[]
for index in range(len(genre_dictionary)):
genre_probability.append(get_genre_probability(index,dictionary,genre_dictionary))
teta=getTeta(dictionary,genre_dictionary)
return (teta,dictionary,genre_dictionary,genre_probability)
if __name__ == "__main__":
main()
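# Illustrative sketch (not part of the original file): a classifier consuming the returned
# (teta, dictionary, genre_dictionary, genre_probability) would score a new plot with a
# Bernoulli Naive Bayes rule, roughly:
#
#   log P(c | plot) ~ log(genre_probability[c])
#                     + sum over i of ( log(teta[c][i])     if dictionary[i] in plot
#                                       log(1 - teta[c][i]) otherwise )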
|
[
"yoav.ilovich@outlook.com"
] |
yoav.ilovich@outlook.com
|
bcb87b977ae9f3dda477d957cc6ee78f8f5cdf2e
|
fbf6fcd3720d1a5f1f01f91c7ecad68f1b296924
|
/tools/test_modules.py
|
85199d0138cfbbde70f10f93fa006cc06675053a
|
[
"MIT"
] |
permissive
|
uvavision/DrillDown
|
9602ddabd712d14df10e7026db3d7e62e7e4edba
|
ad0ef773b3af0859e48ea302f4f1d87215b26cef
|
refs/heads/master
| 2022-04-28T21:42:06.366515
| 2022-04-15T12:14:25
| 2022-04-15T12:14:25
| 214,220,415
| 11
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,358
|
py
|
#!/usr/bin/env python
import _init_paths
import os, sys, cv2, json
import math, PIL, cairo
import numpy as np
import pickle, random
import os.path as osp
from time import time
from config import get_config
from copy import deepcopy
from glob import glob
import matplotlib.pyplot as plt
from vocab import Vocabulary
from utils import *
#######################################################################
from modules.text_encoder import TextEncoder
from modules.region_encoder import RegionEncoder
from modules.image_encoder import ImageEncoder
from modules.context_encoder import ContextEncoder
#######################################################################
from modules.attention import Attention
from modules.tirg_rnn import TIRGRNN
from modules.grounding_loss import GroundingLoss
#######################################################################
from modules.image_model import ImageModel
from modules.region_model import RegionModel
from modules.paragraph_model import ParagraphModel
from modules.image_hred_model import ImageHREDModel
from modules.region_grounding_model import RegionGroundingModel
#######################################################################
import torch, torchtext
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from datasets.vg import vg
from datasets.loader import region_loader, region_collate_fn
from datasets.loader import caption_loader, caption_collate_fn
from datasets.loader import paragraph_loader, paragraph_collate_fn
def test_attention(config):
attention = Attention(config, config.attn_type, 1024, 1024)
h_s = torch.randn(7, 36, 1024)
h_t = torch.randn(7, 5, 1024)
m_s = torch.randn(7, 36).random_(0, 2)
context, scores = attention(h_t, h_s, m_s)
print(context.size(), scores.size())
def test_tirg_rnn(config):
net = TIRGRNN(config, config.n_feature_dim, config.n_feature_dim, config.n_rnn_layers, dropout=0.1)
input_var = np.random.randn(2, 3, config.n_feature_dim)
prev_hidden = np.random.randn(config.n_rnn_layers, 2, config.n_feature_dim)
input_var_th = torch.from_numpy(input_var).float()
prev_hidden_th = torch.from_numpy(prev_hidden).float()
last_layer_hiddens, last_step_hiddens = net(input_var_th, prev_hidden_th)
print('last_layer_hiddens.size()', last_layer_hiddens.size())
print('last_step_hiddens.size()', last_step_hiddens.size())
def test_region_encoder(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionEncoder(config)
for cnt, batched in enumerate(loader):
region_feats = batched['region_feats'].float()
region_clses = batched['region_clses'].long()
print('region_feats', region_feats.size())
print('region_clses', region_clses.size())
img_feats, masked_feats, mm = net(region_feats, region_clses)
print('img_feats', img_feats.size())
if config.subspace_alignment_mode > 0:
print('masked_feats', masked_feats.size())
print('mm', mm.size())
break
def test_image_encoder(config):
db = vg(config, 'test')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageEncoder(config)
for cnt, batched in enumerate(loader):
images = batched['images'].float()
print('images', images.size())
feats = net(images)
print('features', feats.size())
break
def test_text_encoder(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = TextEncoder(config)
for cnt, batched in enumerate(loader):
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].float()
bsize, slen, fsize = sent_inds.size()
print('sent_inds', sent_inds.size())
print('sent_msks', sent_msks.size())
f1, f2, h = net(sent_inds.view(bsize*slen, fsize), sent_msks.view(bsize*slen, fsize))
print(f1.size(), f2.size(), h.size())
break
def test_image_model(config):
db = vg(config, 'test')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageModel(config)
for cnt, batched in enumerate(loader):
images = batched['images'].float()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
img_feats, txt_feats = net(sent_inds, sent_msks, None, images)
print('images', images.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
break
def test_grounding_loss(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionModel(config)
criterion = GroundingLoss(config)
for cnt, batched in enumerate(loader):
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
src_region_feats = batched['region_feats'].float()[config.batch_size:2*config.batch_size]
src_region_clses = batched['region_clses'].long()[config.batch_size:2*config.batch_size]
src_region_masks = batched['region_masks'].float()[config.batch_size:2*config.batch_size]
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks,
src_region_feats, src_region_clses, src_region_masks,
region_feats, region_clses, region_masks,
config.explore_mode)
masked_feats = img_feats
sim1 = criterion.compute_batch_mutual_similarity(masked_feats, region_masks, txt_feats)
sim2 = criterion.debug_compute_batch_mutual_similarity(masked_feats, region_masks, txt_feats)
print('sim1', sim1.size())
print('sim2', sim2.size())
print('diff', torch.sum(torch.abs(sim1-sim2)))
txt_masks = txt_feats.new_ones(txt_feats.size(0), txt_feats.size(1))
losses = criterion.forward_loss(masked_feats, region_masks, txt_feats, txt_masks, config.loss_reduction_mode)
print('losses', losses.size())
break
def test_paragraph_model(config):
db = vg(config, 'test')
loaddb = paragraph_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=paragraph_collate_fn)
net = ParagraphModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
start = time()
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
img_feats, txt_feats = net(sent_inds, sent_msks, region_feats, region_clses, region_masks)
losses = net.loss(img_feats, region_masks, txt_feats.unsqueeze(1))
print('losses', losses.size(), torch.mean(losses))
metrics, cache_results = net.evaluate(img_feats, region_masks, txt_feats.unsqueeze(1))
print('metrics', metrics)
print('sent_inds', sent_inds.size())
print('sent_msks', sent_msks.size())
print('region_feats', region_feats.size())
print('region_clses', region_clses.size())
print('region_masks', region_masks.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
print('time:', time() - start)
break
def test_region_model(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
start = time()
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
src_region_feats = batched['region_feats'].float()[config.batch_size:2*config.batch_size]
src_region_clses = batched['region_clses'].long()[config.batch_size:2*config.batch_size]
src_region_masks = batched['region_masks'].float()[config.batch_size:2*config.batch_size]
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks,
src_region_feats, src_region_clses, src_region_masks,
region_feats, region_clses, region_masks,
config.explore_mode)
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
if config.subspace_alignment_mode > 0:
print('masked_feats', masked_feats.size())
print('subspace_masks', subspace_masks.size())
if config.instance_dim > 1:
print('sample_logits', sample_logits.size())
print('sample_indices', sample_indices.size())
print('time:', time() - start)
break
def test_image_hred_model(config):
db = vg(config, 'train')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageHREDModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
images = batched['images'].float()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
img_feats, txt_feats = net(sent_inds, sent_msks, None, images)
print('images', images.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
loss = net.forward_loss(img_feats, txt_feats)
print(loss)
metrics, caches = net.evaluate(img_feats, txt_feats)
print(metrics)
break
def test_region_grounding_model(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionGroundingModel(config)
if config.pretrained is not None:
pretrained_path = osp.join(config.data_dir, 'caches/region_grounding_ckpts', config.pretrained+'.pkl')
states = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
net.load_state_dict(states['state_dict'], strict=False)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
scene_inds = batched['scene_inds'].long()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
region_feats = batched['region_feats'].float()
region_clses = batched['region_clses'].long()
region_masks = batched['region_masks'].float()
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks, None, None, None, region_feats, region_clses, region_masks, config.explore_mode)
if config.instance_dim > 1:
print(sample_indices[0])
# print('sample_logits', sample_logits.size())
# print('sample_indices', sample_indices.size())
txt_masks = txt_feats.new_ones(txt_feats.size(0), txt_feats.size(1))
losses = net.final_loss(img_feats, masked_feats, region_masks, txt_feats, txt_masks, sample_logits, sample_indices)
print('losses', losses.size(), torch.mean(losses))
if config.subspace_alignment_mode > 0:
metrics, cache_results = net.evaluate(masked_feats, region_masks, txt_feats)
else:
metrics, cache_results = net.evaluate(img_feats, region_masks, txt_feats)
print('metrics', metrics)
print('txt_feats', txt_feats.size())
print('img_feats', img_feats.size())
break
if __name__ == '__main__':
config, unparsed = get_config()
np.random.seed(config.seed)
random.seed(config.seed)
torch.manual_seed(config.seed)
if(config.cuda):
torch.cuda.manual_seed_all(config.seed)
prepare_directories(config)
# test_attention(config)
# test_softmax_rnn(config)
# test_image_model(config)
# test_region_model(config)
# test_region_grounding_model(config)
test_paragraph_model(config)
# test_image_hred_model(config)
# test_region_encoder(config)
# test_image_encoder(config)
# test_text_encoder(config)
# test_tirg_rnn(config)
# test_grounding_loss(config)
|
[
"fuwen.tan@gmail.com"
] |
fuwen.tan@gmail.com
|
b7558607fcad286760fb506037fdaea76c39703a
|
5662986bdd309e898186fab4b18e3c2acd7b854b
|
/your_project/your_package/migrations/0001_initial.py
|
939d2573e283f839628f5c24ea1c6a7d2f34813a
|
[] |
no_license
|
axiome-oss/dive-into-django-i18n
|
8cf02243d20b47a5c4df39e0ce2434c72b3fd031
|
94016731ee58200feae56bfa5fa0c7d75cd76ba1
|
refs/heads/master
| 2021-01-19T21:36:42.338160
| 2015-11-06T13:27:23
| 2015-11-06T13:27:23
| 39,247,664
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.TextField(null=True, blank=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"vbilley@axiome.io"
] |
vbilley@axiome.io
|
6d3a3465b4ee31a0ef11af36dbc99065914d9f18
|
dae17a2d278ce78ab987e77658a24f89903e8fac
|
/ecomm/account/migrations/0003_auto_20180402_1601.py
|
4709df63bfa1ba9b83496a7c91f2ca6efc625579
|
[] |
no_license
|
derikkip96/efarm
|
fdf15412cc3d77e166ffe90a2f6cb8a47f28092d
|
a1588ae6e7d49bac87e41b1fc5e566b28f437581
|
refs/heads/master
| 2022-12-09T23:28:01.200170
| 2019-09-02T21:41:12
| 2019-09-02T21:41:12
| 137,985,336
| 0
| 0
| null | 2022-11-22T02:34:00
| 2018-06-20T05:44:09
|
CSS
|
UTF-8
|
Python
| false
| false
| 404
|
py
|
# Generated by Django 2.0.2 on 2018-04-02 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_auto_20180331_0212'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(blank=True, upload_to='upload'),
),
]
|
[
"derikkip96@gmail.com"
] |
derikkip96@gmail.com
|
e59eaebb53a1dd0de0208e35718b32e92973811d
|
b7126fb70f72fea0e7bba6fe2fef6925302ef07b
|
/tceh5_opp/self_work/task1.py
|
735da977c22bdb199e6944c42bfec6b0ac104bb8
|
[] |
no_license
|
duk1edev/tceh
|
79cd909c5a6221a2ca77d342b917462345140faa
|
21649d42488883beb58d709f4a9d1a05c75d2900
|
refs/heads/master
| 2021-07-12T10:20:22.330005
| 2020-04-29T09:24:08
| 2020-04-29T09:24:08
| 239,434,484
| 0
| 0
| null | 2021-03-20T03:38:26
| 2020-02-10T05:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
# 1. Create a Trash (bin) class whose capacity can be set differently
# for different objects. Different items can be put into it.
# 2. Create a Packet (bag) class that items can also be put into; it too has a capacity.
# 3. Any class should be able to be placed both into the trash and into the packet.
# 4. If the capacity is not sufficient, report that the object cannot be placed.
class Trash:
def __init__(self, set_size):
self.size = set_size
def get_obj(self, obj):
if obj.size > self.size:
print('You could not put this stuff({} size) to that trash, \n'
'trash size is {}'.format(obj.size, self.size))
else:
print('You put the {} size {} to the trash'.format(obj, obj.size))
class Packet(Trash):
def __init__(self, set_size):
self.size = set_size
def get_obj(self, obj):
if obj.size > self.size:
print('You could not put this stuff({} size) to that packet, \n'
'packet size is {}'.format(obj.size, self.size))
else:
print('You put the {} size {} to the packet'.format(obj, obj.size))
class SomeStuff:
def __init__(self, set_size):
self.size = set_size
small_trash = Trash(5)
middle_trash = Trash(10)
big_trash = Trash(50)
small_packet = Packet(3)
middle_packet = Packet(5)
big_packet = Packet(10)
apple = SomeStuff(25)
print(apple.size)
garbage = SomeStuff(50)
small_trash.get_obj(apple)
big_trash.get_obj(garbage)
big_packet.get_obj(garbage)
|
[
"duk1e.ptc.ua@yandex.ru"
] |
duk1e.ptc.ua@yandex.ru
|
e6363546ba11afa88ac3d92f07661dcdc012c4da
|
8c44cf09689711b9389eeb9416c8fad45aee2009
|
/phron/text_sanitizer.py
|
cdf2b53e6de63af45639f2cb6c8e3dd940d5c3ba
|
[
"Apache-2.0"
] |
permissive
|
pacu/phron
|
71e880865a13d194257acc399c3397da58739e2e
|
03d6b0cb997b361bb1c7fe6a1be5414638036450
|
refs/heads/master
| 2021-06-16T23:13:24.420625
| 2021-05-27T18:09:28
| 2021-05-27T18:09:28
| 197,436,355
| 0
| 0
|
Apache-2.0
| 2021-05-27T18:09:29
| 2019-07-17T17:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
def sanitize_weka(text: str, remove_newlines=True, escape_doublequote=True, escape_singlequote=True,remove_separator=None) -> str:
"""
    Sanitize this text for the Weka CSV importer.
    Parameters:
        remove_newlines(Bool): removes newline characters and replaces them with blank spaces. Default: True
        escape_doublequote(Bool): escapes every doublequote character \\\" with \\\\\\\". Default: True.
                            if False, it will remove the doublequote and replace it with an empty string
        escape_singlequote(Bool): escapes every singlequote character \\\' with \\\\\\\'. Default: True.
                            if False, it will remove the singlequote and replace it with an empty string
        remove_separator(str): removes the separator str passed as argument. Default: None
"""
if remove_newlines:
text = text.replace('\n', ' ')
if escape_doublequote:
text = text.replace('"', '\\\"')
else:
text = text.replace('"', '')
if escape_singlequote:
text = text.replace("'", "\\\'")
else:
text = text.replace("'", "")
if remove_separator:
text = text.replace(remove_separator," ")
return text
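# Illustrative usage (not part of the original file): expected behaviour on a small input.
#
#   sanitize_weka('He said "hi"\nbye')
#   # -> 'He said \"hi\" bye'  (newline replaced with a space, doublequote escaped)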
|
[
"francisco.gindre@gmail.com"
] |
francisco.gindre@gmail.com
|
9002db9fb689e2de7cb305ce596ae3d6f5abfe61
|
59062b36911a3f827d638910a653d280556869cb
|
/python/snippet1.py
|
14e7233d5cb9b374b8e1a8da7099bc8edf2fce31
|
[] |
no_license
|
atharva-bhange/codesnippets
|
aedeca7782b730ea35b5cf1de589f9d577b5e839
|
d6d2dc1da5889f26f1864b547f5cdc14cfd071d9
|
refs/heads/master
| 2021-01-02T07:37:48.514000
| 2020-02-10T20:02:08
| 2020-02-10T20:02:08
| 239,551,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Snippet 1
class dog(object):
def __init__(self):
pass
def speak(self):
pass
mark = dog()
print("Code complete")
|
[
"atharva.bhange@gmail.com"
] |
atharva.bhange@gmail.com
|
31bda42177c67668b02106a2e58888a61630ed09
|
99e1a15d8f605be456f17608843c309dd8a3260f
|
/src/Battle/Attack/Steps/Test/suite.py
|
a11d3df523d7d71da56074941becf66d934c86c9
|
[] |
no_license
|
sgtnourry/Pokemon-Project
|
e53604096dcba939efca358e4177374bffcf0b38
|
3931eee5fd04e18bb1738a0b27a4c6979dc4db01
|
refs/heads/master
| 2021-01-17T23:02:25.910738
| 2014-04-12T17:46:27
| 2014-04-12T17:46:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
import unittest
from Battle.Attack.Steps.Test.remove_pp_step_test import suite as remove_pp_step_suite
from Battle.Attack.Steps.Test.handle_miss_effects_step_test import suite as handle_miss_effects_step_suite
from Battle.Attack.Steps.Test.handle_contact_step_test import suite as handle_contact_step_suite
from Battle.Attack.Steps.Test.effects_step_test import suite as effects_step_suite
from Battle.Attack.Steps.Test.damage_step_test import suite as damage_step_suite
from Battle.Attack.Steps.Test.announcement_step_test import suite as announcement_step_suite
from Battle.Attack.Steps.Test.hit_step_test import suite as hit_step_suite
from Battle.Attack.Steps.Test.precondition_step_test import suite as precondition_step_suite
suites = [precondition_step_suite,
hit_step_suite,
announcement_step_suite,
damage_step_suite,
effects_step_suite,
handle_contact_step_suite,
handle_miss_effects_step_suite,
remove_pp_step_suite]
suite = unittest.TestSuite(suites)
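# Illustrative usage (not part of the original file): the combined suite can be run with
#   unittest.TextTestRunner(verbosity=2).run(suite)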
|
[
"cloew123@gmail.com"
] |
cloew123@gmail.com
|
74a70cddec3707af88424f902a735dd471053666
|
7ed05e81c563b8931bdf232daf88d466bb06d698
|
/polls/admin.py
|
896bfe8b3f74c75e466c660292ed8b4b3f4afc85
|
[] |
no_license
|
chetansurwade/poller
|
c940ffc8bd19b6a5ee671322c8d2483a53170ee9
|
77657f248a3ba856e89b432593b41eaa7f455e7f
|
refs/heads/master
| 2020-09-25T22:29:36.609327
| 2019-12-05T15:17:39
| 2019-12-05T15:17:39
| 226,101,472
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
from django.contrib import admin
from .models import Question, Choice
admin.site.site_header = "Poller Admin"
admin.site.site_title = "Poller Admin Area"
admin.site.index_title = "Welcome to the Poller admin area"
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields': ['question_text']}),
('Date Information', {'fields': ['pub_date'], 'classes': ['collapse']}), ]
inlines = [ChoiceInline]
admin.site.register(Question, QuestionAdmin)
|
[
"chetansurwade@outlook.com"
] |
chetansurwade@outlook.com
|
8375cedfd57bf1a7dd0794d23b840cd0ffe5bb75
|
6f7495631dcf2d8ad1e878f8492ffc686691d50a
|
/day03/ex03/ColorFilter.py
|
37bff11b9302a956184f017affb0d8cde2999409
|
[] |
no_license
|
mli42/python_bootcamp
|
0e0012f611902c0be40ea4933d17255652465501
|
4e71ec20b12676016514875ee96d15dafb177718
|
refs/heads/main
| 2022-12-11T00:55:44.880734
| 2022-09-16T15:13:16
| 2022-09-16T15:14:13
| 233,590,858
| 3
| 2
| null | 2022-12-08T13:07:05
| 2020-01-13T12:30:49
|
Python
|
UTF-8
|
Python
| false
| false
| 6,240
|
py
|
# **************************************************************************** #
# #
# ::: :::::::: #
# ColorFilter.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: mli <mli@student.42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/11/24 22:42:30 by mli #+# #+# #
# Updated: 2022/03/12 23:30:33 by mli ### ########.fr #
# #
# **************************************************************************** #
import numpy as np
from copy import deepcopy
from ImageProcessor import ImageProcessor
class ColorFilter:
def __guard_ndarray(funct):
def inner(*args, **kwargs):
array = args[0]
if not (isinstance(array, np.ndarray) and
('float' in str(array.dtype) or 'int' in str(array.dtype))):
return None
try:
return_value = funct(*args, **kwargs)
except:
return None
return return_value
return (inner)
@staticmethod
@__guard_ndarray
def invert(array: np.ndarray) -> np.ndarray:
res = 1 - array
res[..., 3:] = array[..., 3:]
return res
@staticmethod
@__guard_ndarray
def to_blue(array: np.ndarray) -> np.ndarray:
res = np.zeros(array.shape)
res[..., 2:] = array[..., 2:]
return res
@staticmethod
@__guard_ndarray
def to_green(array: np.ndarray) -> np.ndarray:
res = deepcopy(array)
res[..., :3:2] = res[..., :3:2] * 0
return res
@staticmethod
@__guard_ndarray
def to_red(array: np.ndarray) -> np.ndarray:
only_blue_green = ColorFilter.to_blue(array) + ColorFilter.to_green(array)
res = array - only_blue_green
res[..., 3:] = array[..., 3:]
return res
@staticmethod
@__guard_ndarray
def to_celluloid(array: np.ndarray) -> np.ndarray:
bounds = np.linspace(array.min(), array.max(), 5)
res = array.copy()
lower_bound = bounds[0]
for upper_bound in bounds[1:]:
mask = (res[..., :3] > lower_bound) & (res[..., :3] < upper_bound)
res[..., :3][mask] = lower_bound
lower_bound = upper_bound
return res
@staticmethod
def __guard_grayscale(filter: str, **kwargs) -> bool:
weights = kwargs.pop('weights', None)
hasWeights = weights is not None
if (
(len(kwargs) != 0) or
(filter not in ['m', 'mean', 'w', 'weight']) or
(filter in ['m', 'mean'] and hasWeights) or
(filter in ['w', 'weight'] and (
not isinstance(weights, list) or
len(weights) != 3 or
not all([isinstance(obj, float) and obj >= 0 for obj in weights]) or
np.sum(weights) != 1.
))
):
return False
return True
@staticmethod
@__guard_ndarray
def to_grayscale(array: np.ndarray, filter: str, **kwargs) -> np.ndarray:
if ColorFilter.__guard_grayscale(filter, **kwargs) is False:
return None
weights = kwargs.get('weights')
res = None
if (filter in ['m', 'mean']):
mono = np.sum(array[..., :3], axis=2, keepdims=True) / 3
res = np.dstack((np.tile(mono, 3), array[..., 3:]))
elif (filter in ['w', 'weight']):
mono = np.sum(array[..., :3] * weights, axis=2, keepdims=True)
res = np.dstack((np.tile(mono, 3), array[..., 3:]))
return res
def main():
imgProc = ImageProcessor()
cfilter = ColorFilter()
elon = imgProc.load("../resources/elon.png")
def display_img(array):
if array is None:
print('Array is None')
return
imgProc.display(array)
def launch_filters(img):
if img is None:
print('Img is None')
return
base_ope = ('Base img', lambda x: x, [], {})
arr = [
base_ope,
('Inverted', cfilter.invert, [], {}),
('To blue', cfilter.to_blue, [], {}),
('To green', cfilter.to_green, [], {}),
('To red', cfilter.to_red, [], {}),
('To celluloid', cfilter.to_celluloid, [], {}),
('To grayscale m', cfilter.to_grayscale, ['m'], {}),
('To grayscale mean', cfilter.to_grayscale, ['mean'], {}),
('To grayscale w', cfilter.to_grayscale, ['w'], {'weights': [.2, .3, .5]}),
('To grayscale weight', cfilter.to_grayscale, ['weight'], {'weights': [.6, .2, .2]}),
base_ope
]
for label, fct, args, kwargs in arr:
print(label)
display_img(fct(img, *args, **kwargs))
def grayscale_err(img):
arr = [
('Args err', ['hey'], {'weights': [.8, .1, .1]}),
('Kwargs err', ['m'], {'hey': 123}),
('Weight value', ['m'], {'weights': 123}),
('Mean with weight', ['m'], {'weights': [.8, .1, .1]}),
('Weight tuple', ['w'], {'weights': (.8, .1, .1)}),
('Weight intruder', ['w'], {'weights': [1., 2., 'a']}),
('Too much float', ['w'], {'weights': [.8, .1, .1, .0]}),
('Too high float', ['w'], {'weights': [.8, .1, .2]}),
('Too much kwargs', ['w'], {'weights': [.8, .1, .1], 'hey': 'a'}),
('Negativ float', ['w'], {'weights': [.8, -.1, .3]}),
]
for label, args, kwargs in arr:
print(label, end=': ')
display_img(cfilter.to_grayscale(img, *args, **kwargs))
print('Trying with Elon')
launch_filters(elon)
print('Trying with inverted Elon')
launch_filters(cfilter.invert(elon))
print('Check grayscale guardian')
grayscale_err(elon)
if __name__ == "__main__":
main()
|
[
"mli@student.42.fr"
] |
mli@student.42.fr
|
8ebeb25ae069db43b23b35eea9b3cb49e7564d1c
|
d4e1b610db981020019a10af1fc90311cc0900d6
|
/students/ReemAlqaysi/lesson06/test_mailroom.py
|
af851981a3cb52f99e0b0734f1d64f3604772217
|
[] |
no_license
|
InduKolli/SP_Online_PY210
|
c9c7b52b6ac6be3f10c210cebe74b4564f35b989
|
49589778454c1549a12fd6f8bc2e44e022b86b72
|
refs/heads/master
| 2020-06-11T16:40:49.368669
| 2019-11-11T03:17:54
| 2019-11-11T03:17:54
| 193,431,588
| 1
| 0
| null | 2019-06-24T04:06:29
| 2019-06-24T04:06:29
| null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
#!/usr/bin/env python3
import mailroom
import os
donor_list = {
"Jan Balard": [600.00,250.00],
"Joe McHennry": [1500.00,1500.00],
"Jeff Hansen": [450.00,150.00],
"Scott Newman": [100.00,5000.00],
"Rabi Das": [500.00,950.00]
}
def test_send_letter_text():
letter = '''\n\nDear Reem Alqaysi:\n Thank you for your donation of $222, we appriciate your support to our service. \n MailRoom Team\n'''
assert mailroom.thank_you_text('Reem Alqaysi',222) == letter
def test_new_donor():
fullname = 'Reem Alqaysi'
mailroom.add_name(fullname)
assert fullname in donor_list
#assert donor_list == {'Jan Balard': [600.0, 250.0], 'Joe McHennry': [1500.0, 1500.0], 'Jeff Hansen': [450.0, 150.0], 'Scott Newman': [100.0, 5000.0], 'Rabi Das': [500.0, 950.0], 'Reem Alqaysi': []}
def test_update_donor():
fullname = 'Rabi Das'
mailroom.add_name(fullname)
assert fullname in donor_list
def test_add_amount():
fullname = 'Reem Alqaysi'
amount = 222
mailroom.add_amount(fullname,amount)
    assert donor_list[fullname][-1] == amount  # donor histories are flat lists of floats, so compare with the amount itself
def test_create_report():
report = \
f'Donor Name | Total Given |Num Gifts |Average Gift \n\
------------------------------------------------------------------------------------------\n\
Scott Newman $ 5100.0 2 $ 2550.0\n\
Jeff Hansen $ 600.0 2 $ 300.0\n\
Rabi Das $ 1450.0 2 $ 725.0\n\
Jan Balard $ 850.0 2 $ 425.0\n\
Joe McHennry $ 3000.0 2 $ 1500.0\n'
assert mailroom.create_report() == report
def test_create_report_file():
mailroom.letter_to_all()
for name in donor_list:
filename = name.replace(' ', '_').replace(',', '') + ".txt"
filename = filename.lower()
assert os.path.isfile(filename) is True
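# Illustrative note (not part of the original file): these are pytest-style tests and are
# typically run from the lesson directory with
#   pytest test_mailroom.py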
|
[
"reem3@uw.edu"
] |
reem3@uw.edu
|
7f4cb87cab420060f0713c8c91401f606532723a
|
b26c0b0d767f62325fb3963118698e5c77819c70
|
/Rice Python/Rice Rocks (no animation).py
|
c441c42cf385f97d4c47b119bfa31f318d65ec60
|
[] |
no_license
|
alecmchiu/MOOCs
|
8336ad3ed52262ce543ed0a817252362041900c9
|
f87549d19f304b64df8ad51387aa8252062676fd
|
refs/heads/master
| 2021-01-12T01:31:48.061261
| 2017-08-18T02:59:06
| 2017-08-18T02:59:06
| 78,399,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,259
|
py
|
# implementation of Spaceship - program template for RiceRocks
import simplegui
import math
import random
# globals for user interface
WIDTH = 800
HEIGHT = 600
score = 0
lives = 3
time = 0
started = False
class ImageInfo:
def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
self.center = center
self.size = size
self.radius = radius
if lifespan:
self.lifespan = lifespan
else:
self.lifespan = float('inf')
self.animated = animated
def get_center(self):
return self.center
def get_size(self):
return self.size
def get_radius(self):
return self.radius
def get_lifespan(self):
return self.lifespan
def get_animated(self):
return self.animated
# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim
# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
# nebula images - nebula_brown.png, nebula_blue.png
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png")
# splash image
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
# ship image
ship_info = ImageInfo([45, 45], [90, 90], 35)
ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
# missile image - shot1.png, shot2.png, shot3.png
missile_info = ImageInfo([5,5], [10, 10], 3, 50)
missile_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
# sound assets purchased from sounddogs.com, please do not redistribute
# .ogg versions of sounds are also available, just replace .mp3 by .ogg
soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
# helper functions to handle transformations
def angle_to_vector(ang):
return [math.cos(ang), math.sin(ang)]
def dist(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
def process_sprite_group(a_set, canvas):
copy = set(a_set)
for each in a_set:
if (each.update()):
copy.remove(each)
else:
each.draw(canvas)
a_set.intersection_update(copy)
def group_collide(group, other_object):
original = len(group)
group_copy = set(group)
for each in group:
if (each.collide(other_object)):
group_copy.remove(each)
group.intersection_update(group_copy)
if (len(group) < original):
return True
else:
return False
def group_group_collide(group1,group2):
copy = set(group1)
collisions = 0
for each in group1:
if(group_collide(group2, each)):
collisions += 1
copy.discard(each)
group1.intersection_update(copy)
return collisions
# Ship class
class Ship:
def __init__(self, pos, vel, angle, image, info):
self.pos = [pos[0], pos[1]]
self.vel = [vel[0], vel[1]]
self.thrust = False
self.angle = angle
self.angle_vel = 0
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
def draw(self,canvas):
if self.thrust:
canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0], self.image_center[1]] , self.image_size,
self.pos, self.image_size, self.angle)
else:
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
# canvas.draw_circle(self.pos, self.radius, 1, "White", "White")
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
# update velocity
if self.thrust:
acc = angle_to_vector(self.angle)
self.vel[0] += acc[0] * .1
self.vel[1] += acc[1] * .1
self.vel[0] *= .99
self.vel[1] *= .99
def set_thrust(self, on):
self.thrust = on
if on:
ship_thrust_sound.rewind()
ship_thrust_sound.play()
else:
ship_thrust_sound.pause()
def increment_angle_vel(self):
self.angle_vel += .05
def decrement_angle_vel(self):
self.angle_vel -= .05
def shoot(self):
global missile_group
forward = angle_to_vector(self.angle)
missile_pos = [self.pos[0] + self.radius * forward[0], self.pos[1] + self.radius * forward[1]]
missile_vel = [self.vel[0] + 6 * forward[0], self.vel[1] + 6 * forward[1]]
a_missile = Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound)
missile_group.add(a_missile)
def get_position(self):
return self.pos
def get_radius(self):
return self.radius
# Sprite class
class Sprite:
def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):
self.pos = [pos[0],pos[1]]
self.vel = [vel[0],vel[1]]
self.angle = ang
self.angle_vel = ang_vel
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
self.lifespan = info.get_lifespan()
self.animated = info.get_animated()
self.age = 0
if sound:
sound.rewind()
sound.play()
def draw(self, canvas):
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
#update age
self.age += 1
if (self.age < self.lifespan):
return False
else:
return True
def get_position(self):
return self.pos
def get_radius(self):
return self.radius
def collide(self, other_object):
distance = dist(self.pos,other_object.get_position())
collision_distance = self.radius + other_object.get_radius()
if (distance < collision_distance):
return True
else:
return False
# key handlers to control ship
def keydown(key):
if key == simplegui.KEY_MAP['left']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(True)
elif key == simplegui.KEY_MAP['space']:
my_ship.shoot()
def keyup(key):
if key == simplegui.KEY_MAP['left']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(False)
# mouseclick handlers that reset UI and conditions whether splash image is drawn
def click(pos):
global started, timer, lives
center = [WIDTH / 2, HEIGHT / 2]
size = splash_info.get_size()
inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)
inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)
if (not started) and inwidth and inheight:
started = True
timer.start()
lives = 3
soundtrack.play()
def draw(canvas):
global time, started, lives, score, timer, rock_group
    # animate background
time += 1
wtime = (time / 4) % WIDTH
center = debris_info.get_center()
size = debris_info.get_size()
canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])
canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
# draw UI
canvas.draw_text("Lives", [50, 50], 22, "White")
canvas.draw_text("Score", [680, 50], 22, "White")
canvas.draw_text(str(lives), [50, 80], 22, "White")
canvas.draw_text(str(score), [680, 80], 22, "White")
# draw ship and sprites
my_ship.draw(canvas)
# update ship and sprites
my_ship.update()
#process rocks and missiles
process_sprite_group(rock_group, canvas)
process_sprite_group(missile_group, canvas)
#collisions
if (group_collide(rock_group, my_ship)):
lives -= 1
score += group_group_collide(rock_group, missile_group)
if (lives == 0):
started = False
rock_group = set()
timer.stop()
soundtrack.pause()
soundtrack.rewind()
time = 0
# draw splash screen if not started
if not started:
canvas.draw_image(splash_image, splash_info.get_center(),
splash_info.get_size(), [WIDTH / 2, HEIGHT / 2],
splash_info.get_size())
# timer handler that spawns a rock
def rock_spawner():
global rock_group, my_ship, time
rock_pos = [random.randrange(0, WIDTH), random.randrange(0, HEIGHT)]
rock_vel = [0.01*time*(random.random() * .6 - .3), 0.01*time*(random.random() * .6 - .3)]
rock_avel = random.random() * .2 - .1
a_rock = Sprite(rock_pos, rock_vel, 0, rock_avel, asteroid_image, asteroid_info)
if (len(rock_group) <= 12):
if (dist(my_ship.get_position(),a_rock.get_position()) > my_ship.get_radius()+a_rock.get_radius()):
rock_group.add(a_rock)
# initialize stuff
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
# initialize ship and two sprites
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)
rock_group = set()
missile_group = set()
# register handlers
frame.set_keyup_handler(keyup)
frame.set_keydown_handler(keydown)
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(1000.0, rock_spawner)
# get things rolling
frame.start()
|
[
"alecmchiu@gmail.com"
] |
alecmchiu@gmail.com
|
9a2ea1d5b16e6bceebfb05ef2b319e294caf9509
|
f61208a1bb90c03c2a6c4540c04623d9c2a77064
|
/python labs/hackerrank/percentage.py
|
3f151c38e935d737f7360773b3c8c44a2492f4bc
|
[] |
no_license
|
albinai/Wd
|
f49b39ae8387fd02d04c5721b9505ebc1c6897da
|
2d2e315327cf60c1943da3b8ca29017d07fc3843
|
refs/heads/master
| 2020-12-29T06:02:27.177059
| 2020-04-09T23:54:49
| 2020-04-09T23:54:49
| 238,482,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
if __name__ == '__main__':
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
scores=sum(scores)/3
student_marks[name] = scores
query_name = input()
print('%.2f' % student_marks[query_name])
|
[
"Albina.13.2.2001@gmail.com"
] |
Albina.13.2.2001@gmail.com
|
c03744b393ec5f98ff295969921ddf3de80aecaf
|
9c52998e7d92640b82284e7e85bf69205fc94d73
|
/SeleniumLearningFiles/SeleniumLearning01/webdrivertest/web04.py
|
ec6aa9036031cb6a57f01829bff64e05c5c91ab3
|
[] |
no_license
|
github653224/GitProjects_SeleniumLearing
|
b0c57d27fa48b0cd7475f8d8e8b19c57160e65fc
|
818b573a3b0f18def98610e59e3c0c6500a675bc
|
refs/heads/master
| 2021-07-20T05:54:46.392948
| 2017-10-27T12:53:41
| 2017-10-27T12:53:41
| 107,764,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
from random import randint
verify =randint(1000,9999)
print(u"生成的随机数字: %d " %verify)
number=input("请输入随机数字:")
print(number)
number=int(number)
if number ==verify:
print ("登录成功!!")
elif number==132741:
print("登陆成功!!")
else:
print("输入错误")
|
[
"944851899@qq.com"
] |
944851899@qq.com
|
a72473ebf4f825bee83939c8f6354360345830ee
|
1781eeb99cb758106f3a41a6aab96c4108c3bffd
|
/ParserTranscript.py
|
6e8ae6169dc4e4507392a3dd762cc3256f694668
|
[] |
no_license
|
Ilhyon/Scripts
|
10015163647c2204c93d0da4d58224a116863a1d
|
496b6eb589501aa8e84ef25720d465bda2eb305f
|
refs/heads/master
| 2021-07-13T16:26:28.576512
| 2020-07-09T18:41:27
| 2020-07-09T18:41:27
| 159,869,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,828
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-:
import os
import argparse
import numpy as np
import pandas as pd
from pprint import pprint
def readTr(filename):
dico = {}
with open(filename) as f: # file opening
content = f.read()
lines = content.split('\n')
for l in lines:
if l :
w = l.split('|')
if w[3] == '1':
w[3] = '+'
else:
w[3] = '-'
chrStrand = w[2]+'|'+w[3]
if chrStrand not in dico:
dico[chrStrand] = {}
exon = w[5].split(';')
for e in exon:
if e not in dico[chrStrand]:
dico[chrStrand][e] = []
dico[chrStrand][e].append(w[0])
return dico
def main(path):
trAll = path + 'HS_transcript_unspliced_All.txt'
files = ['kunv', 'sinv', 'zikv', 'yvf']
dicoAllTr = readTr(trAll)
for v in files:
newF = []
with open(path+v+'_RI1New.csv') as f: # file opening
content = f.read()
lines = content.split('\n')
for l in lines:
tr1 = []
tr2 = []
w = l.split('\t')
if w[2] == '-':
E1E = str(int(w[9])+1)
E1S = str(int(w[10]))
E2E = str(int(w[11])+1)
E2S = str(int(w[12]))
chrStrand = w[3]+'|'+w[2]
if E1S+'-'+E1E in dicoAllTr[chrStrand]:
tr1 = dicoAllTr[chrStrand][ E1S+'-'+E1E ]
else:
print('tr1')
print(E1S+'-'+E1E)
if E2S+'-'+E2E in dicoAllTr[chrStrand]:
tr2 = dicoAllTr[chrStrand][ E2S+'-'+E2E ]
else:
print('tr2')
print(E2S+'-'+E2E)
if tr1 and tr2:
commonTr = list(set(tr1).intersection(tr2))
else:
commonTr = []
w.extend(commonTr)
w = '\t'.join(w)
newF.append(w)
else:
E1S = str(int(w[9])+1)
E1E = str(int(w[10]))
E2S = str(int(w[11])+1)
E2E = str(int(w[12]))
chrStrand = w[3]+'|'+w[2]
if E1S+'-'+E1E in dicoAllTr[chrStrand]:
tr1 = dicoAllTr[chrStrand][ E1S+'-'+E1E ]
else:
print('tr1')
print(E1S+'-'+E1E)
if E2S+'-'+E2E in dicoAllTr[chrStrand]:
tr2 = dicoAllTr[chrStrand][ E2S+'-'+E2E ]
else:
print('tr2')
print(E2S+'-'+E2E)
if tr1 and tr2:
commonTr = list(set(tr1).intersection(tr2))
else:
commonTr = []
w.extend(commonTr)
w = '\t'.join(w)
newF.append(w)
outputF = open(path+v+'_RI1TESTtranscript.csv', "w")
outputF.write( 'Location\tGeneSymbol\tStrand\tchr\tStartEvent\tEndEvent\tStartpG4\tEndpG4\tpG4Sequence\tE1S\tE1E\tE2S\tE2E\tTr\n' )
outputF.write( '\n'.join(newF) )
outputF.close()
def build_arg_parser():
parser = argparse.ArgumentParser(description = 'generateRandom')
GITDIR = os.getcwd()+'/'
parser.add_argument ('-p', '--path', default = GITDIR)
return parser
if __name__ == '__main__':
parser = build_arg_parser()
arg = parser.parse_args()
path = arg.path
main(path)
|
[
"anais.vannutelli@gmail.com"
] |
anais.vannutelli@gmail.com
|
30f858dd902db2be0d5101090796c8980b6e4b42
|
d990f320b549916aea7ae9f7349e5445d472a61e
|
/replay_buffer.py
|
c867c91d31d0269f53f6b8e8cf052c0a62931090
|
[
"MIT"
] |
permissive
|
alleboudy/navigation-drl
|
d88ac83bb72824f2bfc18aebd6aacea7bf12415e
|
091ae4ffb028288dc4f0464c8109a2b54cab8250
|
refs/heads/main
| 2023-04-12T20:15:39.204542
| 2021-05-04T21:49:20
| 2021-05-04T21:49:20
| 363,675,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
import torch
import numpy as np
import random
from collections import namedtuple, deque

# Device for the sampled tensors; sample() references it but the original file never defined it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
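# Illustrative usage (not part of the original file): typical use inside a DQN-style agent.
# The sizes below are hypothetical.
#
#   buffer = ReplayBuffer(action_size=4, buffer_size=int(1e5), batch_size=64, seed=0)
#   buffer.add(state, action, reward, next_state, done)
#   if len(buffer) >= 64:
#       states, actions, rewards, next_states, dones = buffer.sample()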
|
[
"ahmad.alleboudy@outlook.com"
] |
ahmad.alleboudy@outlook.com
|
b81fcd5e3a4bced2bbf26ad772ff6291dd4a369c
|
40a441c075fdb63a5b30f9baa7d3e5165070c034
|
/trained_model.py
|
1fa8e983e420f1ce49702cf3b7b85a38d2e62812
|
[] |
no_license
|
nanditashekar/Food-Classifier-Tool
|
aef8a8a92056118f11eacab3ebb7b63948f1ea30
|
e7025b9dd99771a6b8b06ebb588da8a2a7f2bfb7
|
refs/heads/master
| 2022-11-22T06:29:30.607387
| 2020-07-27T16:07:02
| 2020-07-27T16:07:02
| 282,947,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
# -*- coding: utf-8 -*-
"""Model_Demo_File.ipynb
Created by Aravind R Krishnan
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BRvmIlk4lgc-UMRxssbJtJxRk1h4bAdE
"""
#Loading the model and testing
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
model = load_model('MINI_PROJECT_MODEL_FINAL.h5')
def pred(path):
test = image.load_img(path, target_size =(256,256))
test = image.img_to_array(test)
plt.imshow(test, cmap='gray')
plt.show()
test = np.expand_dims(test, axis=0)
result = model.predict(test)
if result[0][0] == 1:
print("CUPCAKES!")
elif result[0][1] == 1:
print("DUMPLINGS")
elif result[0][2] == 1:
print("FRENCH FRIES")
elif result[0][3] == 1:
print("FRIED RICE")
else:
print("PIZZA!")
def demo():
flag=1
while flag:
print("Input File Path of Image: ")
filepath=input()
pred(filepath)
print("Enter 0 to Quit, else 1")
        flag=int(input())  # cast to int so that entering 0 actually exits the loop
demo()
|
[
"noreply@github.com"
] |
nanditashekar.noreply@github.com
|
402bc890c5f10dde4ade6ceda9b8d76f67c850f4
|
843d8d6bcba5ceff4f289b9566a6594d8984308d
|
/Week_3/lab-code-simplicity-efficiency/your-code/challenge-1.py
|
a4c913ff1da118ef30a143fa02097131421afc0b
|
[] |
no_license
|
GuillemGodayol/Ironhack_Data_Labs
|
df6e1db00ca3c4370b26f25a06aa9d4fdcd1a821
|
56275959d276d3ef9542efb8c287aa16876d45fa
|
refs/heads/master
| 2020-11-26T16:34:07.971756
| 2019-12-19T21:25:01
| 2019-12-19T21:25:01
| 229,141,062
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
"""
This is a dumb calculator that can add and subtract whole numbers from zero to five.
When you run the code, you are prompted to enter two numbers (in the form of English
word instead of number) and the operator sign (also in the form of English word).
The code will perform the calculation and give the result if your input is what it
expects.
The code is very long and messy. Refactor it according to what you have learned about
code simplicity and efficiency.
"""
from num2word import word
print('Welcome to this calculator!')
print('It can add and subtract whole numbers from zero to five')
a = input('Please choose your first number (zero to five): ')
b = input('What do you want to do? plus or minus: ')
c = input('Please choose your second number (zero to five): ')
# I create a diccionary with the different inputs we can have for numbers and its corresponding integer
numbers = {'zero':0, 'one':1, 'two':2, 'three':3, 'four':4, 'five':5, '0':0, '1':1, '2':2, '3':3, '4':4, '5':5}
# I create two lists with the different inputs we can have for operators
op_plus = ['plus', '+']
op_minus =['minus','-']
if a not in numbers or c not in numbers or (b not in op_plus and b not in op_minus): # I check if any of the 3 inputs is wrong
print("I am not able to answer this question. Check your input.")
elif b in op_plus: # if b is a plus, I add a + c
print(word(numbers[a]), 'plus', word(numbers[c]), 'equals',word(numbers[a] + numbers[c]))
else: # else, I substract a - c
if numbers[a] >= numbers[c]:
print(word(numbers[a]), 'minus', word(numbers[c]), 'equals',word(numbers[a] - numbers[c]))
else:
print(word(numbers[a]), 'minus', word(numbers[c]), 'equals negative', word(-(numbers[a] - numbers[c])))
print("Thanks for using this calculator, goodbye :)")
|
[
"guillemgodayol@gmail.com"
] |
guillemgodayol@gmail.com
|
6843646e4bfc8dd6d189f4981122d415672c1403
|
8937c4d452c98699610923f76a395a2247f576df
|
/preprocess/crop.py
|
5b05cb13ad998812b4d8e78a1b99878b47e16046
|
[] |
no_license
|
mistycheney/MouseBrainAtlas
|
812b204af06ed303f3c12d5c81edef50c8d9d1ed
|
bffbaa1ede9297084e64fc197716e63d5cb54275
|
refs/heads/master
| 2020-04-11T13:44:09.632311
| 2018-11-20T22:32:15
| 2018-11-20T22:32:15
| 20,377,173
| 3
| 9
| null | 2017-03-15T19:39:27
| 2014-06-01T12:42:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,884
|
py
|
#! /usr/bin/env python
import os
import argparse
import sys
import time
import numpy as np
from multiprocess import Pool
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
from metadata import *
from data_manager import *
from learning_utilities import *
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='')
parser.add_argument("stack", type=str, help="Brain name")
parser.add_argument("versions", type=str, help="json encoded str list")
parser.add_argument("resolutions", type=str, help="json encoded str list")
parser.add_argument("prep_in", type=str, help="")
parser.add_argument("prep_out", type=str, help="")
parser.add_argument("input_crop_json", type=str, help="")
parser.add_argument("output_crop_json", type=str, help="")
parser.add_argument("n_jobs", type=int, help="", default=1)
args = parser.parse_args()
versions = json.loads(args.versions)
if isinstance(versions, str):
versions = [versions]
else:
assert isinstance(versions, list), "Argument versions must be str or str list."
resolutions = json.loads(args.resolutions)
if isinstance(resolutions, str):
resolutions = [resolutions]
else:
assert isinstance(resolutions, list), "Argument resolutions must be str or str list."
n_jobs = args.n_jobs
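# Note (not part of the original file): the thumbnail-space crop box x_tb, y_tb, w_tb, h_tb
# used below is never loaded in this script as written; it is assumed to come from
# args.input_crop_json. A minimal sketch, with hypothetical keys:
#
#   with open(args.input_crop_json) as f:
#       crop_box = json.load(f)
#   x_tb, y_tb, w_tb, h_tb = crop_box['x'], crop_box['y'], crop_box['w'], crop_box['h']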
def crop(stack, img_name, version, resol, x,y,w,h):
input_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol=resol, version=version, fn=img_name)
output_fp = DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol=resol)
img = imread(input_fp)
save_data(img[y:y+h, x:x+w], output_fp)
for version in versions:
for resol in resolutions:
if resol == 'raw':
x = x_tb * 32
y = y_tb * 32
w = w_tb * 32
h = h_tb * 32
elif resol == 'thumbnail':
x = x_tb
y = y_tb
w = w_tb
h = h_tb
else:
raise
# input_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=5, version=version, resol='raw')
out_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=2, resol=resol, version=version)
print 'out_dir:', out_dir
# script = os.path.join(REPO_DIR, 'preprocess', 'warp_crop_IM_v3.py')
# ! rm -rf {out_dir}
create_if_not_exists(out_dir)
t = time.time()
pool = Pool(8)
_ = pool.map(lambda img_name: crop(stack=stack, img_name=img_name, version=version, resol=resol,
x=x, y=y, w=w, h=h),
metadata_cache['valid_filenames'][stack])
pool.close()
pool.join()
# for img_name in metadata_cache['valid_filenames'][stack]:
# f(stack=stack, img_name=img_name, version=version, resol=resol,
# x=x, y=y, w=w, h=h)
# run_distributed('convert \"%%(input_fp)s\" -crop %(w)dx%(h)d+%(x)d+%(y)d \"%%(output_fp)s\"' % \
# {'w':w_raw, 'h':h_raw, 'x':x_raw, 'y':y_raw},
# kwargs_list=[{'input_fp': DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol='raw', version=version, fn=img_name),
# 'output_fp': DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol='raw')}
# for img_name in metadata_cache['valid_filenames'][stack]],
# # for img_name in ['CHATM3_slide35_2018_02_17-S1']],
# argument_type='single',
# jobs_per_node=1,
# local_only=True)
# wait_qsub_complete()
print 'done in', time.time() - t, 'seconds' # 1500s
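# Example invocation (illustrative only; the stack name and crop-box file names are
# hypothetical, not taken from the original pipeline):
#   python crop.py DEMO998 '["gray"]' '["thumbnail"]' 5 2 crop_in.json crop_out.json 8
# Positional arguments follow the parser above: stack, versions (JSON list),
# resolutions (JSON list), prep_in, prep_out, input_crop_json, output_crop_json, n_jobs.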
|
[
"cyc3700@gmail.com"
] |
cyc3700@gmail.com
|
61d30e685f5062f0bd16062b1d190bee3ea93ccf
|
5c4c8fcf39d83c3ba9031825115f7416f474ecfd
|
/Paxel/wsgi.py
|
430007cb764f6c7f483a7190f91bfd4b2a87d076
|
[] |
no_license
|
SergioParraC/Paxel-Django
|
0fc42cec94c3c142fd06bf4cbbb550f1786c6c1a
|
25e9501902151b1b7ded45c1abf9282a5c1c0dd9
|
refs/heads/master
| 2023-03-11T09:41:55.248734
| 2021-02-25T21:08:10
| 2021-02-25T21:08:10
| 328,280,984
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for Paxel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Paxel.settings')
application = get_wsgi_application()
|
[
"stevenparracuesta@gmail.com"
] |
stevenparracuesta@gmail.com
|
b8c56deb337421b8e05a8a70c59c71923d4bf996
|
9039db1d63664122ac65176b1159d61eccc1ec61
|
/cables/models/__init__.py
|
1b560f4780f9466098aae59bf3a22d20f298f283
|
[] |
no_license
|
yjacolin/Avifaune-Cables_aeriens
|
8e28594c0a9b58084f3371e77ec49ed11d879a78
|
273b95be496d1b37163a40c4e2a92b60b733b903
|
refs/heads/master
| 2020-03-22T07:41:44.926554
| 2018-07-04T11:58:37
| 2018-07-04T11:58:37
| 139,718,598
| 0
| 0
| null | 2018-07-04T12:22:56
| 2018-07-04T12:22:55
| null |
UTF-8
|
Python
| false
| false
| 28,487
|
py
|
#-*- coding: utf-8 -*-
import logging
import sqlahelper
from sqlalchemy import BigInteger, Boolean, CheckConstraint, Column, Date, DateTime, Float, ForeignKey, Index, Integer, String, Table, Text, text, Unicode
from sqlalchemy.sql.sqltypes import NullType
from sqlalchemy.orm import relationship, mapper
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
log = logging.getLogger(__name__)
Base = sqlahelper.get_base()
metadata = Base.metadata
DBSession = sqlahelper.get_session()
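# Accessor factory used as ``getset_factory`` for the association_proxy declarations
# below: the generated getter returns None when the related (outer-joined) object is
# missing instead of raising, and the setter simply forwards to the proxied attribute.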
def outer_join_accessor_factory(collection_type, proxy):
def getter(obj):
if obj is None:
return None
return getattr(obj, proxy.value_attr)
def setter(obj, value):
setattr(obj, proxy.value_attr, value)
return getter, setter
class DicoAge(Base):
__tablename__ = 'dico_age'
id_age = Column(Integer, primary_key=True)
lib_age = Column(String(20))
class DicoCauseMortalite(Base):
__tablename__ = 'dico_cause_mortalite'
id_cause_mortalite = Column(Integer, primary_key=True)
lib_cause_mortalite = Column(String(20))
class DicoClassesRisque(Base):
__tablename__ = 'dico_classes_risque'
id_classe_risque = Column(Integer, primary_key=True, server_default=text("nextval('dico_classes_risque_id_classe_risque_seq'::regclass)"))
lib_classe_risque = Column(String(30))
class DicoNbEquipement(Base):
__tablename__ = 'dico_nb_equipements'
id_nb_equipements = Column(Integer, primary_key=True)
nb_equipements = Column(Integer)
class DicoSexe(Base):
__tablename__ = 'dico_sexe'
id_sexe = Column(Integer, primary_key=True)
lib_sexe = Column(String(20))
class DicoSource(Base):
__tablename__ = 'dico_source'
id_source = Column(Integer, primary_key=True)
lib_source = Column(String(20))
class DicoTypeEquipementPoteau(Base):
__tablename__ = 'dico_type_equipement_poteau'
id_type_equipement_poteau = Column(Integer, primary_key=True)
nom_type_equipement_poteau = Column(String)
class DicoTypeEquipementTroncon(Base):
__tablename__ = 'dico_type_equipement_troncon'
id_type_equipement_troncon = Column(Integer, primary_key=True)
nom_type_equipement_troncon = Column(String)
class DicoTypePoteauErdf(Base):
__tablename__ = 'dico_type_poteau_erdf'
id_type_poteau_erdf = Column(Integer, primary_key=True)
lib_type_poteau_erdf = Column(String)
class ErdfAppareilCoupure(Base):
__tablename__ = 'erdf_appareil_coupure'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_appareil_coupure_id_seq'::regclass)"))
AUTOMATISM = Column(String(62))
AUTOMATIS1 = Column(String(62))
AUTOMATIS2 = Column(String(62))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE_DE_CO = Column(String(32))
T_L_COMMAN = Column(String(7))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfConnexionAerienne(Base):
__tablename__ = 'erdf_connexion_aerienne'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_connexion_aerienne_id_seq'::regclass)"))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE_DE_CO = Column(String(40))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfParafoudre(Base):
__tablename__ = 'erdf_parafoudre'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_parafoudre_id_seq'::regclass)"))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE = Column(String(32))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfPosteElectrique(Base):
__tablename__ = 'erdf_poste_electrique'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_poste_electrique_id_seq'::regclass)"))
FONCTION_P = Column(String(40))
NOM_DU_POS = Column(String(32))
POTEAU_HTA = Column(String(32))
STATUT = Column(String(12))
TYPE_DE_PO = Column(String(51))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfRemonteeAerosout(Base):
__tablename__ = 'erdf_remontee_aerosout'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_remontee_aerosout_id_seq'::regclass)"))
APPAREIL_D = Column(String(32))
CONNEXION_ = Column(String(32))
HAUTEUR_PO = Column(Float(53))
INDICATEUR = Column(String(32))
PARAFOUDRE = Column(String(32))
PROTECTION = Column(String(7))
REMONT_E_A = Column(String(7))
STATUT = Column(String(12))
SYMBOLOGIE = Column(String(64))
ANGLE = Column(Float(53))
SYSANGLE = Column(Float(53))
ID_SIG = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class ErdfTronconAerien(Base):
__tablename__ = 'erdf_troncon_aerien'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
STATUT = Column(String(12))
TECHNOLOGI = Column(String(32))
TECHNOLOG1 = Column(String(32))
SYMBOLOGIE = Column(String(64))
COMMENTAIR = Column(String(30))
geom = Column(NullType, index=True)
ID_SIG = Column(Integer)
id = Column(Integer, primary_key=True, server_default=text("nextval('erdf_troncon_aerien_id_seq'::regclass)"))
geom_json = Column(String)
class OgmCablesRemonteesMecanique(Base):
__tablename__ = 'ogm_cables_remontees_mecaniques'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
idcable = Column(Integer, primary_key=True)
TypeInfra = Column(String(50))
NomInfra = Column(String(50))
IdDomaine = Column(Integer)
DateMontag = Column(DateTime)
DateDemont = Column(DateTime)
DateModif = Column(DateTime)
SHAPE_Leng = Column(Float(53))
geom_json = Column(String)
class OgmDomainesSkiable(Base):
__tablename__ = 'ogm_domaines_skiables'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
iddomaine = Column(Integer, primary_key=True)
NomRDomain = Column(String(255))
IdExploita = Column(Integer)
Activite = Column(String(255))
MoOGM = Column(String(255))
Dpt = Column(String(100))
NomStation = Column(String(255))
SHAPE_Leng = Column(Float(53))
SHAPE_Area = Column(Float(53))
MoOGM_Vis = Column(String(255))
Annee_Plan = Column(Integer)
Surface_DS = Column(Integer)
geom_json = Column(String)
class OgmTronconsDangereux(Base):
__tablename__ = 'ogm_troncons_dangereux'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
idtd = Column(Integer, primary_key=True)
IdCable = Column(Integer)
Espece = Column(String(100))
Nombre = Column(Integer)
Estimation = Column(String(100))
Sexe = Column(String(20))
Age = Column(String(20))
idPyBas = Column(String(100))
idPyHt = Column(String(100))
NomObs = Column(String(100))
LongReelle = Column(Integer)
Date_ = Column(DateTime)
SHAPE_Leng = Column(Float(53))
geom_json = Column(String)
class OgmTronconsVisualise(Base):
__tablename__ = 'ogm_troncons_visualises'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
idtv = Column(Integer, primary_key=True)
IdCable = Column(Integer)
TypeVisu = Column(String(255))
Financeur = Column(String(255))
Operateur = Column(String(255))
IdPyBas = Column(String(100))
IdPyHt = Column(String(100))
LongReelle = Column(Integer)
Date_visu = Column(DateTime)
SHAPE_Leng = Column(Float(53))
geom_json = Column(String)
class OgmTronconsVisualisesDangereux(Base):
__tablename__ = 'ogm_troncons_visualises_dangereux'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
geom = Column(NullType, index=True)
OBJECTID = Column(Integer)
Espece = Column(String(100))
Nombre = Column(Integer)
Estimation = Column(String(100))
Sexe = Column(String(20))
Age = Column(String(20))
idPyBas = Column(String(100))
idPyHt = Column(String(100))
NomObs = Column(String(100))
LongReelle = Column(Integer)
Date_ = Column(DateTime)
idtvd = Column(Integer, primary_key=True)
IdTV = Column(Integer)
Shape_Leng = Column(Float(53))
raisons = Column(String(255))
geom_json = Column(String)
class RteLigne(Base):
__tablename__ = 'rte_lignes'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_rte_ligne = Column(Integer, primary_key=True, server_default=text("nextval('rte_lignes_id_rte_ligne_seq'::regclass)"))
U_MAX = Column(String(20))
CONFIG = Column(String)
TERNE_EX = Column(Integer)
ADR_LIT_1 = Column(String)
ADR_LIT_2 = Column(String)
ADR_LIT_3 = Column(String)
geom = Column(NullType, index=True)
geom_json = Column(String)
class RtePoste(Base):
__tablename__ = 'rte_postes'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_rte_poste = Column(Integer, primary_key=True, server_default=text("nextval('rte_postes_id_rte_poste_seq'::regclass)"))
U_MAX = Column(String(20))
LIBELLE = Column(String(64))
LIB_SUIT = Column(String(64))
geom = Column(NullType, index=True)
geom_json = Column(String)
class RtePoteaux(Base):
__tablename__ = 'rte_poteaux'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_rte_poteaux = Column(Integer, primary_key=True, server_default=text("nextval('rte_poteaux_id_rte_poteaux_seq'::regclass)"))
U_MAX = Column(String(20))
NB_TERNE = Column(Integer)
geom = Column(NullType, index=True)
geom_json = Column(String)
class TAxesMigratoire(Base):
__tablename__ = 't_axes_migratoires'
__table_args__ = (
CheckConstraint(u"((public.geometrytype(geom) = 'POLYGON'::text) OR (public.geometrytype(geom) = 'MULTIPOLYGON'::text)) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_axe_migratoire = Column(Integer, primary_key=True, server_default=text("nextval('t_axes_migratoires_id_axe_migratoire_seq'::regclass)"))
nom_axe_migratoire = Column(String(100))
migration = Column(Integer)
source = Column(String(100))
description = Column(String)
geom = Column(NullType, nullable=False, index=True)
geom_json = Column(String)
class TCasMortalite(Base):
__tablename__ = 't_cas_mortalite'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_cas_mortalite = Column(Integer, primary_key=True, server_default=text("nextval('t_cas_mortalite_id_cas_mortalite_seq'::regclass)"))
id_espece = Column(ForeignKey(u't_especes.id_espece'), nullable=False)
source = Column(String(100))
id_cause_mortalite = Column(ForeignKey(u'dico_cause_mortalite.id_cause_mortalite'), nullable=False)
nb_cas = Column(Integer)
sexe = Column(String(30))
age = Column(String(30))
date = Column(Date)
geom = Column(NullType, index=True)
geom_json = Column(String)
dico_cause_mortalite = relationship(u'DicoCauseMortalite')
t_espece = relationship(u'TEspece')
class TCommune(Base):
__tablename__ = 't_communes'
__table_args__ = (
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
insee = Column(Integer, primary_key=True)
nom_commune = Column(Unicode(100))
geom = Column(NullType, nullable=False, index=True)
geom_json = Column(String)
equipements = association_proxy('poteaux', 'equipements', getset_factory=outer_join_accessor_factory)
eq_troncons = association_proxy('troncons', 'equipements', getset_factory=outer_join_accessor_factory)
class TEquipementsPoteauxErdf(Base):
__tablename__ = 't_equipements_poteaux_erdf'
id_equipement_poteau_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_equipements_poteaux_erdf_id_equipement_poteau_erdf_seq'::regclass)"))
id_inventaire_poteau_erdf = Column(ForeignKey(u't_inventaire_poteaux_erdf.id_inventaire_poteau_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
id_type_equipement_poteau = Column(ForeignKey(u'dico_type_equipement_poteau.id_type_equipement_poteau'))
date_equipement = Column(Date)
login_saisie = Column(String(25))
mis_en_place = Column(Boolean, server_default=text("false"))
id_nb_equipements = Column(ForeignKey(u'dico_nb_equipements.id_nb_equipements'))
t_inventaire_poteaux_erdf = relationship(u'TInventairePoteauxErdf', backref="equipements")
dico_nb_equipement = relationship(u'DicoNbEquipement')
dico_type_equipement_poteau = relationship(u'DicoTypeEquipementPoteau')
class TEquipementsTronconsErdf(Base):
__tablename__ = 't_equipements_troncons_erdf'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_equipement_troncon_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_equipements_troncons_erdf_id_equipement_troncon_erdf_seq'::regclass)"))
id_inventaire_troncon_erdf = Column(ForeignKey(u't_inventaire_troncons_erdf.id_inventaire_troncon_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
id_type_equipement_troncon = Column(ForeignKey(u'dico_type_equipement_troncon.id_type_equipement_troncon'))
date_equipement_troncon = Column(Date)
geom = Column(NullType, index=True)
login_saisie = Column(String(25))
geom_json = Column(String)
t_inventaire_troncons_erdf = relationship(u'TInventaireTronconsErdf', backref="equipements")
dico_type_equipement_troncon = relationship(u'DicoTypeEquipementTroncon')
class TEspece(Base):
__tablename__ = 't_especes'
id_espece = Column(Integer, primary_key=True, server_default=text("nextval('t_especes_id_espece_seq'::regclass)"))
nom_espece = Column(String(100), nullable=False)
taille_zone_tampon = Column(Integer)
code_couleur = Column(String(20))
t_v_zones_sensibles = Table(
'v_zones_sensibles', metadata,
Column('id_zone_sensible', Integer, primary_key=True),
Column('nom_zone_sensible', String),
Column('niveau_sensibilite', Integer),
Column('nb_poteaux_inventories', BigInteger),
Column('nb_poteaux_inventories_risque_fort', BigInteger),
Column('nb_poteaux_inventories_risque_secondaire', BigInteger),
Column('nb_poteaux_inventories_risque_faible', BigInteger),
Column('nb_poteaux_equipes', BigInteger),
Column('nb_poteaux_equipes_risque_fort', BigInteger),
Column('nb_poteaux_equipes_risque_secondaire', BigInteger),
Column('nb_poteaux_equipes_risque_faible', BigInteger),
Column('m_troncons_inventories', Float(53)),
Column('m_troncons_inventories_risque_fort', Float(53)),
Column('m_troncons_inventories_risque_secondaire', Float(53)),
Column('m_troncons_inventories_risque_faible', Float(53)),
Column('m_troncons_equipes', Float(53)),
Column('m_troncons_equipes_risque_fort', Float(53)),
Column('m_troncons_equipes_risque_secondaire', Float(53)),
Column('m_troncons_equipes_risque_faible', Float(53)),
Column('geom', Text)
)
class TVZonesSensibles(object):
pass
mapper(TVZonesSensibles, t_v_zones_sensibles)
class TInventairePoteauxErdf(Base):
__tablename__ = 't_inventaire_poteaux_erdf'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326'),
Index('t_inventaire_poteaux_erdf_index_id', 'id_type_poteau_erdf', 'id_type_poteau_erdf_secondaire', 'id_zone_sensible', 'id_attractivite', 'id_dangerosite')
)
id_inventaire_poteau_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_inventaire_poteaux_erdf_id_inventaire_poteau_erdf_seq'::regclass)"))
date_inventaire = Column(Date)
id_type_poteau_erdf = Column(ForeignKey(u'dico_type_poteau_erdf.id_type_poteau_erdf'))
id_type_poteau_erdf_secondaire = Column(ForeignKey(u'dico_type_poteau_erdf.id_type_poteau_erdf'))
remarques = Column(String)
id_zone_sensible = Column(ForeignKey(u't_zones_sensibles.id_zone_sensible'))
etat_poteau = Column(String)
id_attractivite = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_dangerosite = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
neutralisation_prevue_isolation = Column(Boolean)
neutralisation_prevue_dissuasion = Column(Boolean)
neutralisation_prevue_attraction = Column(Boolean)
deja_neutralise = Column(Boolean)
geom = Column(NullType, index=True)
geom_json = Column(String)
risque_poteau = Column(Unicode(20))
commune = Column(String(100))
nb_equipements = Column(Integer)
nb_photos = Column(Integer)
insee = Column(ForeignKey(u't_communes.insee'))
dico_classes_risque = relationship(u'DicoClassesRisque', primaryjoin='TInventairePoteauxErdf.id_attractivite == DicoClassesRisque.id_classe_risque')
dico_classes_risque1 = relationship(u'DicoClassesRisque', primaryjoin='TInventairePoteauxErdf.id_dangerosite == DicoClassesRisque.id_classe_risque')
dico_type_poteau_erdf = relationship(u'DicoTypePoteauErdf', primaryjoin='TInventairePoteauxErdf.id_type_poteau_erdf == DicoTypePoteauErdf.id_type_poteau_erdf')
dico_type_poteau_erdf1 = relationship(u'DicoTypePoteauErdf', primaryjoin='TInventairePoteauxErdf.id_type_poteau_erdf_secondaire == DicoTypePoteauErdf.id_type_poteau_erdf')
t_zones_sensible = relationship(u'TZonesSensible', backref='poteaux')
t_commune = relationship(u'TCommune', backref='poteaux')
class TInventaireTronconsErdf(Base):
__tablename__ = 't_inventaire_troncons_erdf'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'LINESTRING'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326'),
Index('t_inventaire_troncons_erdf_index_id', 'id_zone_sensible', 'id_risque_deplacement', 'id_risque_integration_topo', 'id_risque_integration_vegetation', 'id_risque_integration_bati')
)
id_inventaire_troncon_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_inventaire_troncons_erdf_id_inventaire_troncon_erdf_seq'::regclass)"))
date_inventaire = Column(Date)
id_zone_sensible = Column(ForeignKey(u't_zones_sensibles.id_zone_sensible'))
geom = Column(NullType, index=True)
remarques = Column(String)
id_risque_deplacement = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_risque_integration_topo = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_risque_integration_vegetation = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
id_risque_integration_bati = Column(ForeignKey(u'dico_classes_risque.id_classe_risque'))
deja_neutralise = Column(Boolean)
geom_json = Column(String)
risque_troncon = Column(String(20))
commune = Column(String(100))
nb_photos = Column(Integer)
lg_equipee = Column(Float(53))
longueur = Column(Float(53))
insee = Column(ForeignKey(u't_communes.insee'))
dico_classes_risque = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_deplacement == DicoClassesRisque.id_classe_risque')
dico_classes_risque1 = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_integration_bati == DicoClassesRisque.id_classe_risque')
dico_classes_risque2 = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_integration_topo == DicoClassesRisque.id_classe_risque')
dico_classes_risque3 = relationship(u'DicoClassesRisque', primaryjoin='TInventaireTronconsErdf.id_risque_integration_vegetation == DicoClassesRisque.id_classe_risque')
t_zones_sensible = relationship(u'TZonesSensible')
t_commune = relationship(u'TCommune', backref='troncons')
class TObservation(Base):
__tablename__ = 't_observations'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2')
)
id_observation = Column(Integer, primary_key=True, server_default=text("nextval('t_observations_id_observation_seq'::regclass)"))
id_espece = Column(ForeignKey(u't_especes.id_espece', ondelete=u'CASCADE', onupdate=u'CASCADE'), nullable=False)
lieu = Column(String(100))
commentaires = Column(String)
precision_loc = Column(String(50))
source = Column(String(50))
geom = Column(NullType, index=True)
geom_json = Column(String)
nombre = Column(Integer)
date = Column(Date)
t_espece = relationship(u'TEspece')
class TPhotosPoteauxErdf(Base):
__tablename__ = 't_photos_poteaux_erdf'
id_photo_poteau_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_photos_poteaux_erdf_id_photo_poteau_erdf_seq'::regclass)"))
id_inventaire_poteau_erdf = Column(ForeignKey(u't_inventaire_poteaux_erdf.id_inventaire_poteau_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
chemin_photo = Column(String)
commentaire = Column(String)
neutralise = Column(Boolean)
auteur = Column(String)
t_inventaire_poteaux_erdf = relationship(u'TInventairePoteauxErdf')
class TPhotosTronconsErdf(Base):
__tablename__ = 't_photos_troncons_erdf'
id_photo_troncon_erdf = Column(Integer, primary_key=True, server_default=text("nextval('t_photos_troncons_erdf_id_photo_troncon_erdf_seq'::regclass)"))
id_inventaire_troncon_erdf = Column(ForeignKey(u't_inventaire_troncons_erdf.id_inventaire_troncon_erdf', ondelete=u'CASCADE', onupdate=u'CASCADE'))
chemin_photo = Column(String)
commentaire = Column(String)
neutralise = Column(Boolean)
auteur = Column(String)
t_inventaire_troncons_erdf = relationship(u'TInventaireTronconsErdf')
class TSitesNidification(Base):
__tablename__ = 't_sites_nidification'
__table_args__ = (
CheckConstraint(u"(public.geometrytype(geom) = 'POINT'::text) OR (geom IS NULL)"),
CheckConstraint(u'public.st_ndims(geom) = 2'),
CheckConstraint(u'public.st_srid(geom) = 4326')
)
id_site_nidification = Column(Integer, primary_key=True, server_default=text("nextval('t_sites_nidification_id_site_nidification_seq'::regclass)"))
id_espece = Column(ForeignKey(u't_especes.id_espece', ondelete=u'CASCADE', onupdate=u'CASCADE'), nullable=False)
lieu = Column(String(100))
nidification_10_ans = Column(Boolean)
commentaires = Column(String)
precision_loc = Column(String(50))
source = Column(String(50))
geom = Column(NullType, index=True)
geom_json = Column(String)
t_espece = relationship(u'TEspece')
class TZonesSensible(Base):
__tablename__ = 't_zones_sensibles'
id_zone_sensible = Column(Integer, primary_key=True, server_default=text("nextval('t_zone_sensible_id_zone_sensible_seq'::regclass)"))
nom_zone_sensible = Column(String)
niveau_sensibilite = Column(Integer)
t_v_equipements_poteaux = Table(
'v_equipements_poteaux', metadata,
Column('id', Integer, primary_key=True),
Column('id_inventaire_poteau_erdf', Integer),
Column('nom_type_equipement_poteau', String),
Column('id_nb_equipements', Integer),
Column('mis_en_place', Boolean),
Column('date_equipement', Date),
Column('geom_json', String)
)
class TVEquipementsPoteaux(object):
pass
mapper(TVEquipementsPoteaux, t_v_equipements_poteaux)
t_v_sites_nidification_zone_tampon = Table(
'v_sites_nidification_zone_tampon', metadata,
Column('id_espece', Integer),
Column('nom_espece', String(100)),
Column('geom', NullType),
Column('geom_json', Text)
)
t_v_zones_sensibles_poteaux = Table(
'v_zones_sensibles_poteaux', metadata,
Column('id_zone_sensible', Integer),
Column('nb_poteaux_inventories', BigInteger),
Column('nb_poteaux_inventories_risque_fort', BigInteger),
Column('nb_poteaux_inventories_risque_secondaire', BigInteger),
Column('nb_poteaux_inventories_risque_faible', BigInteger),
Column('nb_poteaux_equipes', BigInteger),
Column('nb_poteaux_equipes_risque_fort', BigInteger),
Column('nb_poteaux_equipes_risque_secondaire', BigInteger),
Column('nb_poteaux_equipes_risque_faible', BigInteger),
Column('geom', NullType)
)
t_v_zones_sensibles_troncons = Table(
'v_zones_sensibles_troncons', metadata,
Column('id_zone_sensible', Integer),
Column('m_troncons_inventories', Float(53)),
Column('m_troncons_inventories_risque_fort', Float(53)),
Column('m_troncons_inventories_risque_secondaire', Float(53)),
Column('m_troncons_inventories_risque_faible', Float(53)),
Column('m_troncons_equipes', Float(53)),
Column('m_troncons_equipes_risque_fort', Float(53)),
Column('m_troncons_equipes_risque_secondaire', Float(53)),
Column('m_troncons_equipes_risque_faible', Float(53)),
Column('geom', NullType)
)
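# Illustrative sketch (not part of the original module): querying the mapped classes
# above through the shared session. The join follows the declared foreign key; the
# filter value 'fort' is a made-up example, not a value taken from the real data.
def _example_poteaux_query():
    return (DBSession.query(TCommune)
            .join(TInventairePoteauxErdf, TCommune.insee == TInventairePoteauxErdf.insee)
            .filter(TInventairePoteauxErdf.risque_poteau == u'fort')
            .all())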
|
[
"antoine@abt.im"
] |
antoine@abt.im
|
b8cf141fea4b1a22938b4d48884f5fa6a015aed3
|
8be847caa7b226c7530a530a719a6987feacf7fb
|
/large_app/python/auth0.py
|
5a027e14dbb6f3c93af41684fdee5aa6c67522e5
|
[
"MIT"
] |
permissive
|
sahilGupta89/large_flask_app
|
91af1a6fc32d6d9b9903720d132773ae5e8d18a7
|
e1ab54431bb935c02186f586d9246b741d9f2d33
|
refs/heads/master
| 2023-05-29T16:51:46.599875
| 2020-11-08T11:10:35
| 2020-11-08T11:10:35
| 213,057,891
| 0
| 0
|
MIT
| 2023-05-01T21:37:35
| 2019-10-05T19:19:37
|
Python
|
UTF-8
|
Python
| false
| false
| 8,356
|
py
|
from dataclasses import dataclass
from datetime import datetime, timedelta
import logging
from urllib.parse import urljoin
from jose import jwt
import requests
import env
from jwks import jwks
log = logging.getLogger(__name__)
def auth0_url(path=""):
return urljoin(f"https://{env.AUTH0_DOMAIN}/", path)
@dataclass
class TokenResult:
access_token: dict
id_token: dict
result: dict
@property
def subject(self) -> str:
return self.access_token["sub"]
@property
def expires(self) -> datetime:
return datetime.utcfromtimestamp(self.access_token["exp"])
def is_expired(self) -> bool:
return datetime.utcnow() > self.expires
@property
def token_type(self) -> str:
return self.result["token_type"]
@property
def access_token_value(self) -> str:
return self.result["access_token"]
def token_from_username_password(username, password) -> TokenResult:
r = requests.post(
auth0_url("oauth/token"),
json={
"grant_type": "password",
"username": username,
"password": password,
"audience": env.AUTH0_API_AUDIENCE,
"client_id": env.AUTH0_CLIENT_ID,
"scope": "openid",
"client_secret": env.AUTH0_CLIENT_SECRET,
},
)
if r.status_code == 403:
raise AuthError(r.json(), 401, reauth=True)
parse_status_code(r)
return _oauth_token_to_token_result(r.json())
def token_info_from_client_credentials(client_id, client_secret) -> dict:
r = requests.post(
auth0_url("oauth/token"),
json={
"grant_type": "client_credentials",
"client_id": client_id,
"client_secret": client_secret,
"audience": env.AUTH0_ZEAPI_AUDIENCE,
},
)
r.raise_for_status()
token_info = r.json()
log.info("Credentials login result: %s", token_info)
return token_info
def token_result_from_client_credentials(
client_id, client_secret
) -> TokenResult:
token_info = token_info_from_client_credentials(client_id, client_secret)
return TokenResult(
access_token=parse_it(
token_info["access_token"], env.AUTH0_ZEAPI_AUDIENCE
),
id_token={},
result=token_info,
)
def _oauth_token_to_token_result(
token_info: dict, audience=env.AUTH0_API_AUDIENCE
) -> TokenResult:
assert "access_token" in token_info
return TokenResult(
access_token=parse_it(
            token_info["access_token"], audience
),
id_token=parse_it(token_info["id_token"], env.AUTH0_CLIENT_ID),
result=token_info,
)
def token_from_header_value(token, audience=env.AUTH0_API_AUDIENCE) -> dict:
return parse_it(token, audience)
def token_result_from_header_value(
token, audience=env.AUTH0_API_AUDIENCE
) -> TokenResult:
return TokenResult(
access_token=token_from_header_value(token, audience),
id_token={},
result={"access_token": token},
)
def get_userinfo(token) -> dict:
return requests.get(
auth0_url("userinfo"), headers={"Authorization": f"Bearer {token}"}
).json()
def parse_it(token, audience) -> dict:
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"],
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=env.AUTH0_ALGORITHMS,
audience=audience,
issuer=auth0_url(),
)
except jwt.ExpiredSignatureError:
raise AuthError(
{"code": "token_expired", "description": "token is expired"},
401,
)
except jwt.JWTClaimsError as claims_error:
raise AuthError(
{
"code": "invalid_claims",
"description": "incorrect claims,"
"please check the audience and issuer",
},
401,
) from claims_error
except Exception:
raise AuthError(
{
"code": "invalid_header",
"description": "Unable to parse authentication" " token.",
},
401,
)
return payload
raise AuthError(
{
"code": "invalid_header",
"description": "Unable to find appropriate key",
},
401,
)
class ManagementAPI(object):
def __init__(self):
self.grant_type = "client_credentials"
self._current_access_token = None
self._api_base = auth0_url("api/v2/")
self._users_api_url = urljoin(self._api_base, "users")
def _access_token(self):
if self._current_access_token:
            # renew when the token is within 30 minutes of expiring
            expires_soon = self._current_access_token.expires - timedelta(
                minutes=30
            )
            if datetime.utcnow() > expires_soon:
                log.debug(
                    "ManagementAPI token expires soon(%s). Renewing",
                    self._current_access_token.expires,
                )
                self._renew()
else:
self._renew()
return self._current_access_token
def _renew(self):
res = requests.post(
auth0_url("oauth/token"),
json=dict(
grant_type=self.grant_type,
client_id=env.AUTH0_CLIENT_ID,
client_secret=env.AUTH0_CLIENT_SECRET,
audience=self._api_base,
),
)
if res.status_code > 299:
log.warning(
"Failed to get token for management api: %r", res.content
)
parse_status_code(res)
token_info = res.json()
self._current_access_token = TokenResult(
access_token=parse_it(token_info["access_token"], self._api_base),
id_token={},
result=token_info,
)
def _headers(self):
token = self._access_token()
return {
"Authorization": f"{token.token_type} {token.access_token_value}"
}
def create_user(self, user, password: str):
res = requests.post(
self._users_api_url,
json={
"email": user.email,
"password": password,
"connection": env.AUTH0_UP_CONNECTION_NAME,
"user_metadata": user.dump(),
},
headers=self._headers(),
)
if res.status_code > 299:
log.warning(
"Got %r",
res.content,
extra={
"auth0_create_user_context": {
"user_id": user.id,
"email": user.email,
"name": user.name,
}
},
)
parse_status_code(res)
return res.json()
def get_userinfo(self, sub: str):
res = requests.get(
urljoin(self._users_api_url.rstrip("/") + "/", sub),
headers=self._headers(),
)
parse_status_code(res)
userinfo_result = res.json()
# Paste over the main difference between id_token and userinfo
userinfo_result.setdefault("sub", userinfo_result.get("user_id"))
return userinfo_result
class AuthError(Exception):
def __init__(self, error, status_code, reauth=False):
self.error = error
self.status_code = status_code
self.reauth = reauth
def parse_status_code(res):
    if res.status_code in (409, 400, 429): # duplicate user, bad request or rate limited
raise AuthError(error=res.json(), status_code=res.status_code)
res.raise_for_status()
def request_bearer_token(request) -> str:
header = request.headers.get("authorization", "")
if not header.lower().startswith("bearer"):
return None
_, header_token = header.split(" ", 1)
return header_token
management_api = ManagementAPI()
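# Illustrative sketch (not part of the original module): how a request handler could
# combine the helpers above. `request` is assumed to be a Flask-style object exposing
# a `headers` mapping, as expected by request_bearer_token().
def _example_current_subject(request):
    token = request_bearer_token(request)           # raw bearer token, or None
    if token is None:
        raise AuthError(
            {"code": "missing_token", "description": "no bearer token supplied"}, 401
        )
    result = token_result_from_header_value(token)  # verifies signature and claims
    return result.subject                           # Auth0 user id ("sub" claim)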
|
[
"er.sahil@gmail.com"
] |
er.sahil@gmail.com
|
d0c47516027d338f264dbded0c03ad00d6542d82
|
17bd49682f7236956f0681c7126a11f8981503fe
|
/conftest.py
|
a8f4dd7cfa3dbf3a34bd1384bbd9fb8cec552a97
|
[] |
no_license
|
saferq/TZ_tenzor
|
d7104a30a91a6da3242a4be8d9a1e21410b66952
|
42e07f32682776ae91986e48f82b546c21451cc0
|
refs/heads/main
| 2023-08-06T01:52:45.279315
| 2021-09-30T06:04:26
| 2021-09-30T06:04:26
| 411,941,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
import pytest
from selenium import webdriver
@pytest.fixture(scope="session")
def browser():
driver = webdriver.Firefox()
yield driver
driver.quit()
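# Illustrative example (not part of the original suite): a test module would request
# the session-scoped fixture above simply by naming it as an argument. The URL and
# assertion below are hypothetical.
def test_opens_page(browser):
    browser.get("https://example.com")
    assert "Example" in browser.title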
|
[
"safer88q@gmail.com"
] |
safer88q@gmail.com
|
a7c3a8dc9de426e13429cbc87ae0f7f5de87a5fb
|
fd69c5d94b20161a9f4dd6c39c7f61289d16b603
|
/replics/errors.py
|
5723c0af9a6ce486a6ef14acd1059d553960bf6c
|
[] |
no_license
|
k-t-l-h/AIS-2
|
57785a284eed9f460551c69a77d297be19dcc6c8
|
560f4de6271fa26e2bdff1d685722a158f4eca57
|
refs/heads/main
| 2023-02-02T23:08:53.580104
| 2020-12-26T04:31:06
| 2020-12-26T04:31:06
| 320,883,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
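# Reply strings for the Russian-language assistant: SORRY holds apologies used when an
# utterance was not understood; ALL holds generic "how can I help" prompts.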
SORRY = ["Извини, я пока не понимаю, что ты говоришь",
"Оу, я тебя не совсем понимаю, можешь перефразировать?",
"Извини, я пока не очень хорошо умею разбирать слова. Можешь повторить?"]
ALL = ["Что я могу сделать для тебя?", "Чем я могу помочь?", "Что сегодня делаем?", "Я пришел помочь, что мне сделать?"]
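# Illustrative sketch (not part of the original module): a handler could pick a random
# reply from one of the lists above when composing a bot response.
def _example_reply(understood):
    import random
    return random.choice(ALL if understood else SORRY)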
|
[
"laciedreamer@gmail.com"
] |
laciedreamer@gmail.com
|
7fcc061464f4b66349e06e3ed825d4fc3e207c07
|
9b9a5ae297558d87e871e052d3d2e2c582e17ec4
|
/COW_PROJECT/テストコード/Beysian/gibbs_sampling_main.py
|
dc4c1c8950674625557baf35504f929a5515cde6
|
[] |
no_license
|
vijaydairyf/cow_python
|
9b7632915db1685b6fd2813db9d4310a54d5600b
|
8e07845c4527e753e405da708a010a8c2ca7c425
|
refs/heads/master
| 2021-01-09T17:52:07.500578
| 2020-02-11T07:51:02
| 2020-02-11T07:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,049
|
py
|
import numpy as np
import math
import matplotlib.pyplot as plt
import pdb # for debugging
# project-local helper classes
import myClass.plotting as plotting
import myClass.mixed_model as mixed_model
def create_artificial_poissondata(lam, num):
    """ Create an artificial dataset for testing
        Parameter
            lam : lambda parameter of the Poisson distribution (1-dimensional)
            num : number of data points to generate """
X = np.random.poisson(lam, num) # ndarray
return X
def create_artificial_gaussiandata(mu, cov, num):
    """ Create an artificial dataset for testing
        Parameter
            mu : mean parameter of the Gaussian distribution (multidimensional)
            cov : covariance matrix parameter of the Gaussian distribution
            num : number of data points to generate """
X = np.random.multivariate_normal(mu, cov, num) # ndarray
return X
def extract_data(X, S, k):
    """ Extract from X the data assigned to the k-th cluster according to S """
N = len(X.T)
new_X = []
for n in range(N):
if (S[k, n] == 1):
new_X.append(X[:,n])
return new_X
def poisson_mixed_model_test():
    """ Cluster 1-dimensional input data with a Poisson mixture model """
    # generate multimodal 1-dimensional data points
X1 = create_artificial_poissondata(20, 1000)
X2 = create_artificial_poissondata(50, 750)
    X = np.hstack((X1, X2)) # concatenate the two ndarrays
    np.random.shuffle(X) # shuffle the data
    X = np.array([X]) # make the data 2-dimensional
    # visualize the data
    plotter = plotting.PlotUtility()
    plotter.hist_plot([X1,X2], 20, color=None) # show a histogram, colored by the true clusters
    # set the parameters of the Poisson mixture model
lambda_vector = np.array([30, 40])
pi_vector = np.array([0.5, 0.5])
alpha_vector = np.array([1, 1])
max_iterater = 50
    # clustering by Gibbs sampling
a_0, b_0 = 1, 1
poisson_model = mixed_model.PoissonMixedModel(lambda_vector, pi_vector, alpha_vector, max_iterater)
result = poisson_model.gibbs_sample(X, a_0, b_0)
    # estimate probabilities for new inputs
new_X = np.array([np.arange(1,100)])
prob_matrix = poisson_model.predict(new_X)
    # visualize the clustering result
X1 = extract_data(X, result, 0)
X2 = extract_data(X, result, 1)
plotter2 = plotting.PlotUtility()
plotter2.hist_plot([X1,X2], 20, color=None)
plotter_prob = plotting.PlotUtility()
prob1, prob2 = prob_matrix[0,:], prob_matrix[1,:]
plotter_prob.scatter_plot(new_X, prob1, [0 for _ in range(len(new_X))])
plotter_prob.scatter_plot(new_X, prob2, [1 for _ in range(len(new_X))])
    # display
plotter.show()
plotter2.show()
plotter_prob.show()
def gaussian_mixed_model_test():
    # generate multimodal 2-dimensional data points
X1 = create_artificial_gaussiandata(np.array([30, 40]), np.array([[100, 25], [25, 100]]), 1100)
X2 = create_artificial_gaussiandata(np.array([70, 20]), np.array([[150, 75], [75, 150]]), 900)
    X = np.concatenate([X1, X2], 0) # concatenate the two ndarrays
    np.random.shuffle(X) # shuffle the data
X = X.T
    # visualize the data
plotter = plotting.PlotUtility()
plotter.scatter_plot(X1[:,0], X1[:,1], [1 for _ in range(len(X1))])
plotter.scatter_plot(X2[:,0], X2[:,1], [2 for _ in range(len(X2))])
    # set the parameters of the Gaussian mixture distribution
mu_vectors = [np.array([30, 50]), np.array([70, 50])]
cov_matrixes = [np.array([[110, 45], [45, 110]]), np.array([[130, 55], [55, 130]])]
pi_vector = np.array([0.6, 0.4])
alpha_vector = np.array([1, 1])
max_iterater = 10
    # clustering by Gibbs sampling
gaussian_model = mixed_model.GaussianMixedModel(cov_matrixes, mu_vectors, pi_vector, alpha_vector, max_iterater)
result = gaussian_model.gibbs_sample(X, np.array([[50, 50]]).T, 1, 3, np.array([[1, 0], [0, 1]]))
    # estimate probabilities for new inputs
new_X = np.arange(1,101, 2)
new_Y = np.arange(1,101, 2)
grid_X, grid_Y = np.meshgrid(new_X, new_Y)
new_X = np.array([grid_X.ravel(), grid_Y.ravel()])
prob_matrix = gaussian_model.predict(new_X)
    # visualize the clustering result
X1 = np.array(extract_data(X, result, 0))
X2 = np.array(extract_data(X, result, 1))
plotter2 = plotting.PlotUtility()
plotter2.scatter_plot(X1[:,0], X1[:,1], [1 for _ in range(len(X1))])
plotter2.scatter_plot(X2[:,0], X2[:,1], [2 for _ in range(len(X2))])
plotter_prob = plotting.PlotUtility3D()
prob1, prob2 = prob_matrix[0], prob_matrix[1]
plotter_prob.plot_surface(grid_X, grid_Y, prob1.reshape([50, 50]), c=1)
plotter_prob.plot_surface(grid_X, grid_Y, prob2.reshape([50, 50]), c=2)
    # display
plotter.show()
plotter2.show()
plotter_prob.show()
if __name__ == '__main__':
#poisson_mixed_model_test()
gaussian_mixed_model_test()
|
[
"sfukumoto123@gmail.com"
] |
sfukumoto123@gmail.com
|
e770ee03f163f76ae10f97c7f4917e3649348a06
|
01799c12f6f18573cb132c6706c4d2fd7c56aadc
|
/billings/billing/venv/Scripts/pip3-script.py
|
ce92d9b3396739ad519f1ed29ab68109aff0f4a4
|
[] |
no_license
|
MyPrivatePlace/billing
|
2d1a2ef0fde83ac98c8b1b75ac56ed1b17c27116
|
5bd2ffccaac3863a5909699c70f89ddd363dd184
|
refs/heads/master
| 2020-03-28T10:42:29.653496
| 2018-10-31T19:54:23
| 2018-10-31T19:54:23
| 148,136,514
| 0
| 0
| null | 2018-09-10T10:39:43
| 2018-09-10T10:09:08
| null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
#!C:\Projects\billings\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"sunkang_99@126.com"
] |
sunkang_99@126.com
|
04dd25f2e360e6a0b81d6329398e7373d37c3db2
|
ff801544b1979442b886d2d1eaf8480e7d6b0d24
|
/main.py
|
20bae383952351920f5e31df5cc21b3dcc2b56c3
|
[] |
no_license
|
BLimmie/OctoGAN
|
7d420cd223ea0dd77dd0dfa1827f12fcd32e9dec
|
38bb4d76eb8dea22278da2d496b712c171be080f
|
refs/heads/master
| 2021-05-11T02:11:55.498819
| 2018-01-21T17:34:58
| 2018-01-21T17:34:58
| 118,352,908
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,747
|
py
|
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | fake')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=128, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=150, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.dataset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=opt.dataroot,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'lsun':
dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'fake':
dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
transform=transforms.ToTensor())
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class _netG(nn.Module):
def __init__(self, ngpu):
super(_netG, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( nz, ngf * 16, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
            # state size. (ngf*16) x 4 x 4
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
            # state size. (ngf*8) x 8 x 8
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
            # state size. (ngf*4) x 16 x 16
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
            # state size. (ngf*2) x 32 x 32
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
            # state size. (ngf) x 64 x 64
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
            # state size. (nc) x 128 x 128
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
netG = _netG(ngpu)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
class _netD(nn.Module):
def __init__(self, ngpu):
super(_netD, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
            # input is (nc) x 128 x 128
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 64 x 64
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 32 x 32
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 16 x 16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 8 x 8
nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*16) x 4 x 4
nn.Conv2d(ndf * 16, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
netD = _netD(ngpu)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
fixed_noise = Variable(fixed_noise)
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu, _ = data
batch_size = real_cpu.size(0)
if opt.cuda:
real_cpu = real_cpu.cuda()
input.resize_as_(real_cpu).copy_(real_cpu)
label.resize_(batch_size).fill_(real_label)
inputv = Variable(input)
labelv = Variable(label)
output = netD(inputv)
errD_real = criterion(output, labelv)
errD_real.backward()
D_x = output.data.mean()
# train with fake
noise.resize_(batch_size, nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev)
labelv = Variable(label.fill_(fake_label))
output = netD(fake.detach())
errD_fake = criterion(output, labelv)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
labelv = Variable(label.fill_(real_label)) # fake labels are real for generator cost
output = netD(fake)
errG = criterion(output, labelv)
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
if i % 100 == 0:
vutils.save_image(real_cpu,
'%s/real_samples.png' % opt.outf,
normalize=True)
fake = netG(fixed_noise)
vutils.save_image(fake.data,
'%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch),
normalize=True)
# do checkpointing
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
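# Example invocation (illustrative only; the dataset path and output folder are
# hypothetical, not taken from the original repository):
#   python main.py --dataset folder --dataroot ./data --cuda --niter 150 --outf ./results
# With the layer stacks defined above, the generator outputs (nc) x 128 x 128 images,
# matching the default --imageSize of 128.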
|
[
"brian01.lim@gmail.com"
] |
brian01.lim@gmail.com
|
e57b674fc4450a28f95cfb01f1c0395260b4adec
|
3ae12bedf5c32d91fe148d49cfa0cfb59651e43e
|
/backend/users/admin.py
|
71f60e56d93c75c186127f3a31f3e6620af645ac
|
[] |
no_license
|
aminuolawale/personal_store
|
cb3aa4a09b5392d4cd7d400c44787d8ae4fab9ec
|
9ae2da507140430af519f27edc23340948db9e55
|
refs/heads/master
| 2023-01-03T12:01:35.291757
| 2020-11-06T21:45:25
| 2020-11-06T21:45:25
| 308,445,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
from django.contrib import admin
from .models import User, Address
admin.site.register(User)
admin.site.register(Address)
|
[
"aminuolawalekan@gmail.com"
] |
aminuolawalekan@gmail.com
|
b32507222fde3f24d7b8b4d925485d3b237f7ea4
|
6e1fe9ac115c8404e61e880375af685fb09696f1
|
/__main__.py
|
439817a9148425e5eb50db57a8a891ffa5ec19d4
|
[
"MIT"
] |
permissive
|
ValentinKolb/scon
|
01ab016775df71bd767c92ab26b1db03ef8912ac
|
c4a6646a0815d0c8ef9fa2505f7afb7ac68c3c2c
|
refs/heads/main
| 2023-08-28T04:16:21.075881
| 2021-11-03T20:37:28
| 2021-11-03T20:37:28
| 399,600,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,112
|
py
|
#!/usr/bin/env python3
# This script configures ssh for new hosts
# Author: Valentin Kolb
# Version: 1.1
# License: MIT
import os
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import List, Union
import re
import argparse
from prompt_toolkit import PromptSession, HTML, print_formatted_text
from prompt_toolkit.completion import NestedCompleter
from prompt_toolkit.shortcuts import clear
from prompt_toolkit.styles import Style
#########################
# DEFAULT CONFIGURATION #
#########################
DEFAULT_USER = "admin"
DEFAULT_PORT = 22
CONFIG_FILE = str(Path.home()) + "/.ssh/config"
SSH_KEY_DIR = str(Path.home()) + "/.ssh/keys"
#########################
# END DEFAULTS #
#########################
def bottom_toolbar():
return HTML('SSH Wizard - type <b>help</b> to list all commands')
def stderr(text, end="\n"):
"""
prints error msg
"""
print_formatted_text(text, file=sys.stderr, end=end)
session = PromptSession(
bottom_toolbar=bottom_toolbar,
complete_while_typing=True
)
style = Style.from_dict({
'cmd': '#ff0066',
'hlp': '#44ff00 italic',
})
REVERSED = u"\u001b[7m"
RESET = u"\u001b[0m"
FNULL = open(os.devnull, 'w')
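# Matches one "Host" block as written by dataclass_to_file() below; the named groups
# correspond one-to-one to the fields of SSHConfig.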
SSH_KEY_FILE_REGEX = r"Host +(?P<ID>.+)\n\tHostname +(?P<hostname>\S+)\n\tUser +(?P<user>\S+)\n\tPort +(?P<port>\d+)\n\tIdentityFile +(?P<key_file>\S+)\n?"
@dataclass(frozen=True)
class SSHConfig:
ID: str
hostname: str
user: str
port: int
key_file: str
def file_to_dataclass(file: str) -> List[SSHConfig]:
"""
reads a ssh config file an parses it to an list of dataclasses
:param file: the ssh config file
:return: an array of dataclasses
"""
with open(file) as file:
content = file.read()
results = []
for match in re.finditer(pattern=SSH_KEY_FILE_REGEX, string=content):
results.append(
SSHConfig(
ID=match.group("ID"),
hostname=match.group("hostname"),
user=match.group("user"),
port=int(match.group("port")),
key_file=match.group("key_file")
)
)
return results
def dataclass_to_file(file: str, data: List[SSHConfig]):
"""
writes the ssh config file
:param file: the path of the file
:param data: the data to be written
"""
with open(file, mode="w") as file:
for config in data:
file.write(
f'Host {config.ID}\n' +
f'\tHostname {config.hostname}\n' +
f'\tUser {config.user}\n' +
f'\tPort {config.port}\n' +
f'\tIdentityFile {config.key_file}\n\n'
)
def yes(prompt="[Y/n]"):
"""
asks user yes or no question, yes is default
:param prompt: the prompt for the user
:return: true if answer was yes
"""
while True:
_in = session.prompt(prompt).strip().lower()
if _in in ["y", "yes", ""]:
return True
elif _in in ["n", "no"]:
return False
def list_config():
"""
this will print all currently configured hosts
"""
hosts = file_to_dataclass(CONFIG_FILE)
i = max(len(h.ID) for h in hosts)
j = max(len(h.hostname) + 1 + len(h.user) for h in hosts)
print(f'{"identifier".upper().ljust(i)} | HOST')
print("=" * (i + j + 3))
for host in hosts:
print(f'{host.ID.ljust(i, ".")} | {(host.user + "@" + host.hostname).ljust(j, ".")}')
print(f"\nUsage: 'ssh <identifier>' (eg: ssh {hosts[0].ID})")
def add_host():
# domain name
hostname = session.prompt("Enter the domain name. (e.g. host.example.com): ").strip().lower()
ID, _ = hostname.split(".", 1)
ID = session.prompt(
f"Enter an alias of the host (usage: ssh <alias>) [{ID}]: ") or ID
# check if host is up
if not subprocess.run(["ping", "-c", "1", "-i", "0.5", hostname],
stdout=FNULL,
stderr=subprocess.STDOUT).returncode == 0:
stderr(f"{hostname} can't be reached, do want to continue anyway? [Y/n] ", end="")
if not yes(prompt=""):
stderr("... aborting")
return
# user name
user = session.prompt(f"please enter the user [{DEFAULT_USER}]: ").strip() or DEFAULT_USER
# port
port = int(session.prompt(f"please enter the port [{DEFAULT_PORT}]: ").strip() or 22)
# check for existing configuration
hosts = file_to_dataclass(CONFIG_FILE)
if any(hostname == h.hostname for h in hosts):
stderr(f"There is already a configuration for the host {hostname}, do you want to overwrite it? [Y/n] ", end="")
if not yes(prompt=""):
stderr("... aborting")
return
else:
hosts = [h for h in hosts if h.hostname != hostname]
# generate public and private key
print("generating keys ...")
subprocess.run(["mkdir", "-p", SSH_KEY_DIR])
key_file = f'{SSH_KEY_DIR}/{hostname.replace(".", "_")}'
if os.path.exists(key_file):
os.remove(key_file)
os.remove(f'{key_file}.pub')
subprocess.run(["ssh-keygen", "-t", "ed25519", "-C", f"'key for {hostname}'", "-f", key_file, "-q"])
new_config_data = SSHConfig(
ID=ID,
hostname=hostname,
user=user,
port=port,
key_file=key_file
)
with open(f'{key_file}.pub') as file:
public_key = file.read().strip()
dataclass_to_file(CONFIG_FILE, hosts + [new_config_data])
print("... wizard done.")
print()
print(f'PUBLIC KEY: {REVERSED}{public_key}{RESET}')
print()
print("To connect to the VM follow these steps:")
print(f"\t1. copy the public key to the cloud-init drive of the VM. "
f"\n\t this can be done in proxmox")
print(f"\t2. run {REVERSED}ssh {ID}{RESET} to connect to the VM")
def configure(cmd: List[str]):
"""
change the default values of this script
"""
if cmd[0] == "show":
print("Configured values for this script:")
print(f" DEFAULT-USER : {DEFAULT_USER}")
print(f" DEFAULT-PORT : {DEFAULT_PORT}")
print(f" CONFIG-FILE : {CONFIG_FILE}")
print(f" SSH-KEY-DIR : {SSH_KEY_DIR}")
elif cmd[0] == "set" and len(cmd) == 3:
if cmd[1] == "DEFAULT-USER":
...
elif cmd[1] == "DEFAULT-PORT":
...
elif cmd[1] == "CONFIG-FILE":
...
elif cmd[1] == "SSH-KEY-DIR":
...
else:
        stderr(f"Invalid cmd for 'configure': {' '.join(cmd)}")
if __name__ == '__main__':
while True:
hosts = file_to_dataclass(CONFIG_FILE)
completer = NestedCompleter.from_nested_dict({
'ssh ': {host.ID for host in hosts},
'remove ': {host.ID for host in hosts},
'add': None,
'list': None,
'help': None,
'exit': None,
'clear': None,
'configure': {
"show", "set"
}
})
try:
text: str = session.prompt(message=">>> ",
completer=completer)
except KeyboardInterrupt:
stderr(HTML("Enter <b>exit</b> to exit the shell or press <b>CTRL-D</b>."))
continue
except EOFError:
stderr("... exiting")
exit(-1)
if text.startswith("ssh"):
cmd = text.split(" ")
try:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if result.stdout:
print(result.stdout)
if result.stderr:
stderr(result.stderr)
except KeyboardInterrupt:
stderr(" Keyboard Interrupt!")
elif text.startswith("remove"):
...
elif text.startswith("add"):
...
elif text.startswith("list"):
list_config()
elif text.startswith("help"):
help_text = {
                'ssh <alias>': "Connect to an ssh host by its alias.",
'remove <alias>': "Remove an ssh host from the config.",
'add': "Run wizard to add a new ssh host.",
'list': "List all ssh hosts.",
'help': "Print this help.",
'exit': "Exit the shell.",
'clear': "Clears the screen.",
'configure [show | set ..]': "Show and change the default values of the wizard."
}
width = max(len(s) for s in help_text)
for cmd in help_text:
print(f'{cmd.ljust(width)} : {help_text[cmd]}')
elif text.startswith("exit"):
break
elif text.startswith("configure"):
_, *cmd = text.split(" ")
configure(cmd)
elif text.startswith("clear"):
clear()
else:
print_formatted_text(HTML(f"Unknown Command: {text}\nEnter <b>help</b> for a list of all commands."))
|
[
"valentinkolb@ValentinsLaptop.localdomain"
] |
valentinkolb@ValentinsLaptop.localdomain
|
7c4b4221e5c0374176572d6f71f5c551f817f379
|
0c08a15045b24b56bdb42dff5cf210f9bee6827f
|
/family_album/images/models.py
|
d5b5c4f36766d7947af2bbdb671029aa4607d9dd
|
[
"MIT"
] |
permissive
|
squadran2003/family-album
|
205d6f4a7256e466506d796d7da37a0eeff65fe3
|
eae75987e4786255269ecee2482d715ae2229db2
|
refs/heads/master
| 2022-12-05T00:19:29.629432
| 2019-01-20T13:10:22
| 2019-01-20T13:10:22
| 165,837,569
| 0
| 0
|
MIT
| 2022-11-22T03:23:44
| 2019-01-15T11:15:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
from django.utils import timezone
from PIL import Image as img
from io import BytesIO
from django.core.files.uploadedfile import InMemoryUploadedFile
import sys
from django.db import models
from django.contrib.auth.models import User
class Image(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
description = models.TextField()
image = models.ImageField(upload_to='pictures')
created_at = models.DateTimeField(default=timezone.now)
class Meta:
ordering = ('-created_at',)
    def save(self, *args, **kwargs):
# Opening the uploaded image
im = img.open(self.image)
output = BytesIO()
# Resize/modify the image
im = im.resize((400, 300))
# after modifications, save it to the output
im.save(output, format='JPEG', quality=100)
output.seek(0)
        # change the imagefield value to be the newly modified image value
self.image = InMemoryUploadedFile(
output, 'ImageField',
"%s.jpeg" % self.image.name.split('.')[0],
'jpeg', sys.getsizeof(output), None
)
        super(Image, self).save(*args, **kwargs)
def __str__(self):
return self.description
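# Hedged usage sketch (variable names are assumptions, not from this project): given an
# authenticated `some_user` and an uploaded image file `photo_file`,
#   Image(user=some_user, description="holiday", image=photo_file).save()
# stores a resized 400x300 JPEG copy of the upload under MEDIA_ROOT/pictures/.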
|
[
"cormackandy@hotmail.com"
] |
cormackandy@hotmail.com
|
b42c9a05e876a611b682a0b70a86878e4a80aebb
|
27426683a9af095c4bbbf9bb6f2dce68a49b8302
|
/stacked_generalization.py
|
d19bff9deaba6a8bad04eaedd0a34bd231abbd48
|
[] |
no_license
|
chetanmehra/stacked_generalization-1
|
aae8bcdedd05e59d93063f5058f3c9f875b9bf5b
|
5eab38bcd9cebf0f37f52fb58b4793b85e8f0b1e
|
refs/heads/master
| 2021-06-01T00:22:58.495122
| 2016-05-09T11:31:03
| 2016-05-09T11:31:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from sklearn.cross_validation import StratifiedKFold
import numpy
class StackedGeneralization:
def __init__(self, n_folds, train_data, train_target, test_data):
self.n_folds = n_folds
self.train_data = train_data
self.train_target = train_target
self.test_data = test_data
self.n_classes = len(numpy.unique(train_target))
self.skf = StratifiedKFold(y=train_target, n_folds=n_folds)
|
[
"sergeant.wizard@gmail.com"
] |
sergeant.wizard@gmail.com
|
c3ce6f4907c56922e923d921e78478a4fe44f176
|
ce73050565ebdec828919f339e81da54b5fd7fcf
|
/GeneralProblems/DynamicArray.py
|
cb9487aadfc557076f184d6d7d48c600069796c3
|
[] |
no_license
|
VaibhavDesai/Algorithms
|
b4b1ad6a13a32cfe16abb4174a672841d45628e2
|
32f43f0c4b28eb4aa2b6142ff962fc322ac796b0
|
refs/heads/master
| 2020-12-30T13:28:11.729137
| 2017-10-02T08:02:30
| 2017-10-02T08:02:30
| 91,217,973
| 1
| 0
| null | 2017-05-19T16:52:25
| 2017-05-14T03:41:20
|
Python
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
firstIn = [int(x) for x in input().split()]
n = firstIn[0]
q = firstIn[1]
def calDy(inputList, n):
    # NOTE: the original file ends mid-function; the query handling for the
    # HackerRank "Dynamic Array" problem was never completed, so this is only a stub.
    if inputList[0] == 1:
        pass
ans = []
for i in range(q):
    ans.append(calDy([int(x) for x in input().split()], n))
|
[
"admin@Admins-MacBook-Pro-2.local"
] |
admin@Admins-MacBook-Pro-2.local
|
1884b26999b578c08e920c4f7f1ae2e648715491
|
174d1c8465550eeb356a698e370828c4854ac883
|
/chapter04/qt04_QTextEdit.py
|
1afeb7d0415818bda0b65def2e78652ca439d518
|
[] |
no_license
|
Junkiwang/PyQtUI
|
a34876da8fc65b546f7e5348eaad7b9c1e54321d
|
d93a793d18c4bfc117ca374ae28a2a71631c2121
|
refs/heads/master
| 2020-03-18T23:45:13.314811
| 2018-07-09T05:58:13
| 2018-07-09T05:58:13
| 135,425,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Junki
from PyQt5.QtWidgets import QApplication, QTextEdit, QWidget, QVBoxLayout, QPushButton
import sys
class textEditDemo(QWidget):
def __init__(self, parent=None):
super(textEditDemo, self).__init__(parent)
        self.setWindowTitle('QTextEdit例子')  # "QTextEdit example"
        self.resize(300, 300)
        self.textEdit = QTextEdit()
        self.btnPress0 = QPushButton('获取输入内容')  # "get the entered text"
        self.btnPress1 = QPushButton('显示文本')  # "show plain text"
        self.btnPress2 = QPushButton('显示Html')  # "show HTML"
layout = QVBoxLayout()
layout.addWidget(self.textEdit)
layout.addWidget(self.btnPress0)
layout.addWidget(self.btnPress1)
layout.addWidget(self.btnPress2)
self.setLayout(layout)
self.btnPress0.clicked.connect(self.getText)
self.btnPress1.clicked.connect(self.btnPress1_Clicked)
self.btnPress2.clicked.connect(self.btnPress2_Clicked)
    def getText(self):
        print('获取到文本框中的输入内容:%s' % self.textEdit.toPlainText())  # "text entered in the text box: %s"
    def btnPress1_Clicked(self):
        self.textEdit.setPlainText('Hello PyQt5!\n单击按钮。')  # "Hello PyQt5! Click the button."
    def btnPress2_Clicked(self):
        self.textEdit.setHtml('<font color="red" size="6"><red>Hello PyQt5!<br>单击按钮。</red></font>')  # same message rendered as HTML
if __name__ == '__main__':
app = QApplication(sys.argv)
win = textEditDemo()
win.show()
sys.exit(app.exec_())
|
[
"350187552@qq.com"
] |
350187552@qq.com
|
41f2df2137a227386f0dece011dcf1d628037fd7
|
ad544b38ec09828cda1b1918f407975bc79bf976
|
/missioncontrol/mc/mc/views.py
|
82f5e002d54b800f164e42ee9229c4612ff2bd76
|
[] |
no_license
|
mattvenn/earth-to-mars
|
6de13606f3f8087da40e8ed0543a03e0093c25fb
|
c2b0064ef87c3d095d231587ee3ef48b00360bfd
|
refs/heads/master
| 2021-01-10T07:29:17.557441
| 2016-03-17T16:34:42
| 2016-03-17T16:34:42
| 45,628,116
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,754
|
py
|
from mc import app
from mc import db
from sqlalchemy.exc import IntegrityError
import datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, jsonify, make_response, send_file
from contextlib import closing
from flask_admin.contrib.sqla import ModelView
import time
from wtforms import TextAreaField, TextField, IntegerField, FloatField, SelectField, PasswordField
from wtforms import validators
from flask_wtf import Form
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from mc.models import Teams, School, Sample, Answers, Questions, GroupGraph, Photo, Panorama
from graphing import submit_graph, update_group_graph, get_group_graph_name
from werkzeug import secure_filename
import os
class SecureView(ModelView):
def is_accessible(self):
if 'logged_in' in session.keys():
return True
def inaccessible_callback(self, name, **kwargs):
# redirect to login page if user doesn't have access
return redirect(url_for('login', next=request.url))
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
# tested
def get_teams():
return Teams.query.all()
class LoginForm(Form):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.username.data != app.config['USERNAME']:
self.username.errors.append('Unknown username')
return False
if self.password.data != app.config['PASSWORD']:
self.password.errors.append('bad password')
return False
return True
class AnswerForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
answer = TextAreaField('Answer', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
self.answer = Answers(None, self.answer.data, self.team.data)
return True
class PhotoForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
maxx = app.config['MAX_X']
maxy = app.config['MAX_Y']
x = IntegerField('X', [validators.NumberRange(min=0, max=maxx - 1)])
y = IntegerField('Y', [validators.NumberRange(min=0, max=maxy - 1)])
photo = FileField('Image', validators=[
FileRequired(message="you must choose a photo"),
FileAllowed(['jpg', 'png'], message='only images allowed')
])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
return True
class SampleForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
types = app.config['SAMPLE_TYPES']
methane = FloatField('Methane', [validators.NumberRange(min=types['methane']['min'], max=types['methane']['max'])])
temperature = FloatField('Temperature', [validators.NumberRange(min=types['temperature']['min'], max=types['temperature']['max'])])
humidity = FloatField('Humidity', [validators.NumberRange(min=types['humidity']['min'], max=types['humidity']['max'])])
maxx = app.config['MAX_X']
maxy = app.config['MAX_Y']
x = IntegerField('X', [validators.NumberRange(min=0, max=maxx - 1)])
y = IntegerField('Y', [validators.NumberRange(min=0, max=maxy - 1)])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
if Sample.query.filter(Sample.x == self.x.data, Sample.y == self.y.data, Sample.team == self.team.data).first():
self.team.errors.append('your team already uploaded this sample')
return False
return True
# tested
def add_school_point(points=1):
school = School.query.order_by(School.timestamp.desc()).first()
if school is not None:
school.points += points
db.session.commit()
# tested
def get_group_id():
try:
group_id = GroupGraph.query.all()[-1].id
except IndexError:
group_id = 0
return group_id
# tested
@app.route('/')
def mission_control():
school = School.query.order_by(School.timestamp.desc()).first()
now = datetime.datetime.now()
end_hour = app.config['END_HOUR']
end_min = app.config['END_MIN']
end_time = datetime.datetime.now().replace(hour=end_hour,minute=end_min,second=0)
delta = end_time - now
mins = delta.total_seconds() / 60
hours = mins / 60
mins = mins % 60
secs = delta.total_seconds() % 60
time_info = { 'now': now.strftime('%H:%M'), 'left': '%02d:%02d' % (hours, mins) }
pan = Panorama.query.first()
pan_info = { 'name': pan.get_pan_name(), 'num': pan.get_num_photos() }
return render_template('mission_control.html', school_info=school, time_info=time_info, pan_info=pan_info, group_id=get_group_id())
# tested
@app.route('/show/samples')
def show_samples():
samples = Sample.query.all()
return render_template('show_samples.html', samples=samples)
# tested
@app.route('/show/graph/<type>')
def show_group_graph(type):
return render_template('show_group_graph.html', type=type, group_id=get_group_id())
# tested
@app.route('/upload/sample', methods=['GET', 'POST'])
def add_sample():
form = SampleForm()
if form.validate_on_submit():
sample = Sample()
form.populate_obj(sample)
db.session.add(sample)
db.session.commit()
add_school_point()
submit_graph(sample) # make a graph
#update_group_graph(form.sample)
flash('sample logged')
return render_template('sample_submitted.html', sample=sample)
return render_template('add_sample.html', form=form)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
def make_csv(head, list):
import StringIO
import csv
si = StringIO.StringIO()
cw = csv.writer(si)
cw.writerow(head)
for i in list:
cw.writerow(i.get_csv())
return si
def make_csv_response(head, list, name):
si = make_csv(head, list)
response = make_response(si.getvalue())
response.headers["Content-Disposition"] = "attachment; filename=%s" % name
return response
@app.route('/api/questions')
def api_get_questions():
questions = Questions.query.all()
head = Questions.get_csv_head()
return make_csv_response(head, questions,'questions.csv')
@app.route('/api/answers')
def api_get_answers():
answers = Answers.query.all()
head = Answers.get_csv_head()
return make_csv_response(head, answers,'answers.csv')
# build an archive of all the cool data and zip it
@app.route('/api/zipped-data')
def zipped_data():
import zipfile
import io
import json
memory_file = io.BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
for name in app.config['SAMPLE_TYPES'].keys():
graph_name = get_group_graph_name(name, get_group_id())
zf.write(graph_name, name + '.png')
answers = Answers.query.all()
head = Answers.get_csv_head()
answers_csv = make_csv(head, answers)
zf.writestr('answers.csv', answers_csv.getvalue())
questions = Questions.query.all()
head = Questions.get_csv_head()
questions_csv = make_csv(head, questions)
zf.writestr('questions.csv', questions_csv.getvalue())
samples = Sample.query.all()
data = { 'samples' : [sample.serialise() for sample in samples]}
zf.writestr('samples.json', json.dumps(data))
memory_file.seek(0)
return send_file(memory_file, attachment_filename='missioncontrol.zip', as_attachment=True)
# tested
@app.route('/api/team/<name>')
def api_get_team_by_name(name):
name = name.lower()
teams = get_teams()
for team in teams:
if team.name.lower() == name:
return jsonify(team.serialise())
raise InvalidUsage("no team of that name found")
# tested
@app.route('/api/samples')
def api_get_all_samples():
samples = Sample.query.all()
data = { 'samples' : [sample.serialise() for sample in samples]}
return jsonify(data)
# tested
@app.route('/api/sample/<int:sample_id>')
def api_get_sample(sample_id):
sample = Sample.query.get(sample_id)
if not sample:
raise InvalidUsage("no sample of that id found")
return jsonify(sample.serialise())
# tested
@app.route('/api/sample', methods=['POST'])
def api_add_sample():
if not request.json:
raise InvalidUsage("json needed")
form = SampleForm(data = request.get_json())
form.csrf_enabled = False
if not form.validate():
raise InvalidUsage("invalid data", payload=form.errors)
sample = Sample()
form.populate_obj(sample)
db.session.add(sample)
db.session.commit()
#update_group_graph(form.sample)
add_school_point()
return jsonify(sample.serialise()), 201
# tested
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
session['logged_in'] = True
flash('You were logged in')
return redirect('/admin')
return render_template('login.html', form=form)
# tested
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect('/admin')
# tested
@app.route('/answers/<int:question_id>')
def answers(question_id):
question = Questions.query.get(question_id)
return render_template('answer.html', question=question)
# tested
@app.route('/questions/<int:question_id>', methods=['GET', 'POST'])
def questions(question_id):
form = AnswerForm()
question = Questions.query.get(question_id)
if form.validate_on_submit():
form.answer.question = question
db.session.add(form.answer)
db.session.commit()
add_school_point(10)
flash('answer logged')
return redirect(url_for('answers', question_id=question_id))
return render_template('question.html', question=question, form=form)
@app.route('/upload/photo', methods=['GET', 'POST'])
def add_photo():
form = PhotoForm()
if form.validate_on_submit():
filename = secure_filename(form.photo.data.filename)
form.photo.data.save(os.path.join(app.static_folder, 'photos', filename))
photo = Photo()
form.populate_obj(photo)
photo.image_path = filename
db.session.add(photo)
db.session.commit()
pan = Panorama.query.first()
pan.add_to_panorama(photo)
add_school_point()
return render_template('photo_submitted.html', photo=photo)
return render_template('add_photo.html', form=form)
|
[
"matt@mattvenn.net"
] |
matt@mattvenn.net
|
bf055d3d9a0f6250e6e0336a5e27ccf9328377c7
|
0a118de91d880058dd2b9301d81ffa3ffd17514a
|
/benchmarking/smartseq2/merge_picard_metrics/merge_picard_mets.py
|
a39d568b22a0d409d3946b10422bf79c73dfc4ec
|
[] |
no_license
|
garyluu/skylab
|
9b15aee18f1240122331eef6de8cc04e8212bf81
|
319d0ac57654d14056669dc836f894d482891dbc
|
refs/heads/master
| 2020-03-13T08:51:55.944993
| 2018-05-24T13:42:59
| 2018-05-24T13:42:59
| 131,052,488
| 0
| 4
| null | 2018-04-25T19:13:26
| 2018-04-25T19:13:25
| null |
UTF-8
|
Python
| false
| false
| 4,167
|
py
|
from crimson import picard
import pandas as pd
import numpy as np
from google.cloud import storage
import json
from os.path import basename
import sys
import requests
import argparse
def retrieve_workflow_outputs(cromwell_uuid, output_name):
# load cromwell credential
logins = json.load(open('/usr/secrets/broad-dsde-mint-dev-cromwell.json'))
metadata_url = "https://cromwell.mint-dev.broadinstitute.org/api/workflows/v1/" + cromwell_uuid + "/metadata?expandSubWorkflows=false"
r = requests.get(
metadata_url,
auth=(logins['cromwell_username'], logins['cromwell_password']))
data = r.json()
# load output files
files = data['outputs'][output_name]
return (files)
def merge_picard_metrics(files, metric_name):
"""
    The pipeline outputs Picard QC metrics at the single-cell/sample level.
    This function is called to aggregate QC metrics by metric type and to merge the
    per-sample measurements into a single matrix file, in which each column is a
    sample/cell and each row is a QC metric.
    :param files: metric files from pipeline outputs
    :param metric_name: metric name prefixed with the workflow and subworkflow names, such as 'run_pipelines.RunStarPipeline.alignment_summary_metrics'
"""
# set up auth
client = storage.Client()
bucket = client.get_bucket('broad-dsde-mint-dev-cromwell-execution')
# load cromwell credential
logins = json.load(open('/usr/secrets/broad-dsde-mint-dev-cromwell.json'))
# initial output
mets = {}
for kk in range(0, len(files)):
fc = files[kk]
fc = fc.replace('gs://broad-dsde-mint-dev-cromwell-execution/', '')
blob = bucket.get_blob(fc)
met_name = basename(fc)
# sample name is prefix of file name
sample_name = met_name.split('.')[0]
with open(met_name, 'wb') as file_obj:
blob.download_to_file(file_obj)
        # use the picard package to parse the Picard output; a parsed dict is returned
parsed = picard.parse(met_name)
class_name = parsed['metrics']['class']
        # Alignment metrics return multiple rows; only the PAIRED-reads row (third line) is kept
if class_name == "picard.analysis.AlignmentSummaryMetrics":
## only parse out pair reads
met = parsed['metrics']['contents'][2]
        # Sometimes (very rarely) the insert-size metrics also return multiple rows (to include TANDEM repeats); only the first row is kept.
elif class_name == "picard.analysis.InsertSizeMetrics":
            # if fewer than 21 fields are present, the insert-size metrics returned multiple rows
if len(parsed['metrics']['contents']) < 21:
met = parsed['metrics']['contents'][0]
else:
met = parsed['metrics']['contents']
else:
# other metrics(so far) only return one line results.
met = parsed['metrics']['contents']
mets[sample_name] = met
merged = pd.DataFrame.from_dict(mets)
return merged
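# Rough sketch of the merged layout returned above (sample names are hypothetical):
#
#                    sample_A   sample_B
#   TOTAL_READS       1000000     980000
#   PF_READS           995000     975000
#   ...
#
# i.e. one column per sample/cell and one row per Picard metric field.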
def run_merge_metrics(cromwell_uuid, metric_name, output_name):
"""
    Call the functions above to merge the metrics and write them to one output file.
    :param cromwell_uuid: cromwell workflow uuid
    :param metric_name: a Picard metric name
    :param output_name: the output csv file name
"""
metfiles = retrieve_workflow_outputs(cromwell_uuid, metric_name)
metrics_matrix = merge_picard_metrics(metfiles, metric_name)
metrics_matrix.to_csv(output_name)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-u",
"--cromwell_uuid",
dest="cromwell_uuid",
required=True,
help="The uuid of workflow")
parser.add_argument(
"-m",
"--metrics_name",
dest="met_name",
required=True,
help="The list of Picard metrics class names")
parser.add_argument(
"-o",
"--output_name",
dest="output_name",
required=True,
help="The output file name")
args = parser.parse_args()
run_merge_metrics(args.cromwell_uuid, args.met_name, args.output_name)
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
garyluu.noreply@github.com
|
37e0fb4dbe4d99d999a4a4ff25c33d7f504d8fc8
|
ab574f7511fa15e5ea50a26f26e3e38f7e33505a
|
/win_2018/scipy/special/_ufuncs_cxx.py
|
65fc513447b7d344b151f7ba228174ebe12f7257
|
[] |
no_license
|
zclongpop123/maya_python_packages
|
49d6b340512a2580bc8c14ae6281ca3f57017acd
|
4dd4a48c41749443ac16053d20aec04e9d2db202
|
refs/heads/master
| 2021-11-30T01:49:41.846727
| 2021-11-17T01:47:08
| 2021-11-17T01:47:08
| 49,186,909
| 16
| 9
| null | 2017-03-07T00:13:41
| 2016-01-07T06:48:35
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, '_ufuncs_cxx.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"aton.lerin@gmail.com"
] |
aton.lerin@gmail.com
|
d0594ba180ac2eb8f8df3854ae9e4fd1f3cf86e6
|
e2b4c4dc7b9ad43e5e06d050eccd43ebf98d76c3
|
/snap_plugin/v1/pub_proc_arg.py
|
c6486d5adc3ed1562e447aa52d1182f141293507
|
[
"Apache-2.0"
] |
permissive
|
intelsdi-x/snap-plugin-lib-py
|
4bcf7d6c665f85285af83271380f23413b23082e
|
24b08eb5feaeb64d7c6e25781abe3b8ce2fa9277
|
refs/heads/master
| 2022-11-12T11:31:11.420061
| 2022-11-07T23:11:16
| 2022-11-07T23:11:16
| 69,615,435
| 5
| 16
| null | 2017-08-28T13:38:17
| 2016-09-29T23:16:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
# -*- coding: utf-8 -*-
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .plugin_pb2 import PubProcArg
class _PubProcArg(object):
    def __init__(self, metrics=None, **kwargs):
        # avoid a mutable default argument and tolerate metrics=None (as passed by _PublishArg)
        metrics = metrics or []
        self._pb = PubProcArg(Metrics=[m.pb for m in metrics])
if "config" in kwargs:
self._pb.Config.MergeFrom(kwargs.get("config").pb)
@property
def pb(self):
return self._pb
class _ProcessArg(_PubProcArg):
def __init__(self, metrics=[], **kwargs):
super(_ProcessArg, self).__init__(metrics=metrics, **kwargs)
class _PublishArg(_PubProcArg):
def __init__(self, metrics=None, **kwargs):
super(_PublishArg, self).__init__(metrics=metrics, **kwargs)
|
[
"joel.cooklin@gmail.com"
] |
joel.cooklin@gmail.com
|
e66e93413063fb93740bd8dbb7b6721fabef46c9
|
22adb6a4cbd88a5d5e8b006b07fbdd03a23dca97
|
/update_scheduler.py
|
945c39766368bcc821432e3d79db6b9ded1f8f97
|
[] |
no_license
|
shatteroff/flask_CU_price_checker
|
71719bf6865a0775923909f43a67af8cb0c74f22
|
a285cd70905d95ec452cdb68acf14705e3011cef
|
refs/heads/master
| 2022-12-14T08:52:41.408014
| 2020-12-30T09:30:42
| 2020-12-30T09:30:42
| 241,875,724
| 0
| 0
| null | 2022-07-06T20:29:15
| 2020-02-20T12:14:07
|
Python
|
UTF-8
|
Python
| false
| false
| 738
|
py
|
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from config import Config
from redis_helper import RedisHelper
scheduler = BlockingScheduler()
redis_helper = RedisHelper()
@scheduler.scheduled_job('cron', misfire_grace_time=3000, hour=Config.hour_for_update, minute=Config.minute_for_update)
def update_prices():
print(f'{datetime.datetime.now()}\tUpdate started')
conn = Config.conn
redis_helper.update_date()
redis_helper.load_prices(conn)
redis_helper.add_product(conn)
conn.close()
print(f'{datetime.datetime.now()}\tUpdate ended')
@scheduler.scheduled_job('interval', minutes=5)
def timed_job():
print('Test scheduler is run every 5 minutes.')
scheduler.start()
|
[
"shatter007@mail.ru"
] |
shatter007@mail.ru
|
139a60ffd6e82195e835f691c53c0f317ab5a8d9
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/heatmap/_yperiod.py
|
6496c7ed1592b867d1b2a5946e177c084910c381
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 470
|
py
|
import _plotly_utils.basevalidators
class YperiodValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="yperiod", parent_name="heatmap", **kwargs):
super(YperiodValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"ytype": "scaled"}),
**kwargs,
)
|
[
"nicolas@plot.ly"
] |
nicolas@plot.ly
|
a2116f849321bb09ca0351c79ae1a80cf17d6dec
|
588396f66a5c0fbfcf1d2af44386c8f4dca95abf
|
/sanjiaoxing.py
|
c045ef04118103c5a2613365e5f8cf7601af0c9d
|
[] |
no_license
|
yuki9965/PAT_python
|
219dc4deedf097bbb41b325f538f8a5bb806104d
|
5a7ad358d9beaeb9e4c47a4026248cd5d2268b5b
|
refs/heads/master
| 2021-05-04T18:41:35.403984
| 2017-10-06T05:19:18
| 2017-10-06T05:19:18
| 105,956,338
| 1
| 0
| null | 2017-10-06T01:15:10
| 2017-10-06T01:15:10
| null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
#-*- coding=utf-8 -*-
# Python 2 script: classify a triangle from its three side lengths (obtuse / acute / right).
__author__ = 'Yaicky'
sides = map(int, raw_input().strip().split())
sides.sort()
longside = (sides[2])**2
shortsides = (sides[0])**2 + (sides[1])**2
if longside > shortsides:
    print (u"钝角三角形")   # obtuse triangle
elif shortsides > longside:
    print (u"锐角三角形")   # acute triangle
else:
    print(u"直角三角形")    # right triangle
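# Worked example: input "3 4 5" -> sorted sides [3, 4, 5], longside = 25,
# shortsides = 9 + 16 = 25, so the final branch prints "直角三角形" (right triangle).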
|
[
"ajirencnty@gmail.com"
] |
ajirencnty@gmail.com
|
821a36d24596e0ac1a7bce97e1a3d9b9992c271f
|
03043b715d2e177dd3ba93078463ce79c33173dc
|
/NI_DAQmx/models/NI_PXIe_6535.py
|
ffdfbaabce93ed1ea32f606174fc1da92d542ec7
|
[] |
no_license
|
labscript-suite-bitbucket-archive/cavitylab-labscript_devices--forked-from--labscript_suite-labscript_devices
|
2efc068eb35ca70e1eecab9c7fec7991fd596c9c
|
e665d3ee0ce1cfd7fb7cd5c6cc4d783528bc4935
|
refs/heads/master
| 2020-12-27T02:35:41.710162
| 2019-12-06T20:57:48
| 2019-12-06T20:57:48
| 253,143,395
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,629
|
py
|
#####################################################################
# #
# /NI_DAQmx/models/_subclass_template.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
#####################################################################
# WARNING #
# #
# This file is auto-generated, any modifications may be #
# overwritten. See README.txt in this folder for details #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
from labscript_devices.NI_DAQmx.labscript_devices import NI_DAQmx
CAPABILITIES = {
'AI_range': None,
'AI_start_delay': None,
'AO_range': None,
'max_AI_multi_chan_rate': None,
'max_AI_single_chan_rate': None,
'max_AO_sample_rate': None,
'max_DO_sample_rate': 10000000.0,
'min_semiperiod_measurement': None,
'num_AI': 0,
'num_AO': 0,
'num_CI': 0,
'ports': {
'port0': {'num_lines': 8, 'supports_buffered': True},
'port1': {'num_lines': 8, 'supports_buffered': True},
'port2': {'num_lines': 8, 'supports_buffered': True},
'port3': {'num_lines': 8, 'supports_buffered': True},
'port4': {'num_lines': 6, 'supports_buffered': False},
},
'supports_buffered_AO': False,
'supports_buffered_DO': True,
'supports_semiperiod_measurement': False,
}
class NI_PXIe_6535(NI_DAQmx):
description = 'NI-PXIe-6535'
def __init__(self, *args, **kwargs):
# Any provided kwargs take precedent over capabilities
combined_kwargs = CAPABILITIES.copy()
combined_kwargs.update(kwargs)
NI_DAQmx.__init__(self, *args, **combined_kwargs)
|
[
"chrisjbillington@gmail.com"
] |
chrisjbillington@gmail.com
|
0702087eed1caf59c86a54c11a4482b18f7b120e
|
b0346d8d798a8534fb2e1c0f1f98b4038e23d1ba
|
/Modetool/wsgi.py
|
7e2c4b744a0f08c2f3c78b30af8c415c12c9cb53
|
[] |
no_license
|
pavelcerny/modetool
|
ed1237f1ac54b617eed7161341ab640e52190fe3
|
ba5379e6b2604e1c1b0c5a84fec01ab0ef4e5e41
|
refs/heads/master
| 2020-03-29T12:36:41.111251
| 2018-09-23T08:30:26
| 2018-09-23T08:30:26
| 149,908,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
WSGI config for Modetool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Modetool.settings")
application = get_wsgi_application()
|
[
"cerny.pav@gmail.com"
] |
cerny.pav@gmail.com
|
d978aee1a03ddbd4eec8a61a6d7792586dbbeb14
|
a25aa09af984d08084a395f9b6df427d3756f11a
|
/35.Search Insert Position.py
|
39611cdd7879d9f73747e131d4d9446fec4691dc
|
[] |
no_license
|
luyihsien/leetcodepy
|
31971e851a4ae77942a5d9e3ff07faea6e504c66
|
a54bd09f4b28f106196a6cd8a0f9c056bcd237e6
|
refs/heads/master
| 2020-05-19T13:21:57.854086
| 2019-10-16T14:23:00
| 2019-10-16T14:23:00
| 185,037,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
'''
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
'''
class Solution:
def searchInsert(self, nums, target):
if len(nums)==0:
return 0
for i in range(len(nums)):
if nums[i]==target:
return i
for i in range(1,len(nums)):
if nums[i]>target and nums[i-1]<target:
return i
if max(nums)<target:
return len(nums)
if min(nums)>target:
return 0
'''
Accepted
Details:
Runtime: 52 ms, beating 90.74% of Python3 submissions for Search Insert Position
Memory: 13.5 MB, beating 96.03% of Python3 submissions for Search Insert Position
'''
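# Quick check of the branches above (illustrative note, not part of the original submission):
# for nums = [1, 3, 5, 6]: target 5 -> 2 (exact match), target 2 -> 1 (first gap),
# target 7 -> 4 (max(nums) < target), target 0 -> 0 (min(nums) > target).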
|
[
"luyihsien@gmail.com"
] |
luyihsien@gmail.com
|
3cc871344d6720297182aaba7b29ac5e814f33b7
|
2b4e7f8dcf3296bdb33b29b44a83650f5bfab8e1
|
/common/content.py
|
43a8c8ab1da8f1697d3f2ef0dd1ec2649a9305f4
|
[] |
no_license
|
bp72/asd
|
9e42e88f6fe18abfcce52be646649aab11946aaf
|
a687dfba154b2682c521d5a4ee329ef13c84c5a7
|
refs/heads/master
| 2016-09-10T12:42:37.485619
| 2015-06-22T17:50:27
| 2015-06-22T17:50:27
| 37,869,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'bp'
__version__ = (0, 0, 1)
from fields import MD5Field, FilenameField
################################################################################
class File(object):
"""
    File object
>>> with open('./filename.txt', 'w') as f:
... f.write('1')
... f.close()
>>> a = File('filename.txt', 'c4ca4238a0b923820dcc509a6f75849b')
>>> a.filename
'filename.txt'
>>> a.md5sum
'c4ca4238a0b923820dcc509a6f75849b'
>>> a.filepath()
'./filename.txt'
>>> import os
>>> os.unlink('./filename.txt')
"""
md5sum = MD5Field()
filename = FilenameField()
def __init__(self, filename, md5, root=None):
self.root = root or '.'
self.filename = filename
self.md5sum = md5
def filepath(self):
return '{}/{}'.format(self.root, self.filename)
# end of class FileField(BaseField)
################################################################################
|
[
"pavleg.bityukov@gmail.com"
] |
pavleg.bityukov@gmail.com
|
4dade9f8a38ec5174c7440af316e5d916ab2f049
|
488a2817b9c55856d367a37fc1d029ebf335f3c7
|
/crawling/cheogajip_scraping.py
|
f6b266219af8026669233763ba9606d556772031
|
[] |
no_license
|
qudals55/chicken-store-visualization
|
18d518df0ad99f10e5d593742d585e0e1e40dcfb
|
d8ac96afc0ae4bdc53fd282f29854b8ff04f0b8e
|
refs/heads/master
| 2020-04-30T21:17:40.395764
| 2019-03-22T07:13:37
| 2019-03-22T07:13:37
| 177,090,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,192
|
py
|
import sys
import csv
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
def address(state, city) :
return ({ '경기' : '경기도',
'서울' : '서울특별시',
'서울시' : '서울특별시',
'인천' : '인천광역시',
'인천시' : '인천광역시',
'제주' : '제주특별자치도',
'전남' : '전라남도',
'전북' : '전라북도',
'경북' : '경상북도',
'경남' : '경상남도',
'부산' : '부산광역시',
'울산' : '울산광역시',
'대구' : '대구광역시',
'충북' : '충청북도',
'충남' : '충청남도',
'세종시' : '세종특별자치시',
'세종' : '세종특별자치시',
'대전' : '대전광역시',
'강원' : '강원도',
'광주' : '광주광역시',
}.get(state, state), city)
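# Example of the mapping above: address('서울', '강남구') returns ('서울특별시', '강남구');
# the abbreviated province name is expanded to its official form, and any prefix not in
# the table is passed through unchanged by the .get(state, state) fallback.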
def main():
driver = webdriver.PhantomJS()
idx = 1
f = open('cheogajip.csv', 'w', encoding='utf-8', newline='')
wr = csv.writer(f, delimiter=',')
    wr.writerow(['매장이름', '시도정보', '시군구정보', '매장주소'])  # store name, province, city/district, full address
while idx <= 105:
driver.get("http://www.cheogajip.co.kr/bbs/board.php?bo_table=store&page=" + str(idx))
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
chickens = soup.select('#fboardlist > div > table > tbody > tr')
for chicken in chickens :
shopName = chicken.select('td[class=td_date]')[1].text
shopAdd = chicken.select_one('td[class=td_subject]').text
shopAdd = re.sub('\n', '', shopAdd)
shopAddSplit = shopAdd.split()
state, city = address(shopAddSplit[0], shopAddSplit[1])
wr.writerow([shopName, state, city, shopAdd])
idx = idx + 1
f.close()
print('end')
if __name__ == '__main__':
main()
|
[
"gongbyeongmin@gmail.com"
] |
gongbyeongmin@gmail.com
|
1234f26b6c4eeb7584ae2a210bca4db698d88a26
|
e5712ee7ff8e013b33d0ee236252909997429b15
|
/Python/Sets/No Idea.py
|
7378798bad44140fa629cac23a0e92ac26634898
|
[] |
no_license
|
shubhamkatore/HackerRank
|
fdb031b2875eebcf63b0f7dc5c996f8f80fc42ac
|
11b75a356987d3aa63901413994bffb8d33b50bb
|
refs/heads/master
| 2021-05-05T06:10:47.537066
| 2018-06-24T06:41:12
| 2018-06-24T06:41:12
| 118,781,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
n,m=map(int,input().split(' '))
narr=map(int,input().split(' '))
a=set(map(int,input().split(' ')))
b=set(map(int,input().split(' ')))
ha=0
for i in narr:
if i in a:
ha+=1
if i in b:
ha-=1
print(ha)
|
[
"shubhamkatore@gmail.com"
] |
shubhamkatore@gmail.com
|
0ab0e2bee34871966bf2bcc9d4aeefec6b1a9287
|
0196ff82d8022ae81aa7e5d6f0797aa746e40a08
|
/huobi_crawler.py
|
5f3bce850fd40654dd7db5e2624f5d6ca32fa605
|
[] |
no_license
|
Sungbin17/coin_exchange
|
85d691c954f5e58087c7504c5b11451658a3e604
|
4fdf0ffa5d180fac6726516a261fc359f7888c5a
|
refs/heads/master
| 2020-03-18T22:08:28.442186
| 2018-06-07T09:01:11
| 2018-06-07T09:01:11
| 135,327,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
import urllib.request, json
from urllib.request import Request, urlopen
huobi_symbol_api = 'https://api.huobipro.com/v1/common/symbols'
response = Request(huobi_symbol_api, headers={'User-Agent': 'Mozilla/5.0'})
data = json.loads(urlopen(response).read())
data = data.get('data')
print(type(data))
['BTC', 'BCH', 'ETH', 'ETC', 'LTC', 'EOS', 'XRP', 'OMG', 'DASH', 'ZEC', 'ADA', 'STEEM', 'IOTA', 'SOC', 'CTXC', 'ACT', 'BTM', 'BTS', 'ONT', 'IOST', 'HT', 'TRX', 'DTA', 'NEO', 'QTUM', 'SMT', 'ELA', 'VEN', 'THETA', 'SNT', 'ZIL', 'XEM', 'NAS', 'RUFF', 'HSR', 'LET', 'MDS', 'STORJ', 'ELF', 'ITC', 'CVC', 'GNT', 'BCH', 'ETH', 'LTC', 'ETC', 'EOS', 'OMG', 'XRP', 'DASH', 'ZEC', 'ADA', 'STEEM', 'IOTA', 'POLY', 'KAN', 'LBA', 'WAN', 'BFT', 'BTM', 'ONT', 'IOST', 'HT', 'TRX', 'SMT', 'ELA', 'WICC', 'OCN', 'ZLA', 'ABT', 'MTX', 'NAS', 'VEN', 'DTA', 'NEO', 'WAX', 'BTS', 'ZIL', 'THETA', 'CTXC', 'SRN', 'XEM', 'ICX', 'DGD', 'CHAT', 'WPR', 'LUN', 'SWFTC', 'SNT', 'MEET', 'YEE', 'ELF', 'LET', 'QTUM', 'LSK', 'ITC', 'SOC', 'QASH', 'MDS', 'EKO', 'TOPC', 'MTN', 'ACT', 'HSR', 'STK', 'STORJ', 'GNX', 'DBC', 'SNC', 'CMT', 'TNB', 'RUFF', 'QUN', 'ZRX', 'KNC', 'BLZ', 'PROPY', 'RPX', 'APPC', 'AIDOC', 'POWR', 'CVC', 'PAY', 'QSP', 'DAT', 'RDN', 'MCO', 'RCN', 'MANA', 'UTK', 'TNT', 'GAS', 'BAT', 'OST', 'LINK', 'GNT', 'MTL', 'EVX', 'REQ', 'ADX', 'AST', 'ENG', 'SALT', 'EDU', 'BIFI', 'BCX', 'BCD', 'SBTC', 'BTG', 'EOS', 'OMG', 'IOTA', 'ADA', 'STEEM', 'POLY', 'KAN', 'LBA', 'WAN', 'BFT', 'ZRX', 'AST', 'KNC', 'ONT', 'HT', 'BTM', 'IOST', 'SMT', 'ELA', 'TRX', 'ABT', 'NAS', 'OCN', 'WICC', 'ZIL', 'CTXC', 'ZLA', 'WPR', 'DTA', 'MTX', 'THETA', 'SRN', 'VEN', 'BTS', 'WAX', 'HSR', 'ICX', 'MTN', 'ACT', 'BLZ', 'QASH', 'RUFF', 'CMT', 'ELF', 'MEET', 'SOC', 'QTUM', 'ITC', 'SWFTC', 'YEE', 'LSK', 'LUN', 'LET', 'GNX', 'CHAT', 'EKO', 'TOPC', 'DGD', 'STK', 'MDS', 'DBC', 'SNC', 'PAY', 'QUN', 'AIDOC', 'TNB', 'APPC', 'RDN', 'UTK', 'POWR', 'BAT', 'PROPY', 'MANA', 'REQ', 'CVC', 'QSP', 'EVX', 'DAT', 'MCO', 'GNT', 'GAS', 'OST', 'LINK', 'RCN', 'TNT', 'ENG', 'SALT', 'ADX', 'EDU']
base_currency_list = []
for base_currency in data:
    base_currency_list.append(base_currency.get('base-currency').upper())
print(base_currency_list)
|
[
"wd1kr1@gmail.com"
] |
wd1kr1@gmail.com
|
9464793a12fd15b36cf79f711c7308ed8e638665
|
e56ad8a3c8b34bed3c5ff0f168beb4ceec19b8bc
|
/test.py
|
3bdc36b350229988e79d2b89c8c32aac239b247f
|
[] |
no_license
|
YoungseogChung/angry_turtle
|
77ba732008abf7433e21a39dc145d9ffde8284cb
|
8d9288c030de3d40d8554aad688a80082ce095c7
|
refs/heads/master
| 2020-05-21T00:57:01.277698
| 2019-05-09T20:08:23
| 2019-05-09T20:08:23
| 185,842,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
import turtle
import random
import math
player = turtle.Turtle()
player.color("blue")
player.shape("turtle")
player.penup()
player.speed(0)
screen = player.getscreen()
a1 = turtle.Turtle()
a1.color("red")
a1.shape("circle")
a1.penup()
a1.speed(0)
a1.goto(random.randint(-300, 300), random.randint(-300, 300))
a2 = turtle.Turtle()
a2.color("red")
a2.shape("circle")
a2.penup()
a2.speed(0)
a2.goto(random.randint(-300, 300), random.randint(-300, 300))
def turnleft():
    player.left(30)  # rotate 30 degrees to the left
def turnright():
    player.right(30)  # rotate 30 degrees to the right
def play():
    player.forward(2)  # move the player forward by 2 pixels
    a1.forward(2)
    a2.forward(2)
    screen.ontimer(play, 10)  # call play() again after 10 ms
screen.onkeypress(turnleft, "Left")
screen.onkeypress(turnright, "Right")
screen.listen()
turtle.done()
# screen.ontimer(play, 10)  # left disabled by the author; enabling it starts the movement loop
|
[
"yschung55@hotmail.com"
] |
yschung55@hotmail.com
|
6140826c1e42e213c230cc67aa4e7a4aa67603fd
|
81e87227fb6eee0c6c00608d3913f6c5fb951b41
|
/project_1/task_1.py
|
a6ed401a518727661b498183be37886a29ead373
|
[] |
no_license
|
pierwiastekzminusjeden/Graph-Theory-Course
|
e43b7e8b7dba0945360b09873aa300d778da3638
|
6c95575b3bea397d1b8ad9aeb29d23280dab4a71
|
refs/heads/master
| 2020-03-11T15:35:00.953471
| 2018-07-11T18:52:38
| 2018-07-11T18:52:38
| 130,088,484
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,126
|
py
|
#!/usr/bin/env python3
#############################
#@author Karolina Mizera
#@author Krystian Molenda
#@author Marcin Miś
#############################
#import sys
#sys.path.append('$(src)')  # add the path to project_11/src, or keep all files in the same directory
from list import List
from adjmatrix import AdjMatrix
from incidencematrix import IncidenceMatrix
from adjMatrixFile import SaveToFile
import convert
from draw import draw_graph
#Enter first matrix
print('''Import matrix from file.
A - Adjacency Matrix
I - Incidence Matrix
L - Adjacency List
other - exit''')
#@key representation flag
key = input(" ")
fileName = input("Enter file name: ")  # name of the data file; it must be in the same directory (examples in /data)
if key in ('A', 'I', 'L') and fileName != '':
if key == 'A':
adjMatrix = AdjMatrix
adjMatrix.createAdjMatrixFromFile(adjMatrix,fileName)
elif key == 'I':
incMatrix = IncidenceMatrix
incMatrix.createIncMatrixFromFile(incMatrix,fileName)
elif key == 'L':
_list = List
_list.createListFromFile(_list, fileName)
print(" ")
#conversions
while key in 'AIL' :
if key == 'A':
draw_graph(adjMatrix, 'zad1Graph.png')
print('''Convert representation:
AI - Adjacency Matrix to Incidence Matrix
        AL - Adjacency Matrix to Adjacency List
x - exit''')
key = input(" ")
if key == 'AI':
incMatrix = convert.fromAdjMatrixtoIncidenceMatrix(adjMatrix)
print(incMatrix.matrix)
key = 'I'
elif key == 'AL':
incMatrix = convert.fromAdjMatrixtoIncidenceMatrix(adjMatrix)
            _list = convert.fromIncidenceMatrixtoList(incMatrix)
print(_list.matrix)
key = 'L'
elif key == 'I':
print('''Convert representation:
        IL - Incidence Matrix to Adjacency List
        IA - Incidence Matrix to Adjacency Matrix
x - exit ''')
key = input(" ")
if key == 'IL':
_list = convert.fromIncidenceMatrixtoList(incMatrix)
print(_list.matrix)
key = 'L'
elif key == 'IA':
_list = convert.fromIncidenceMatrixtoList(incMatrix)
adjMatrix = convert.fromListToAdjMatrix(_list)
print(adjMatrix.matrix)
key = 'A'
elif key == 'L':
print('''Convert representation:
        LA - Adjacency List to Adjacency Matrix
        LI - Adjacency List to Incidence Matrix
x - exit''')
key = input(" ")
if key == 'LA':
adjMatrix = convert.fromListToAdjMatrix(_list)
print(adjMatrix.matrix)
key = 'A'
elif key == 'LI':
adjMatrix = convert.fromListToAdjMatrix(_list)
incMatrix = convert.fromAdjMatrixtoIncidenceMatrix(adjMatrix)
print(incMatrix.matrix)
key = 'I'
|
[
"krystian.molenda@gmail.com"
] |
krystian.molenda@gmail.com
|
9c6a07dcfbdf352a591d9e7fe0d53f19f2b65bf9
|
c486c7bfe16804a8fd28b2f8d833b44df1a0f553
|
/topi/python/topi/x86/conv3d_transpose.py
|
ad035d34c3a13e715a1247ed4ba5c11825a4df4f
|
[
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
TexasInstruments/tvm
|
9ef8ebc5825030e595ea8a667387ea430dd92259
|
c78ea878a05e262a30c3ffa250c1479a695ecf33
|
refs/heads/dev
| 2023-08-03T19:59:53.639979
| 2020-06-15T22:29:11
| 2020-06-18T03:22:39
| 225,893,305
| 14
| 3
|
Apache-2.0
| 2020-07-08T14:34:47
| 2019-12-04T15:02:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,238
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter
"""Conv3D Transpose schedule on x86"""
from tvm import te
from ..util import traverse_inline
from .. import nn
from .conv3d import conv3d_ncdhw, schedule_conv3d_ncdhw
def conv3d_transpose_ncdhw(data, kernel, strides, padding, out_dtype):
data_pad, kernel_transform = \
nn.conv3d_transpose_ncdhw_preprocess(data, kernel, strides, padding, out_dtype)
# reuse conv3d_ncdhw implementation
return conv3d_ncdhw(data_pad, kernel_transform, (1, 1, 1),
(0, 0, 0), (1, 1, 1), out_dtype)
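# A minimal usage sketch (shapes and layouts below are assumptions, not taken from this file):
# with `data` in NCDHW layout and `kernel` in the layout expected by
# nn.conv3d_transpose_ncdhw_preprocess, something along the lines of
#   data   = te.placeholder((1, 8, 8, 32, 32), name="data")
#   kernel = te.placeholder((8, 4, 3, 3, 3), name="kernel")
#   out    = conv3d_transpose_ncdhw(data, kernel, (2, 2, 2), (1, 1, 1), "float32")
#   s      = schedule_conv3d_transpose_ncdhw([out])
# builds the compute and an x86 schedule for it.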
def schedule_conv3d_transpose_ncdhw(outs):
"""Create schedule for tensors"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = schedule_conv3d_ncdhw(outs)
def _callback(op):
if 'unpack_ncdhwc' in op.tag:
conv_out = op.input_tensors[0]
# retrieve data
data_vec = conv_out.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
data_dilate = data_pad.op.input_tensors[0]
s[data_dilate].compute_inline()
s[data_pad].compute_inline()
# retrieve kernel
kernel_vec = conv_out.op.input_tensors[1]
kernel_transform = kernel_vec.op.input_tensors[0]
s[kernel_transform].compute_inline()
traverse_inline(s, outs[0].op, _callback)
return s
|
[
"trevoraidanmorris@gmail.com"
] |
trevoraidanmorris@gmail.com
|
387635873635283c5290831c6f2104f6d7e1fed8
|
aeb2f0bb7b01f87a1b6c65b88b216bed47025fe5
|
/experiment/ex_025_predict.py
|
db89c037080c832fffa5c1b6a6ffee69035c39e7
|
[] |
no_license
|
kurupical/riiid
|
7e68239cd50243fbb734bf433d60ebd7469cb180
|
7bab580ce03d03873748a6afc91092c11871465f
|
refs/heads/master
| 2023-03-30T04:15:54.109815
| 2021-04-04T01:20:33
| 2021-04-04T01:20:33
| 302,828,112
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,041
|
py
|
from datetime import datetime as dt
from feature_engineering.feature_factory import \
FeatureFactoryManager, \
TargetEncoder, \
CountEncoder, \
MeanAggregator, \
TagsSeparator, \
UserLevelEncoder, \
NUniqueEncoder, \
ShiftDiffEncoder
import pandas as pd
import glob
import os
import tqdm
import lightgbm as lgb
import pickle
import riiideducation
import numpy as np
from logging import Logger, StreamHandler, Formatter
import shutil
import time
import warnings
warnings.filterwarnings("ignore")
model_dir = "../output/ex_025/20201022082802"
data_types_dict = {
'row_id': 'int64',
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id': 'int8',
'task_container_id': 'int16',
'user_answer': 'int8',
'answered_correctly': 'int8',
}
prior_columns = ["prior_group_responses", "prior_group_answers_correct"]
def get_logger():
formatter = Formatter("%(asctime)s|%(levelname)s| %(message)s")
logger = Logger(name="log")
handler = StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def run(debug,
model_dir,
kaggle=False):
if kaggle:
files_dir = "/kaggle/input/riiid-split10/*.pickle"
else:
files_dir = "../input/riiid-test-answer-prediction/split10_base/*.pickle"
logger = get_logger()
# environment
env = riiideducation.make_env()
df_question = pd.read_csv("../input/riiid-test-answer-prediction/questions.csv",
dtype={"bundle_id": "int32",
"question_id": "int32",
"correct_answer": "int8",
"part": "int8"})
df_lecture = pd.read_csv("../input/riiid-test-answer-prediction/lectures.csv",
dtype={"lecture_id": "int32",
"tag": "int16",
"part": "int8"})
# model loading
models = []
for model_path in glob.glob(f"{model_dir}/*model*.pickle"):
with open(model_path, "rb") as f:
models.append(pickle.load(f))
# data preprocessing
logger = get_logger()
feature_factory_dict = {}
feature_factory_dict["tags"] = {
"TagsSeparator": TagsSeparator()
}
for column in ["content_id", "user_id", "content_type_id", "prior_question_had_explanation",
"tags1", "tags2", "tags3", "tags4", "tags5", "tags6",
("user_id", "content_type_id"), ("user_id", "prior_question_had_explanation")]:
is_partial_fit = column == "content_id"
is_onebyone = "content_id" in column
if type(column) == str:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=column, onebyone=is_onebyone),
"TargetEncoder": TargetEncoder(column=column, is_partial_fit=is_partial_fit, onebyone=is_onebyone)
}
else:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=list(column), onebyone=is_onebyone),
"TargetEncoder": TargetEncoder(column=list(column), is_partial_fit=is_partial_fit, onebyone=is_onebyone)
}
for column in ["part", ("user_id", "tag"), ("user_id", "part"), ("content_type_id", "part")]:
if type(column) == str:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=column)
}
else:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=list(column))
}
feature_factory_dict["user_id"]["MeanAggregatorTimestamp"] = MeanAggregator(column="user_id",
agg_column="timestamp",
remove_now=False)
feature_factory_dict["user_id"]["MeanAggregatorPriorQuestionElapsedTime"] = MeanAggregator(column="user_id",
agg_column="prior_question_elapsed_time",
remove_now=True)
feature_factory_dict["user_id"]["ShiftDiffEncoder"] = ShiftDiffEncoder(groupby="user_id",
column="timestamp")
feature_factory_dict["content_id"]["MeanAggregatorPriorQuestionElapsedTime"] = MeanAggregator(column="content_id",
agg_column="prior_question_elapsed_time",
remove_now=True)
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger)
for model_id, fname in enumerate(glob.glob(files_dir)):
logger.info(f"loading... {fname}")
df = pd.read_pickle(fname)
df["answered_correctly"] = df["answered_correctly"].replace(-1, np.nan)
df["prior_question_had_explanation"] = df["prior_question_had_explanation"].fillna(-1).astype("int8")
if debug:
df = df.head(1000)
df = pd.concat([pd.merge(df[df["content_type_id"] == 0], df_question,
how="left", left_on="content_id", right_on="question_id"),
pd.merge(df[df["content_type_id"] == 1], df_lecture,
how="left", left_on="content_id", right_on="lecture_id")]).sort_values(["user_id", "timestamp"])
feature_factory_manager.fit(df, is_first_fit=True)
iter_test = env.iter_test()
df_test_prev = pd.DataFrame()
df_test_prev1 = pd.DataFrame()
answered_correctlies = []
user_answers = []
i = 0
t = time.time()
for (df_test, df_sample_prediction) in iter_test:
i += 1
        logger.info(f"[time: {int(time.time() - t)}] iteration {i}: data_length: {len(df_test)}")
        # apply the answers from the previous batch before predicting the current one
        if len(df_test_prev) > 0:  # skipped only on the very first iteration
answered_correctly = df_test.iloc[0]["prior_group_answers_correct"]
answered_correctly = [int(x) for x in answered_correctly.replace("[", "").replace("'", "").replace("]", "").replace(" ", "").split(",")]
user_answer = df_test.iloc[0]["prior_group_responses"]
user_answer = [int(x) for x in user_answer.replace("[", "").replace("'", "").replace("]", "").replace(" ", "").split(",")]
answered_correctlies.extend(answered_correctly)
user_answers.extend(user_answer)
df_test_prev1["answered_correctly"] = answered_correctly
df_test_prev1["user_answer"] = user_answer
df_test_prev1["answered_correctly"] = df_test_prev1["answered_correctly"].replace(-1, np.nan)
df_test_prev1["prior_question_had_explanation"] = \
df_test_prev1["prior_question_had_explanation"].fillna(-1).astype("int8")
feature_factory_manager.fit(df_test_prev1, partial_predict_mode=True, onebyone_mode=True)
df_test_prev1 = pd.DataFrame()
if debug:
update_record = 50
else:
update_record = 150
# update1
if len(df_test_prev) > update_record:
df_test_prev["answered_correctly"] = answered_correctlies
df_test_prev["user_answer"] = user_answers
# df_test_prev = df_test_prev.drop(prior_columns, axis=1)
df_test_prev["answered_correctly"] = df_test_prev["answered_correctly"].replace(-1, np.nan)
df_test_prev["prior_question_had_explanation"] = df_test_prev["prior_question_had_explanation"].fillna(-1).astype("int8")
feature_factory_manager.fit(df_test_prev, partial_predict_mode=True, onebyone_mode=False)
df_test_prev = pd.DataFrame()
answered_correctlies = []
user_answers = []
        # fetch and compute features for the current batch
# logger.info(f"[time: {int(time.time() - t)}dataload")
logger.info(f"merge... ")
w_df1 = pd.merge(df_test[df_test["content_type_id"] == 0], df_question, how="left", left_on="content_id",
right_on="question_id")
w_df2 = pd.merge(df_test[df_test["content_type_id"] == 1], df_lecture, how="left", left_on="content_id",
right_on="lecture_id")
df_test = pd.concat([w_df1, w_df2]).sort_values(["user_id", "timestamp"])
df_test["tag"] = df_test["tag"].fillna(-1)
df_test["correct_answer"] = df_test["correct_answer"].fillna(-1)
df_test["bundle_id"] = df_test["bundle_id"].fillna(-1)
logger.info(f"transform... ")
df_test["prior_question_had_explanation"] = df_test["prior_question_had_explanation"].astype("float16").fillna(-1).astype("int8")
df = feature_factory_manager.partial_predict(df_test)
df.columns = [x.replace(" ", "_") for x in df.columns]
logger.info(f"other... ")
# predict
predicts = []
cols = models[0].feature_name()
for model in models:
predicts.append(model.predict(df[cols]))
df["answered_correctly"] = np.array(predicts).transpose().mean(axis=1)
df_sample_prediction = pd.merge(df_sample_prediction[["row_id"]],
df[["row_id", "answered_correctly"]],
how="inner")
env.predict(df_sample_prediction)
df_test_prev = df_test_prev.append(df[cols + ["user_id", "tags"]])
df_test_prev1 = df[cols + ["user_id", "tags"]]
if i < 5:
df_test_prev.to_csv(f"{i}.csv")
if __name__ == "__main__":
run(debug=True,
model_dir=model_dir)
|
[
"kurupical@gmail.com"
] |
kurupical@gmail.com
|
07216bcd55a48955b32cea2c65be6627df8648d9
|
56ff870edec243b9b4b6d54e15fd95f741a9bd33
|
/settings_dev.py
|
c49d68ea5358f1c59db2320d72f631b35990dca6
|
[
"Apache-2.0"
] |
permissive
|
mushkevych/grazer
|
2a0357c33448fadc6e91528098e0eabf74bc3cd1
|
37254a550eeaaa8125bb1a643d493bcaa785fb25
|
refs/heads/master
| 2016-09-15T20:03:30.653432
| 2015-05-05T06:00:19
| 2015-05-05T06:00:19
| 31,232,304
| 0
| 1
| null | 2015-02-24T00:00:08
| 2015-02-23T22:05:11
|
Python
|
UTF-8
|
Python
| false
| false
| 594
|
py
|
settings = dict(
# created with: sudo rabbitmqctl add_vhost /hadoop
# set permissions with: sudo rabbitmqctl set_permissions -p /hadoop guest ".*" ".*" ".*"
mq_host='rabbitmq.yourdomain.com',
mq_user_id='MQ_USER',
mq_password='MQ_PASSWORD',
mq_vhost='/grazer',
mq_port=5672,
aws_redshift_host='REDSHIFT_HOST.redshift.amazonaws.com',
aws_redshift_db='DB_NAME',
aws_redshift_user='DB_USER',
aws_redshift_password='DB_PASSWORD',
aws_redshift_port=5439,
mq_timeout_sec=10.0,
aws_redshift_grazer_suffix='_test',
csv_bulk_threshold=64,
)
|
[
"dan.mushkevych@mobidia.com"
] |
dan.mushkevych@mobidia.com
|
41527e638d93cfffa7419214e8a19a547c0222fc
|
7c0cffba0b0e37daee3cf33d3750e1c8a89d1822
|
/Controller/control.py
|
c4c437dd392a25382a5c2fc191f5ec90304aeb1b
|
[] |
no_license
|
ShanghaitechGeekPie/IFTHEN
|
47f0e9ebf51a65ed16ea130139e2a8cc9ff900e9
|
c67b5c925d91553a5e07a9dee84bb8af419b5827
|
refs/heads/master
| 2021-01-18T18:11:42.077635
| 2016-10-15T04:17:24
| 2016-10-15T04:17:24
| 59,354,507
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
# Python 3.4.3 #
from apscheduler.schedulers.blocking import BlockingScheduler
from logic.models import Logic
import django
import json
import requests
import time
def excute():
commands = Logic.objects.all()
for command in commands:
time_present = time.time()
        # Logic rows are Django model instances, so use attribute access rather than subscripting
        query = json.loads(command.Q)
        action = json.loads(command.A)
        time_interval = command.T
        time_stamp = command.TimeStamp
if (time_present - time_stamp) % time_interval >= 5:
continue
i = 0
while (i + 4 < len(query)):
API1 = API.objects.get(id = query[i]['API'])
API2 = API.objects.get(id = query[i + 2]['API'])
tmp1 = requests.get(API1.provider.baseurl + API1.slug, data = query[i]['args'])
tmp2 = requests.get(API2.provider.baseurl + API2.slug, data = query[i + 2]['args'])
if API1.retu in ['int', 'float']:
flag = eval(tmp1 + query[i + 1] + tmp2)
else:
if qurey[i+1] == '=':
flag = (tmp1 == tmp2)
else:
flag = (tmp1 != tmp2)
if flag == False:
continue
i = i + 4
API1 = API.objects.get(id = action['API'])
requests.get(API1.provider.baseurl + API1.slug)
sched = BlockingScheduler()
sched.add_job(execute, 'interval', seconds = 5)
sched.start()
|
[
"yuanyzh@shanghaitech.edu.cn"
] |
yuanyzh@shanghaitech.edu.cn
|
32b5c6c58b4c8eeaa2951f17ab0bf0380b2b5467
|
a92b6ed6ba2091e4d4ec9613c6f6affe6e655c40
|
/main.py
|
b3135588610a604ee17520ff6956c0d1e5caabfe
|
[] |
no_license
|
rushali09/Python-Coffee-Machine
|
f3f8770449fb42772ab970f6a52eb43250f856b9
|
572a3b45b414ba8723f972de500fe98d7e9bfcf3
|
refs/heads/main
| 2023-02-17T15:56:41.170337
| 2021-01-21T08:07:39
| 2021-01-21T08:07:39
| 331,557,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,594
|
py
|
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
profit = 0
resources = {
"water": 300,
"milk": 200,
"coffee": 100,
}
def is_resource_sufficient(user_ordered_ingredients):
"""Returns True when ingredients are sufficient, False when ingredients are insufficient"""
for item in user_ordered_ingredients:
        if user_ordered_ingredients[item] > resources[item]:
print(f"Sorry, there is not enough {item}")
return False
return True
def process_coins():
"""Returns the total calculated from coins inserted"""
print("Please insert coins")
total = int(input("How many quarters?: "))* 0.25
total += int(input("How many dimes?: "))* 0.1
total += int(input("How many nickles?: "))* 0.05
total += int(input("How many pennies?: "))* 0.01
return total
def is_transaction_successful(money_received, drink_cost):
"""Returns True when payment is sufficient and False when money received by user is insufficient"""
if money_received >= drink_cost:
change = round(money_received - drink_cost, 2)
print(f"Here is ${change} in change")
global profit
profit += drink_cost
return True
else:
print("Sorry, there is not enough money. Money Refunded")
return False
def make_coffee(drink_name, order_ingredients):
"""deduct the required ingredients from the resources"""
for item in order_ingredients:
resources[item] -= order_ingredients[item]
print(f"Here is your {drink_name} ☕")
hello_kitty = True
while hello_kitty:
choice = input("What would you like? (espresso/latte/cappuccino): ")
if choice == "off":
hello_kitty = False
elif choice == "report":
print(f"Water: {resources['water']}ml")
print(f"Milk: {resources['milk']}ml")
print(f"Coffee: {resources['coffee']}g")
print(f"Money: ${profit}")
else:
drink = MENU[choice]
if is_resource_sufficient(drink["ingredients"]):
payment = process_coins()
if is_transaction_successful(payment, drink["cost"]):
make_coffee(choice, drink["ingredients"])
|
[
"rushalisreedhar37@gmail.com"
] |
rushalisreedhar37@gmail.com
|
165063736ccff5a78e51a0ed056d596280d583b3
|
532a912beca7dc986d2f3ff34fb22edd692932f0
|
/deploy.py
|
cef1301b10c0ac8cd26827be8c47d552f8b4aa27
|
[] |
no_license
|
aGHz/aptgregator
|
ce1539feaeb9bd2cf607a1fea334b415028b7cc4
|
2abed7bebd88e1ad4de2b60b4d5cf668e8d907e8
|
refs/heads/master
| 2021-01-23T03:12:58.027835
| 2014-04-08T01:11:27
| 2014-04-08T01:11:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,939
|
py
|
#!/bin/python
import getopt
import os
import subprocess
import sys
def syntax():
print """Generate instructions to deploy this new installation of aptgregator
After review, the output can be run manually or piped through sh
Syntax:
python deploy.py [restart] [options]
Options:
--flow Initializes git-flow and pulls branch develop if remote is set
--venv Sets up a new virtualenv, installs packages
--nginx= The path to Nginx sites-enabled, will symlink app's nginx.conf
Leave blank for a sensible default, i.e. '--nginx='
--auto= user[:group] under which the Paste process should run at boot
If absent, app will not be set up for starting on boot
If group is absent, it is assumed to match the user
Will also start the app right after deployment
Probably pointless without --nginx
restart Reconfigures the app and restarts it
--nginx When used after restart, will also restart Nginx
Only needed when the Nginx configuration changed
Examples:
Typical activation of a fresh WebCore template setup
python deploy.py --venv
  Typical for development, running builtin server without Nginx or autostart
python deploy.py --flow --venv
Typical for production environments
python deploy.py --venv --auto=`id -nu`:`id -ng` --nginx
After making changes to the Python code
python deploy.py restart
"""
def restart(nginx):
pass
def flow():
try:
branches = subprocess.check_output(['git', 'branch'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return [
"", "# " + '-' * 72,
"# WARNING: This is not a git repository",
"# " + '-' * 72,
"",
]
if 'develop' in branches:
return [
"", "# " + '-' * 72,
"# WARNING: --flow requested but git-flow already installed",
"# " + '-' * 72,
"",
]
out = [
"", "# " + '-' * 72,
"# Initialize git-flow",
"# " + '-' * 72,
"git flow init",
"git checkout develop", # Possibly redundant
"",
]
try:
remotes = subprocess.check_output(['git', 'remote'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
remotes = ''
if 'origin' in remotes:
out += [
"# Set the proper upstream for branch develop",
"git branch --set-upstream develop origin/develop",
"git pull",
"git submodule update --init --recursive", # Possibly redundant
"",
]
return out
def venv():
out = [
"", "# " + '-' * 72,
"# Initialize virtualenv",
"# " + '-' * 72,
"virtualenv --no-site-packages --distribute .",
". bin/activate",
"",
"# Install dependencies",
"pip install -r etc/packages.pip",
"python src/setup.py develop",
"cd src && python setup.py develop && cd ..",
"",
]
return out
def nginx(path, linux):
out = []
if not path:
if linux:
path = '/etc/nginx/sites-enabled'
else:
path = '/usr/local/etc/nginx/sites-enabled'
if not os.path.isdir(path):
out = [
"", "# " + '-' * 72,
"# ERROR: Nginx config not found: {0}".format(path),
"# " + '-' * 72,
"",
]
out += [
"", "# " + '-' * 72,
"# Sym-link to the Nginx config from the proper location",
"# " + '-' * 72,
"{0}ln -s /Users/tek/src/aptgregator/etc/nginx.conf {1}".format('sudo ' if linux else '', os.path.join(path, 'aptgregator')),
"",
]
out += ["# Reload the Nginx config"]
if linux:
out += ["sudo /etc/init.d/nginx reload"]
else:
out += ["nginx -s reload"]
out += [""]
return out
def auto(user_group, linux):
[user, group] = (user_group + ':' + user_group).split(':')[:2] # trick to make group=user if absent
out = [
"", "# " + '-' * 72,
"# Configure initd.sh with user {user}:{group}".format(user=user, group=group),
"# " + '-' * 72,
"sed -i '' 's|__user__|{user}|' bin/initd.sh".format(user=user),
"sed -i '' 's|__group__|{group}|' bin/initd.sh".format(group=group),
"",
]
if linux:
out += [
"# Sym-link to the init.d script from the proper location",
"sudo ln -s /Users/tek/src/aptgregator/bin/initd.sh /etc/init.d/aptgregator",
"sudo update-rc.d aptgregator defaults",
"",
"echo",
"echo " + '-' * 80,
"echo ' To no longer start on boot, run:'",
"echo ' sudo /etc/init.d/aptgregator stop'",
"echo ' sudo update-rc.d -f aptgregator remove'",
"echo " + '-' * 80,
"echo",
"",
]
else:
out += [
"# Sym-link to the LaunchAgent plist from the proper location",
"ln -s /Users/tek/src/aptgregator/bin/launchAgent.plist ~/Library/LaunchAgents/com.aptgregator.tek.production.plist",
"launchctl load ~/Library/LaunchAgents/com.aptgregator.tek.production.plist",
"echo",
"echo " + '-' * 80,
"echo ' To no longer start on boot, run:'",
"echo ' launchctl stop com.aptgregator.tek.production'",
"echo ' launchctl remove com.aptgregator.tek.production'",
"echo ' rm ~/Library/LaunchAgents/com.aptgregator.tek.production.plist'",
"echo " + '-' * 80,
"echo",
"",
]
return out
def start(opt, linux):
out = []
if '--auto' in opt and '--nginx' not in opt:
out += [
"", "# " + '-' * 72,
"# WARNING: --auto set without --nginx",
"# The production server will start but FastCGI will not be served by Nginx",
"# This is potentially okay if it was specifically intended",
"# " + '-' * 72,
"",
]
if '--auto' in opt:
out += [
"", "# " + '-' * 72,
"# Start the production server",
"# " + '-' * 72,
"echo",
"echo " + '-' * 80,
"echo ' Starting production server'",
]
if linux:
out += [
"echo ' sudo /etc/init.d/aptgregator start'",
"sudo /etc/init.d/aptgregator start",
]
else:
out += [
"echo ' launchctl start com.aptgregator.tek.production'",
"launchctl start com.aptgregator.tek.production",
]
out += [
"echo " + '-' * 80,
"",
]
out += [
"", "# " + '-' * 72,
"# Server instructions",
"# " + '-' * 72,
"echo",
"echo " + '-' * 80,
"echo ' To run the local development server:'",
"echo ' ./etc/local.ini'",
]
if '--auto' in opt:
out += [
"echo " + '-' * 80,
"echo ' To control the local production server:'",
]
if linux:
out += ["echo ' sudo /etc/init.d/aptgregator start|stop|restart'"]
else:
out += ["echo ' launchctl start|stop com.aptgregator.tek.production'"]
out += [
"echo " + '-' * 80,
"echo",
"",
]
return out
def main(argv):
linux = sys.platform.startswith('linux')
if '--nginx' in argv:
# Silly getopt fix for potentially empty option
argv[argv.index('--nginx')] = '--nginx='
opt = getopt.getopt(argv, 'h', [
'venv',
'flow',
'auto=',
'nginx=',
'help',
])
argv = opt[1]
opt = dict(opt[0])
if '-h' in opt or '--help' in opt or (len(opt) == 0 and len(argv) == 0):
syntax()
return 1
if 'restart' in argv:
restart('--nginx' in argv)
return 1
out = [
"",
"cd /Users/tek/src/aptgregator",
]
if '--flow' in opt:
out += flow()
if '--venv' in opt:
out += venv()
if '--nginx' in opt:
out += nginx(opt['--nginx'], linux)
if '--auto' in opt:
out += auto(opt['--auto'], linux)
out += start(opt, linux)
out += [
"",
"# " + '-' * 72,
"# ",
"# If the script is correct, run the following to deploy:",
"# ",
"# python {0}".format(' '.join(sys.argv) + ' | sh'),
"# ",
"# " + '-' * 72,
"",
]
print "\n".join(out)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"adrian.ghizaru@gmail.com"
] |
adrian.ghizaru@gmail.com
|
341cddee35f5b6e4b78500da685d57d1aaee67e7
|
47ee13dce0907de438461ea7e33832a09f1ba362
|
/corpus/c4bf475a-19a9-11de-ba4e-3babc36f5e84/solution/python/test
|
d33d6575b8e97b88cf40da8de6cfc8937109eb57
|
[] |
no_license
|
Marta81/tapperdan
|
1c6624b12d33a0a0fc7906c11c8c0de88d0d3e05
|
d9d27f47ea378ad04ea0f91ce82b699b1e1d8f5d
|
refs/heads/master
| 2021-01-18T20:42:09.957943
| 2009-03-26T03:18:02
| 2009-03-26T03:18:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
#!/usr/bin/env python
print "Hello, World!"
|
[
"rblackwe@rblackwe.com"
] |
rblackwe@rblackwe.com
|
|
6d1a9a8a9639cc6ec0093c2eb0ba511f0654f894
|
4a9ed707b3b9adffd3e2f98c39040cede7dc0cc8
|
/garage/envs/mujoco/gather/ant_gather_env.py
|
7c0e3c54faf07ce45971d590b3efea02eb491053
|
[
"MIT"
] |
permissive
|
flyers/garage
|
f0c568bd850a0770a0f13d6c550318338049a462
|
745dff67d6777b78c5faaf2f2bfafcaf6f71d575
|
refs/heads/master
| 2020-04-15T15:38:42.500998
| 2019-01-29T11:56:29
| 2019-01-29T11:56:29
| 164,802,583
| 0
| 0
|
MIT
| 2019-01-29T12:11:13
| 2019-01-09T06:28:48
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
from garage.envs.mujoco import AntEnv
from garage.envs.mujoco.gather import GatherEnv
class AntGatherEnv(GatherEnv):
MODEL_CLASS = AntEnv
ORI_IND = 6
|
[
"noreply@github.com"
] |
flyers.noreply@github.com
|
713a24a7ccdd51e993b29e4b2f542ce44c4723f6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03448/s790400785.py
|
17c0ac19efb39097ef60a9bdde7f5b5bfd5d9764
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
def resolve():
A = int(input())
B = int(input())
C = int(input())
X = int(input())
ans = []
for a in range(A + 1):
for b in range(B + 1):
c = (X - 500 * a - 100 * b) / 50
if c <= C and c >= 0:
ans.append((a, b, c))
print((len(set(ans))))
return
resolve()
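# Hand-checked example (illustrative input, not from the source): with A=2, B=2, C=2, X=100
# only (a, b, c) = (0, 0, 2) and (0, 1, 0) reach 100 yen, so the script prints 2.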
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0c00cb5df809def448fd1c5f50e41d957f662365
|
e6e3e22f4111e7a9a1c3c8f719a4a00f1a76e36b
|
/ConnectedComp.py
|
3be7256728c3e817679d9c6afafe0a3f9929cadd
|
[] |
no_license
|
GiuliaLovati/Tesy
|
656553b383633c1426abbae7f3da483dd152e238
|
3bb50bfea37c3b0316a479453d629e839aa9a4c4
|
refs/heads/master
| 2022-12-12T00:53:36.020812
| 2020-09-11T17:01:03
| 2020-09-11T17:01:03
| 211,265,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,822
|
py
|
import cv2 as cv
import numpy as np
def imshow_components(image, threshold=70):
    img = cv.threshold(image, threshold, 255, cv.THRESH_BINARY)[1] # ensure binary (use the threshold argument)
num_labels, labels = cv.connectedComponents(img)
# Map component labels to hue val
label_hue = np.uint8(179*labels/np.max(labels)) #each label gets a different hue
blank_ch = 255*np.ones_like(label_hue)
labeled_img = cv.merge([label_hue, blank_ch, blank_ch]) #each element of the output array will be a concatenation of the elements of the input arrays
# cvt to BGR for display
labeled_img = cv.cvtColor(labeled_img, cv.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue==0] = 0
return labeled_img
#cv.imshow('labeled.png', labeled_img)
#cv.waitKey()
def connected_components_for_binaryimg(img):
num_labels, labels = cv.connectedComponents(img)
# Map component labels to hue val
label_hue = np.uint8(179*labels/np.max(labels))
blank_ch = 255*np.ones_like(label_hue)
#print (blank_ch)
labeled_img = cv.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv.cvtColor(labeled_img, cv.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue==0] = 0
return labeled_img
#OPERATIONS ON FOUND COMPONENTS:
def equallabels(labels_im, number): #equivalent to reading the 5th column of cv.connectedComponentsWithStats for a specific row (number)
numlist=[]
for i in range(labels_im.shape[0]):
for j in range(labels_im.shape[1]):
if labels_im[i][j] == number:
numlist.append(labels_im[i][j])
else:
pass
return len(numlist)
def concompmean(image,thr): #returns np.mean(stats[:,4])
lens=[]
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
num_labels, labels_im = cv.connectedComponents(img)
for k in range(num_labels):
newlen = equallabels(labels_im, k)
lens.append(newlen)
print (lens)
return (np.mean(lens))
def selection(image, thr=70): #selection of connected components with pixel area > certain value (valuemean)
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
num_labels, labels_im, stats, centroids = cv.connectedComponentsWithStats(img)
#print (stats.shape)
    #number of stats rows: number of connected components
    #5th column of stats: number of pixels of that connected component
    #other stats columns describe the box that contains each component
areas = stats[:,4]
areas1 = areas.tolist()
valuemean = np.mean(areas1)
print ('Total number of connected components:', len(areas1))
print ('Average area of connected components:', valuemean)
bigareasindex = []
bigareas = []
for i in areas1:
if i>=valuemean:
bigareasindex.append(areas1.index(i))
bigareas.append(i)
print ('Labels of connected components with pixel area higher than average:', bigareasindex) #index 0 : background
print ('Number of pixels of each selected area:', bigareas)
print('')
bigareasarray = np.array([bigareasindex, bigareas]).T
print (bigareasarray)
return bigareasindex, bigareas, bigareasarray
def differentSelection(image, thr=70, number=1): #selection of connected components with pixel area > certain value (valuemean) +/- number times standard deviation
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
num_labels, labels_im, stats, centroids = cv.connectedComponentsWithStats(img)
#print (stats.shape)
    #number of stats rows: number of connected components
    #5th column of stats: number of pixels of that connected component
    #other stats columns describe the box that contains each component
areas = stats[:,4]
areas1 = areas.tolist()
valuemean = np.mean(areas1)
standarddev = np.std(areas1)
print ('Total number of connected components:', len(areas1))
print ('Average area of connected components:', valuemean)
print ('Areas standard deviation:', standarddev)
bigareasindex = []
bigareas = []
for i in areas1:
if i>=(valuemean - (number*standarddev)):
bigareasindex.append(areas1.index(i))
bigareas.append(i)
print ('Labels of selected connected components:', bigareasindex) #index 0 : background
print ('Number of pixels of each selected area:', bigareas)
print('')
bigareasarray = np.array([bigareasindex, bigareas]).T
print (bigareasarray)
return bigareasindex, bigareas, bigareasarray
def newimgbigcomponents(image, bigareasindex, thr=70): #new array image with only the components having area[pixel]> average area of all components
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
new= np.zeros_like(img,dtype='int32')
num_labels, labels_im = cv.connectedComponents(img)
hue = range(0, 255, int(255/len(bigareasindex))) #set new colors for the selected components in range(0,255)
for i in range(len(bigareasindex)):
#new += np.where(labels_im == bigareasindex[i], labels_im, 0) #gives problems showing components with label>255
        new += np.where(labels_im == bigareasindex[i], hue[i], 0) #selected components are maintained with a new label in range(0,255)
print ('New label for', bigareasindex[i], 'component:', hue[i])
return new, hue
#FINDING EDGES
def FindingUpperEdges(newimg, huenewimg):
edges = np.zeros_like(newimg)
upperlimitx = []
upperlimity = []
for i in range(newimg.shape[1]):
column = newimg[:,i]
colist = column.tolist()
for j in huenewimg[1:]:
try:
print ('column', i, 'upper edge at:', colist.index(j), ', with label:', j)
#if in the i-column, pixels with label equal to one of the selected components are present,
#it finds the index (row) of the first one with that label
edges[colist.index(j)][i] = j
upperlimitx.append(colist.index(j))
upperlimity.append(i)
except ValueError:
pass
return edges, upperlimitx, upperlimity
def FindingLowerEdges(newimg, huenewimg, edges):
lowerlimitx = []
lowerlimity = []
for i in range(newimg.shape[1]):
column = newimg[:,i]
colist = list(reversed(column)) #reversing the column in order to find the last pixel with one of the selected label value
for j in huenewimg[1:]:
try:
print ('column', i, 'lower edge at:', colist.index(j), '(not reversed value), right reversed value:', newimg.shape[0]-colist.index(j), ', with label:', j)
lowerlimitx.append(newimg.shape[0]-colist.index(j))
lowerlimity.append(i)
if colist.index(j) == 0 : #useful if there is a component that ends beyond image limit
edges[newimg.shape[0]-colist.index(j)-1][i] = j #reversing again
else:
edges[newimg.shape[0]-colist.index(j)][i] = j #reversing again
except ValueError:
pass
return edges, lowerlimitx, lowerlimity
#THICKNESS CALCULATION
def Thickness(upperlimity, upperlimitx, lowerlimity, lowerlimitx): #Thickness in pixels
deltacolumn = np.zeros_like(upperlimity)
delta = np.zeros_like(upperlimity)
for i in range(len(upperlimity)):
for j in range(len(lowerlimity)):
if i == j:
delta[i] = lowerlimitx[j] - upperlimitx[i]
deltacolumn[i] = upperlimity[i]
return deltacolumn, delta
#Conversion function takes 3 possible arguments: a thickness array in pixels for each column of the selected connected components,
#a data type specification: US data by default (important for the pixel to second conversion), specify "ITA" for Italian data,
#and a value for the dielectric constant: eps = 3.15 by default from Putzig et al. 2009, typical of pure water ice; for Grima et al. 2009 it is 3.1
def Conversion(delta, datatype = "USA", eps = 3.15):
c = 299792.458 #km/s
if datatype == "USA":
convpx = 0.0375*10**(-6) #US data, MROSH_2001: https://pds.nasa.gov/ds-view/pds/viewProfile.jsp?dsid=MRO-M-SHARAD-5-RADARGRAM-V1
elif datatype == "ITA":
convpx = 0.075*10**(-6) #from 4.3.2.6 TIME ALIGNMENT OF ECHOES paragraph of rdrsis (italian data)
else:
        print ('incorrect datatype, try "USA" or "ITA"')
deltasec = delta*convpx
print('Thickness [sec]', deltasec)
print('Maximum thickness [microsec]', (deltasec*10**6).max())
deltakm = (deltasec*c)/(2*eps**(0.5))
deltam = deltakm*1000
print ('Thickness [m]:', deltam)
print ('Maximum thickness [m]:', deltam.max())
print ('Average thickness [m]:', deltam.mean())
return deltasec, deltakm, deltam
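# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it chains the functions
# above on a small synthetic binary image to show the intended call order.
# The image size, blob positions and threshold value are illustrative assumptions.
if __name__ == "__main__":
    demo = np.zeros((100, 100), dtype=np.uint8)
    cv.rectangle(demo, (10, 20), (79, 79), 255, -1)   # large component, kept by selection()
    cv.rectangle(demo, (90, 5), (94, 9), 255, -1)     # small component, filtered out
    idx, areas, _ = selection(demo, thr=70)           # keep components with above-average area
    newimg, hue = newimgbigcomponents(demo, idx, thr=70)
    edges, upx, upy = FindingUpperEdges(newimg, hue)
    edges, lowx, lowy = FindingLowerEdges(newimg, hue, edges)
    cols, delta = Thickness(upy, upx, lowy, lowx)     # per-column thickness in pixels
    Conversion(delta, datatype="USA")                 # pixels -> seconds -> km -> m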
|
[
"giulialovati.gl@gmail.com"
] |
giulialovati.gl@gmail.com
|
5cd7a65e1435a46c2cb3ade49bcdca5022026d27
|
0e461c3ca52347efe1df6d7bf4dc9754a1a60bc9
|
/send_text.py
|
86ce81b32de0ab9867834519f07bec56065df80c
|
[] |
no_license
|
nena6/Udacitiy-Programming_foundations_with_Python
|
ebb92837ca7cd002d84b290a7bae6fa55031630c
|
c06a5d32835b603d2fc82dec7e0bec80fdd77226
|
refs/heads/master
| 2021-08-31T19:06:04.076417
| 2017-12-15T13:43:33
| 2017-12-15T13:43:33
| 113,049,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
from twilio.rest import Client
# Your Account SID from twilio.com/console
account_sid = "ACc7c6527d71af857207a258a1f0ffeb5e"
# Your Auth Token from twilio.com/console
auth_token = "85b43dbae62be16d3831e23cdda59bb0"
client = Client(account_sid, auth_token)
message = client.messages.create(
to="+385913653829",
from_="+12568264529",
body="Hello from the other side!")
print(message.sid)
|
[
"nevia.vidakovic@gmail.com"
] |
nevia.vidakovic@gmail.com
|
80fc4b38b7dff6b4f630a8e31f713c5c9b512f3c
|
53163d4129930426c2d7aa650cb1b638d1347d21
|
/lxmert/lxmert/src/tasks/nlvr2_model.py
|
ef93474403461f18461d1da85fb8877b6f6b5364
|
[
"MIT"
] |
permissive
|
fdsig/Transformer-MM-Explainability
|
5e4d9d0c927afd0316311259fc318b325d74628e
|
accc4dd3491d321948e826079ce85f61bb02e0a6
|
refs/heads/main
| 2023-09-03T01:21:27.188260
| 2021-11-17T23:56:49
| 2021-11-17T23:56:49
| 433,759,755
| 1
| 0
|
MIT
| 2021-12-01T09:20:31
| 2021-12-01T09:20:31
| null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
# coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from lxrt.modeling import GeLU, BertLayerNorm
from lxrt.entry import LXRTEncoder
from param import args
class NLVR2Model(nn.Module):
def __init__(self):
super().__init__()
self.lxrt_encoder = LXRTEncoder(
args,
max_seq_length=20
)
self.hid_dim = hid_dim = self.lxrt_encoder.dim
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim * 2, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, 2)
)
self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)
def forward(self, feat, pos, sent):
"""
:param feat: b, 2, o, f
:param pos: b, 2, o, 4
:param sent: b, (string)
:param leng: b, (numpy, int)
:return:
"""
# Pairing images and sentences:
# The input of NLVR2 is two images and one sentence. In batch level, they are saved as
# [ [img0_0, img0_1], [img1_0, img1_1], ...] and [sent0, sent1, ...]
# Here, we flat them to
# feat/pos = [ img0_0, img0_1, img1_0, img1_1, ...]
# sent = [ sent0, sent0, sent1, sent1, ...]
sent = sum(zip(sent, sent), ())
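        # e.g. sent = ('s0', 's1') becomes ('s0', 's0', 's1', 's1'), matching the flattened feat/pos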
batch_size, img_num, obj_num, feat_size = feat.size()
assert img_num == 2 and obj_num == 36 and feat_size == 2048
feat = feat.view(batch_size * 2, obj_num, feat_size)
pos = pos.view(batch_size * 2, obj_num, 4)
# Extract feature --> Concat
x = self.lxrt_encoder(sent, (feat, pos))
x = x.view(-1, self.hid_dim*2)
# Compute logit of answers
logit = self.logit_fc(x)
return logit
|
[
"hilach70@gmail.com"
] |
hilach70@gmail.com
|
8b22af7888df6c2ed8a9604c7b942d3091b1ae42
|
0039e09b2c18efad98a0c51995b68c9c22582ed0
|
/portfollio/migrations/0010_auto_20200327_1914.py
|
dc3138a3efdf84c6ef75038c142e7b9bfa0314bd
|
[] |
no_license
|
aishmn/base_app
|
b72dee7d4ebea2efbd64208c2e4dfbf6a2085779
|
1fde6cd9c95ccf2ada0cf5b802c11f49d3a75048
|
refs/heads/master
| 2021-05-17T02:58:18.861534
| 2020-03-27T16:35:43
| 2020-03-27T16:35:43
| 250,587,235
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# Generated by Django 3.0.4 on 2020-03-27 13:29
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('portfollio', '0009_blog_category'),
]
operations = [
migrations.AddField(
model_name='blog',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='blog',
name='slug',
field=models.SlugField(blank=True, null=True),
),
]
|
[
"manish.sinuwari@gmail.com"
] |
manish.sinuwari@gmail.com
|
807c48c6962ab4fd329836f97eaeb05bb435f2bf
|
d93b5c753ac9c9309d946cc8cfde005027fc1859
|
/No6_1.py
|
82c1e33002a93e0d5c1e77e851c0cd200b024e6b
|
[] |
no_license
|
injoinD0913/Python-case
|
12e0d53ee493e748d51240666f8bb699c21fbbb3
|
13f2cdebf815aaf0367bde1372f7720a792b6d36
|
refs/heads/master
| 2020-09-07T10:17:47.884970
| 2019-11-15T15:55:58
| 2019-11-15T15:55:58
| 220,750,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
# _*_ coding:utf-8 _*_
# Development team:
# Developer: Administrator
# Date: 2019/10/12 20:34
# File name: No6_1.py
# IDE: PyCharm
# Problem: the Fibonacci sequence.
# Analysis: the Fibonacci sequence is the series 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, ...
# It can be defined recursively:
# F0 = 0 (n = 0)
# F1 = 1 (n = 1)
# Fn = F[n - 1] + F[n - 2] (n >= 2)
# Print the requested number of Fibonacci terms
i = int(input())
def fib(n):
if n == 1:
return [1]
if n == 2:
return [1, 1]
fibs = [1, 1]
for i in range(2, n):
fibs.append(fibs[-1] + fibs[-2])
return fibs
print(fib(i))
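# Example: an input of 6 prints [1, 1, 2, 3, 5, 8]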
|
[
"injoin_d@aliyun.com"
] |
injoin_d@aliyun.com
|
c8dd76f68361f90919bc5ca4d3b4e315a3f3ab89
|
fe752040ed8552246e465d4259a73579acf1b623
|
/drift.py
|
35b4acfa8de452bebd0dfbeb10a4c4adf4c33903
|
[] |
no_license
|
abdifatah87/imt3003
|
2d119c4868fd868de02f78b5716430a38f73f6b4
|
28c471032944fbbd78fcf18b483a2b91b308bd39
|
refs/heads/master
| 2020-12-13T06:53:04.286139
| 2020-01-26T17:34:50
| 2020-01-26T17:34:50
| 234,341,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
import os
from openstack import connection
conn = connection.Connection(auth_url= "https://api.skyhigh.iik.ntnu.no:8774/v2.1",
project_name=str(os.getenv("OS_PROJECT_NAME")),
username=str(os.getenv("OS_USERNAME")),
password=str(os.getenv("OS_PASSWORD")),
user_domain_id=str(os.getenv("OS_USER_DOMAIN_NAME")),
project_domain_id=str(os.getenv("OS_PROJECT_DOMAIN_ID"))
)
def list_servers(connection):
print("list servers:")
    for server in connection.compute.servers():
print(server)
list_servers(conn)
|
[
"abdifatah87@live.no"
] |
abdifatah87@live.no
|
7f1173e8bb1f003e5a7f5f407b9c460188d6b251
|
20406108a91d05b5e05a16fa17329b68d8cbfc7c
|
/src/mario_maze/settings.py
|
7374af9a22637d9afd5737f2054d705de0181241
|
[] |
no_license
|
Olena-Mordas/mario-maze_be
|
d85f81022f66c7c699e5db11cf187451d96d68a0
|
dc2426793149f81ec275ee64ea3d4344e3fa5c99
|
refs/heads/master
| 2023-04-11T02:32:26.307974
| 2021-04-29T14:49:48
| 2021-04-29T14:49:48
| 359,937,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
"""
Django settings for mario_maze project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-m(bu0w2sl%kzj@&$r+0*b@)gq)zb#@ld&3pq_&5mx=yq+%&*kl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'corsheaders'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware'
]
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
'http://localhost:4200',
)
ROOT_URLCONF = 'mario_maze.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mario_maze.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {'DEFAULT_SCHEMA_CLASS':
'rest_framework.schemas.coreapi.AutoSchema',
}
|
[
"alyonaalive@gmail.com"
] |
alyonaalive@gmail.com
|
82499eb923a32ad19aeec1efd231f9c15b47ec86
|
62e7db04e60e07a6def7bc7e32e17d381ef0fa44
|
/test/test_unpack_status.py
|
712bddc93c308d9e45d7cfcafdaf90bb79d08937
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
kryptoslogic/unpacme-python
|
0e830cb44fb137bd076f4100da736b929c8cd30b
|
86529853f24ed00afa7e90b87fa64104dfc68dfe
|
refs/heads/master
| 2023-02-26T16:17:57.047693
| 2021-02-02T14:23:47
| 2021-02-02T14:23:47
| 335,313,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,386
|
py
|
"""
UnpacMe
# Introduction Welcome to the UNPACME API! All the malware unpacking and file analysis features that you are familiar with on the [unpac.me](https://www.unpac.me/) website are available through our API. You can easily integrate our unpacker into your malware analysis pipeline and begin unpacking at scale! # Authentication The public UNPACME API is publicly available and can be accessed without authentication. In order to use the private UNPACME API you must sign up for an account with UNPACME. Once you have a valid user account you can view your personal API key in your user profile. <SecurityDefinitions /> # Response Structure When interacting with the UNPACME API, if the request was correctly handled, a <b>200</b> HTTP status code will be returned. The body of the response will usually be a JSON object (except for file downloads). ## Response Status Codes Status Code | Description | Notes ------------- | ------------- | - 200 | OK | The request was successful 400 | Bad Request | The request was somehow incorrect. This can be caused by missing arguments or arguments with wrong values. 401 | Unauthorized | The supplied credentials, if any, are not sufficient to access the resource 403 | Forbidden | The account does not have enough privileges to make the request. 404 | Not Found | The requested resource is not found 429 | Too Many Requests | The request frequency has exceeded one of the account quotas (minute, daily or monthly). Monthly quotas are reset on the 1st of the month at 00:00 UTC. 500 | Server Error | The server could not return the representation due to an internal server error ## Error Response If an error has occurred while handling the request an error status code will be returend along with a JSON error message with the following properties. Property | Description ------------- | ------------- Error | The error type Description | A more informative message # Example Clients The following clients can be used to interact with the UNPACME API directly and are provided as examples. These clients are community projects and are not maintained or developed by UNPACME. UNPACME makes no claim as to the safety of these clients, use at your own risk. - [UnpacMe Python Client](https://github.com/larsborn/UnpacMeClient) (Python) - [UnpacMe GO Client](https://github.com/kryptoslogic/unpacme-go) (Golang) - [UnpacMe Library](https://github.com/R3MRUM/unpacme) (Python) - [AssemblyLine](https://github.com/CybercentreCanada/assemblyline-service-unpacme) (Automation Service) <br> # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import unpacme
from unpacme.model.status import Status
from unpacme.model.unpack_status_all_of import UnpackStatusAllOf
globals()['Status'] = Status
globals()['UnpackStatusAllOf'] = UnpackStatusAllOf
from unpacme.model.unpack_status import UnpackStatus
class TestUnpackStatus(unittest.TestCase):
"""UnpackStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUnpackStatus(self):
"""Test UnpackStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = UnpackStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"jamieh@kryptoslogic.com"
] |
jamieh@kryptoslogic.com
|
fa634099a27ded13c1952c58524029bb04dfce23
|
41986b7a1b95784f0a6256ae24d5942c70ced4d7
|
/prod/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1alpha1/container_v1alpha1_messages.py
|
49c00a4745dfa8067e647185d258367759f8dcfb
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
wakabayashi-seiya/terraform_gcp
|
ed829a5a21d5d19d6663804ee5d5f7f3d23b4ec4
|
f757e56779f33c2fabd8a8eed9c51ff0b897a38f
|
refs/heads/master
| 2021-07-07T21:51:35.993317
| 2020-03-11T05:42:57
| 2020-03-11T05:42:57
| 239,411,772
| 0
| 1
| null | 2021-04-30T21:05:04
| 2020-02-10T02:32:04
|
Python
|
UTF-8
|
Python
| false
| false
| 175,511
|
py
|
"""Generated message classes for container version v1alpha1.
Builds and manages container-based applications, powered by the open source
Kubernetes technology.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'container'
class AcceleratorConfig(_messages.Message):
r"""AcceleratorConfig represents a Hardware Accelerator request.
Fields:
acceleratorCount: The number of the accelerator cards exposed to an
instance.
acceleratorType: The accelerator type resource name. List of supported
accelerators [here](/compute/docs/gpus)
"""
acceleratorCount = _messages.IntegerField(1)
acceleratorType = _messages.StringField(2)
class AddonsConfig(_messages.Message):
r"""Configuration for the addons that can be automatically spun up in the
cluster, enabling additional functionality.
Fields:
cloudBuildConfig: Configuration for the Cloud Build addon.
cloudRunConfig: Configuration for the Cloud Run addon. The `IstioConfig`
addon must be enabled in order to enable Cloud Run. This option can only
be enabled at cluster creation time.
configConnectorConfig: Configuration for the ConfigConnector add-on, a
Kubernetes extension to manage hosted GCP services through the
Kubernetes API
dnsCacheConfig: Configuration for NodeLocalDNS, a dns cache running on
cluster nodes
gcePersistentDiskCsiDriverConfig: Configuration for the GCP Compute
Persistent Disk CSI driver.
horizontalPodAutoscaling: Configuration for the horizontal pod autoscaling
feature, which increases or decreases the number of replica pods a
replication controller has based on the resource usage of the existing
pods.
httpLoadBalancing: Configuration for the HTTP (L7) load balancing
controller addon, which makes it easy to set up HTTP load balancers for
services in a cluster.
istioConfig: Configuration for Istio, an open platform to connect, manage,
and secure microservices.
kalmConfig: Configuration for the KALM addon, which manages the lifecycle
of k8s applications.
kubernetesDashboard: Configuration for the Kubernetes Dashboard. This
addon is deprecated, and will be disabled in 1.15. It is recommended to
use the Cloud Console to manage and monitor your Kubernetes clusters,
workloads and applications. For more information, see:
https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards
networkPolicyConfig: Configuration for NetworkPolicy. This only tracks
whether the addon is enabled or not on the Master, it does not track
whether network policy is enabled for the nodes.
"""
cloudBuildConfig = _messages.MessageField('CloudBuildConfig', 1)
cloudRunConfig = _messages.MessageField('CloudRunConfig', 2)
configConnectorConfig = _messages.MessageField('ConfigConnectorConfig', 3)
dnsCacheConfig = _messages.MessageField('DnsCacheConfig', 4)
gcePersistentDiskCsiDriverConfig = _messages.MessageField('GcePersistentDiskCsiDriverConfig', 5)
horizontalPodAutoscaling = _messages.MessageField('HorizontalPodAutoscaling', 6)
httpLoadBalancing = _messages.MessageField('HttpLoadBalancing', 7)
istioConfig = _messages.MessageField('IstioConfig', 8)
kalmConfig = _messages.MessageField('KalmConfig', 9)
kubernetesDashboard = _messages.MessageField('KubernetesDashboard', 10)
networkPolicyConfig = _messages.MessageField('NetworkPolicyConfig', 11)
class AuthenticatorGroupsConfig(_messages.Message):
r"""Configuration for returning group information from authenticators.
Fields:
enabled: Whether this cluster should return group membership lookups
during authentication using a group of security groups.
securityGroup: The name of the security group-of-groups to be used. Only
relevant if enabled = true.
"""
enabled = _messages.BooleanField(1)
securityGroup = _messages.StringField(2)
class AutoUpgradeOptions(_messages.Message):
r"""AutoUpgradeOptions defines the set of options for the user to control
how the Auto Upgrades will proceed.
Fields:
autoUpgradeStartTime: [Output only] This field is set when upgrades are
about to commence with the approximate start time for the upgrades, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
description: [Output only] This field is set when upgrades are about to
commence with the description of the upgrade.
"""
autoUpgradeStartTime = _messages.StringField(1)
description = _messages.StringField(2)
class AutoprovisioningNodePoolDefaults(_messages.Message):
r"""AutoprovisioningNodePoolDefaults contains defaults for a node pool
created by NAP.
Fields:
management: Specifies the node management options for NAP created node-
pools.
minCpuPlatform: Minimum CPU platform to be used for NAP created node
pools. The instance may be scheduled on the specified or newer CPU
platform. Applicable values are the friendly names of CPU platforms,
such as <code>minCpuPlatform: "Intel Haswell"</code> or
<code>minCpuPlatform: "Intel Sandy Bridge"</code>. For more
information, read [how to specify min CPU
platform](https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform) To unset the min cpu platform field pass "automatic" as
field value.
oauthScopes: Scopes that are used by NAP when creating node pools. If
oauth_scopes are specified, service_account should be empty.
serviceAccount: The Google Cloud Platform Service Account to be used by
the node VMs. If service_account is specified, scopes should be empty.
upgradeSettings: Specifies the upgrade settings for NAP created node pools
"""
management = _messages.MessageField('NodeManagement', 1)
minCpuPlatform = _messages.StringField(2)
oauthScopes = _messages.StringField(3, repeated=True)
serviceAccount = _messages.StringField(4)
upgradeSettings = _messages.MessageField('UpgradeSettings', 5)
class AvailableVersion(_messages.Message):
r"""AvailableVersion is an additional Kubernetes versions offered to users
who subscribed to the release channel.
Fields:
reason: Reason for availability.
version: Kubernetes version.
"""
reason = _messages.StringField(1)
version = _messages.StringField(2)
class BigQueryDestination(_messages.Message):
r"""Parameters for using BigQuery as the destination of resource usage
export.
Fields:
datasetId: The ID of a BigQuery Dataset.
"""
datasetId = _messages.StringField(1)
class BinaryAuthorization(_messages.Message):
r"""Configuration for Binary Authorization.
Fields:
enabled: Enable Binary Authorization for this cluster. If enabled, all
container images will be validated by Google Binauthz.
"""
enabled = _messages.BooleanField(1)
class CancelOperationRequest(_messages.Message):
r"""CancelOperationRequest cancels a single operation.
Fields:
name: The name (project, location, operation id) of the operation to
cancel. Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation resides.
This field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
operationId = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class CidrBlock(_messages.Message):
r"""CidrBlock contains an optional name and one CIDR block.
Fields:
cidrBlock: cidr_block must be specified in CIDR notation.
displayName: display_name is an optional field for users to identify CIDR
blocks.
"""
cidrBlock = _messages.StringField(1)
displayName = _messages.StringField(2)
class ClientCertificateConfig(_messages.Message):
r"""Configuration for client certificates on the cluster.
Fields:
issueClientCertificate: Issue a client certificate.
"""
issueClientCertificate = _messages.BooleanField(1)
class CloudBuildConfig(_messages.Message):
r"""Configuration options for the Cloud Build addon.
Fields:
enabled: Whether the Cloud Build addon is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class CloudNatStatus(_messages.Message):
r"""CloudNatStatus contains the desired state of the cloud nat functionality
on this cluster.
Fields:
enabled: Enables Cloud Nat on this cluster. On an update if
update.desired_cloud_nat_status.enabled = true, The API will check if
any Routers in the cluster's network has Cloud NAT enabled on the pod
range. a. If so, then the cluster nodes will be updated to not perform
SNAT. b. If no NAT configuration exists, a new Router with Cloud NAT
on the secondary range will be created first, and then the nodes
will be updated to no longer do SNAT.
"""
enabled = _messages.BooleanField(1)
class CloudRunConfig(_messages.Message):
r"""Configuration options for the Cloud Run feature.
Fields:
disabled: Whether Cloud Run is enabled for this cluster.
enableAlphaFeatures: Enable alpha features of Cloud Run. These features
are only available to trusted testers.
"""
disabled = _messages.BooleanField(1)
enableAlphaFeatures = _messages.BooleanField(2)
class Cluster(_messages.Message):
r"""A Google Kubernetes Engine cluster.
Enums:
NodeSchedulingStrategyValueValuesEnum: Defines behaviour of k8s scheduler.
StatusValueValuesEnum: [Output only] The current status of this cluster.
Messages:
ResourceLabelsValue: The resource labels for the cluster to use to
annotate any related GCE resources.
Fields:
addonsConfig: Configurations for the various addons available to run in
the cluster.
authenticatorGroupsConfig: Configuration controlling RBAC group membership
information.
autoscaling: Cluster-level autoscaling configuration.
binaryAuthorization: Configuration for Binary Authorization.
clusterIpv4Cidr: The IP address range of the container pods in this
cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8`.
clusterTelemetry: Telemetry integration for the cluster.
conditions: Which conditions caused the current cluster state.
costManagementConfig: Configuration for the fine-grained cost management
feature.
createTime: [Output only] The time the cluster was created, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
currentMasterVersion: The current software version of the master endpoint.
currentNodeCount: [Output only] The number of nodes currently in the
cluster. Deprecated. Call Kubernetes API directly to retrieve node
information.
currentNodeVersion: [Output only] Deprecated, use [NodePool.version
](/kubernetes-
engine/docs/reference/rest/v1alpha1/projects.zones.clusters.nodePool)
instead. The current version of the node software components. If they
are currently at multiple versions because they're in the process of
being upgraded, this reflects the minimum version of all nodes.
databaseEncryption: Configuration of etcd encryption.
databaseEncryptionKeyId: Resource name of a CloudKMS key to be used for
the encryption of secrets in etcd. Ex. projects/kms-
project/locations/global/keyRings/ring-1/cryptoKeys/key-1 Deprecated,
use database_encryption instead.
defaultMaxPodsConstraint: The default constraint on the maximum number of
pods that can be run simultaneously on a node in the node pool of this
cluster. Only honored if cluster created with IP Alias support.
description: An optional description of this cluster.
enableKubernetesAlpha: Kubernetes alpha features are enabled on this
cluster. This includes alpha API groups (e.g. v1alpha1) and features
that may not be production ready in the kubernetes version of the master
and nodes. The cluster has no SLA for uptime and master/node upgrades
are disabled. Alpha enabled clusters are automatically deleted thirty
days after creation.
enableTpu: Enable the ability to use Cloud TPUs in this cluster.
endpoint: [Output only] The IP address of this cluster's master endpoint.
The endpoint can be accessed from the internet at
`https://username:password@endpoint/`. See the `masterAuth` property of
this resource for username and password information.
expireTime: [Output only] The time the cluster will be automatically
deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
initialClusterVersion: The initial Kubernetes version for this cluster.
Valid versions are those found in validMasterVersions returned by
getServerConfig. The version can be upgraded over time; such upgrades
are reflected in currentMasterVersion and currentNodeVersion. Users may
specify either explicit versions offered by Kubernetes Engine or version
aliases, which have the following behavior: - "latest": picks the
highest valid Kubernetes version - "1.X": picks the highest valid
patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid
gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit
Kubernetes version - "","-": picks the default Kubernetes version
initialNodeCount: The number of nodes to create in this cluster. You must
ensure that your Compute Engine <a href="/compute/docs/resource-
quotas">resource quota</a> is sufficient for this number of instances.
You must also have available firewall and routes quota. For requests,
this field should only be used in lieu of a "node_pool" object, since
this configuration (along with the "node_config") will be used to create
a "NodePool" object with an auto-generated name. Do not use this and a
node_pool at the same time. This field is deprecated, use
node_pool.initial_node_count instead.
instanceGroupUrls: Deprecated. Use node_pools.instance_group_urls.
ipAllocationPolicy: Configuration for cluster IP allocation.
labelFingerprint: The fingerprint of the set of labels for this cluster.
legacyAbac: Configuration for the legacy ABAC authorization mode.
location: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/regions-zones/regions-zones#available) or
[region](/compute/docs/regions-zones/regions-zones#available) in which
the cluster resides.
locations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located.
loggingService: The logging service the cluster should use to write logs.
Currently available options: * `logging.googleapis.com` - the Google
Cloud Logging service. * `none` - no logs will be exported from the
cluster. * if left as an empty string,`logging.googleapis.com` will be
used.
maintenancePolicy: Configure the maintenance policy for this cluster.
masterAuth: The authentication information for accessing the master
endpoint. If unspecified, the defaults are used: For clusters before
v1.12, if master_auth is unspecified, `username` will be set to "admin",
a random password will be generated, and a client certificate will be
issued.
masterAuthorizedNetworksConfig: The configuration options for master
authorized networks feature.
masterIpv4CidrBlock: The IP prefix in CIDR notation to use for the hosted
master network. This prefix will be used for assigning private IP
addresses to the master or set of masters, as well as the ILB VIP. This
field is deprecated, use private_cluster_config.master_ipv4_cidr_block
instead.
monitoringService: The monitoring service the cluster should use to write
metrics. Currently available options: * `monitoring.googleapis.com` -
the Google Cloud Monitoring service. * `none` - no metrics will be
exported from the cluster. * if left as an empty string,
`monitoring.googleapis.com` will be used.
name: The name of this cluster. The name must be unique within this
project and location (e.g. zone or region), and can be up to 40
characters with the following restrictions: * Lowercase letters,
numbers, and hyphens only. * Must start with a letter. * Must end with a
number or a letter.
network: The name of the Google Compute Engine [network](/compute/docs
/networks-and-firewalls#networks) to which the cluster is connected. If
left unspecified, the `default` network will be used.
networkConfig: Configuration for cluster networking.
networkPolicy: Configuration options for the NetworkPolicy feature.
nodeConfig: Parameters used in creating the cluster's nodes. For requests,
this field should only be used in lieu of a "node_pool" object, since
this configuration (along with the "initial_node_count") will be used to
create a "NodePool" object with an auto-generated name. Do not use this
and a node_pool at the same time. For responses, this field will be
populated with the node configuration of the first node pool. (For
configuration of each node pool, see `node_pool.config`) If
unspecified, the defaults are used. This field is deprecated, use
node_pool.config instead.
nodeIpv4CidrSize: [Output only] The size of the address space on each node
for hosting containers. This is provisioned from within the
`container_ipv4_cidr` range. This field will only be set when cluster is
in route-based network mode.
nodePools: The node pools associated with this cluster. This field should
not be set if "node_config" or "initial_node_count" are specified.
nodeSchedulingStrategy: Defines behaviour of k8s scheduler.
podSecurityPolicyConfig: Configuration for the PodSecurityPolicy feature.
privateCluster: If this is a private cluster setup. Private clusters are
clusters that, by default have no external IP addresses on the nodes and
where nodes and the master communicate over private IP addresses. This
field is deprecated, use private_cluster_config.enable_private_nodes
instead.
privateClusterConfig: Configuration for private cluster.
releaseChannel: Release channel configuration.
resourceLabels: The resource labels for the cluster to use to annotate any
related GCE resources.
resourceUsageExportConfig: Configuration for exporting resource usages.
Resource usage export is disabled when this config unspecified.
resourceVersion: Server-defined resource version (etag).
securityProfile: User selected security profile
selfLink: [Output only] Server-defined URL for the resource.
servicesIpv4Cidr: [Output only] The IP address range of the Kubernetes
services in this cluster, in [CIDR](http://en.wikipedia.org/wiki
/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service
addresses are typically put in the last `/16` from the container CIDR.
shieldedNodes: Shielded Nodes configuration.
status: [Output only] The current status of this cluster.
statusMessage: [Output only] Additional information about the current
status of this cluster, if available. Deprecated, use the field
conditions instead.
subnetwork: The name of the Google Compute Engine
[subnetwork](/compute/docs/subnetworks) to which the cluster is
connected. On output this shows the subnetwork ID instead of the name.
tierSettings: Cluster tier settings.
tpuIpv4CidrBlock: [Output only] The IP address range of the Cloud TPUs in
this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `1.2.3.4/29`).
verticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling
configuration.
workloadIdentityConfig: Configuration for the use of k8s Service Accounts
in GCP IAM policies.
zone: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field is deprecated, use location instead.
"""
class NodeSchedulingStrategyValueValuesEnum(_messages.Enum):
r"""Defines behaviour of k8s scheduler.
Values:
STRATEGY_UNSPECIFIED: Use default scheduling strategy.
PRIORITIZE_LEAST_UTILIZED: Least utilized nodes will be prioritized by
k8s scheduler.
PRIORITIZE_MEDIUM_UTILIZED: Nodes with medium utilization will be
prioritized by k8s scheduler. This option improves interoperability of
scheduler with cluster autoscaler.
"""
STRATEGY_UNSPECIFIED = 0
PRIORITIZE_LEAST_UTILIZED = 1
PRIORITIZE_MEDIUM_UTILIZED = 2
class StatusValueValuesEnum(_messages.Enum):
r"""[Output only] The current status of this cluster.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the cluster is being
created.
RUNNING: The RUNNING state indicates the cluster has been created and is
fully usable.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the cluster, such as upgrading the master or node
software. Details can be found in the `statusMessage` field.
STOPPING: The STOPPING state indicates the cluster is being deleted.
ERROR: The ERROR state indicates the cluster may be unusable. Details
can be found in the `statusMessage` field.
DEGRADED: The DEGRADED state indicates the cluster requires user action
to restore full functionality. Details can be found in the
`statusMessage` field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RECONCILING = 3
STOPPING = 4
ERROR = 5
DEGRADED = 6
@encoding.MapUnrecognizedFields('additionalProperties')
class ResourceLabelsValue(_messages.Message):
r"""The resource labels for the cluster to use to annotate any related GCE
resources.
Messages:
AdditionalProperty: An additional property for a ResourceLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ResourceLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResourceLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
addonsConfig = _messages.MessageField('AddonsConfig', 1)
authenticatorGroupsConfig = _messages.MessageField('AuthenticatorGroupsConfig', 2)
autoscaling = _messages.MessageField('ClusterAutoscaling', 3)
binaryAuthorization = _messages.MessageField('BinaryAuthorization', 4)
clusterIpv4Cidr = _messages.StringField(5)
clusterTelemetry = _messages.MessageField('ClusterTelemetry', 6)
conditions = _messages.MessageField('StatusCondition', 7, repeated=True)
costManagementConfig = _messages.MessageField('CostManagementConfig', 8)
createTime = _messages.StringField(9)
currentMasterVersion = _messages.StringField(10)
currentNodeCount = _messages.IntegerField(11, variant=_messages.Variant.INT32)
currentNodeVersion = _messages.StringField(12)
databaseEncryption = _messages.MessageField('DatabaseEncryption', 13)
databaseEncryptionKeyId = _messages.StringField(14)
defaultMaxPodsConstraint = _messages.MessageField('MaxPodsConstraint', 15)
description = _messages.StringField(16)
enableKubernetesAlpha = _messages.BooleanField(17)
enableTpu = _messages.BooleanField(18)
endpoint = _messages.StringField(19)
expireTime = _messages.StringField(20)
initialClusterVersion = _messages.StringField(21)
initialNodeCount = _messages.IntegerField(22, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(23, repeated=True)
ipAllocationPolicy = _messages.MessageField('IPAllocationPolicy', 24)
labelFingerprint = _messages.StringField(25)
legacyAbac = _messages.MessageField('LegacyAbac', 26)
location = _messages.StringField(27)
locations = _messages.StringField(28, repeated=True)
loggingService = _messages.StringField(29)
maintenancePolicy = _messages.MessageField('MaintenancePolicy', 30)
masterAuth = _messages.MessageField('MasterAuth', 31)
masterAuthorizedNetworksConfig = _messages.MessageField('MasterAuthorizedNetworksConfig', 32)
masterIpv4CidrBlock = _messages.StringField(33)
monitoringService = _messages.StringField(34)
name = _messages.StringField(35)
network = _messages.StringField(36)
networkConfig = _messages.MessageField('NetworkConfig', 37)
networkPolicy = _messages.MessageField('NetworkPolicy', 38)
nodeConfig = _messages.MessageField('NodeConfig', 39)
nodeIpv4CidrSize = _messages.IntegerField(40, variant=_messages.Variant.INT32)
nodePools = _messages.MessageField('NodePool', 41, repeated=True)
nodeSchedulingStrategy = _messages.EnumField('NodeSchedulingStrategyValueValuesEnum', 42)
podSecurityPolicyConfig = _messages.MessageField('PodSecurityPolicyConfig', 43)
privateCluster = _messages.BooleanField(44)
privateClusterConfig = _messages.MessageField('PrivateClusterConfig', 45)
releaseChannel = _messages.MessageField('ReleaseChannel', 46)
resourceLabels = _messages.MessageField('ResourceLabelsValue', 47)
resourceUsageExportConfig = _messages.MessageField('ResourceUsageExportConfig', 48)
resourceVersion = _messages.StringField(49)
securityProfile = _messages.MessageField('SecurityProfile', 50)
selfLink = _messages.StringField(51)
servicesIpv4Cidr = _messages.StringField(52)
shieldedNodes = _messages.MessageField('ShieldedNodes', 53)
status = _messages.EnumField('StatusValueValuesEnum', 54)
statusMessage = _messages.StringField(55)
subnetwork = _messages.StringField(56)
tierSettings = _messages.MessageField('TierSettings', 57)
tpuIpv4CidrBlock = _messages.StringField(58)
verticalPodAutoscaling = _messages.MessageField('VerticalPodAutoscaling', 59)
workloadIdentityConfig = _messages.MessageField('WorkloadIdentityConfig', 60)
zone = _messages.StringField(61)
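# Illustrative sketch, not part of the generated bindings: the cluster name,
# node count, and label below are placeholder assumptions, chosen only to show
# how the map-valued resourceLabels field is encoded as AdditionalProperty
# entries.
def _example_cluster_with_labels():
  labels = Cluster.ResourceLabelsValue(additionalProperties=[
      Cluster.ResourceLabelsValue.AdditionalProperty(key='env', value='dev'),
  ])
  return Cluster(name='example-cluster', initialNodeCount=3,
                 resourceLabels=labels)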
class ClusterAutoscaling(_messages.Message):
r"""ClusterAutoscaling contains global, per-cluster information required by
Cluster Autoscaler to automatically adjust the size of the cluster and
create/delete node pools based on the current needs.
Enums:
AutoscalingProfileValueValuesEnum: Defines autoscaling behaviour.
Fields:
autoprovisioningLocations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the NodePool's nodes can
be created by NAP.
autoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults
contains defaults for a node pool created by NAP.
autoscalingProfile: Defines autoscaling behaviour.
enableNodeAutoprovisioning: Enables automatic node pool creation and
deletion.
resourceLimits: Contains global constraints regarding minimum and maximum
amount of resources in the cluster.
"""
class AutoscalingProfileValueValuesEnum(_messages.Enum):
r"""Defines autoscaling behaviour.
Values:
PROFILE_UNSPECIFIED: No change to autoscaling configuration.
OPTIMIZE_UTILIZATION: Prioritize optimizing utilization of resources.
BALANCED: Use default (balanced) autoscaling configuration.
"""
PROFILE_UNSPECIFIED = 0
OPTIMIZE_UTILIZATION = 1
BALANCED = 2
autoprovisioningLocations = _messages.StringField(1, repeated=True)
autoprovisioningNodePoolDefaults = _messages.MessageField('AutoprovisioningNodePoolDefaults', 2)
autoscalingProfile = _messages.EnumField('AutoscalingProfileValueValuesEnum', 3)
enableNodeAutoprovisioning = _messages.BooleanField(4)
resourceLimits = _messages.MessageField('ResourceLimit', 5, repeated=True)
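# Illustrative sketch, not part of the generated bindings: enabling node
# auto-provisioning with the OPTIMIZE_UTILIZATION profile. The zone names are
# placeholders, not values defined by this module.
def _example_cluster_autoscaling():
  return ClusterAutoscaling(
      enableNodeAutoprovisioning=True,
      autoscalingProfile=(
          ClusterAutoscaling.AutoscalingProfileValueValuesEnum
          .OPTIMIZE_UTILIZATION),
      autoprovisioningLocations=['us-central1-a', 'us-central1-b'])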
class ClusterTelemetry(_messages.Message):
r"""Telemetry integration for the cluster.
Enums:
TypeValueValuesEnum: Type of the integration.
Fields:
type: Type of the integration.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Type of the integration.
Values:
UNSPECIFIED: Not set.
DISABLED: Monitoring integration is disabled.
ENABLED: Monitoring integration is enabled.
SYSTEM_ONLY: Only system components are monitored and logged.
"""
UNSPECIFIED = 0
DISABLED = 1
ENABLED = 2
SYSTEM_ONLY = 3
type = _messages.EnumField('TypeValueValuesEnum', 1)
class ClusterUpdate(_messages.Message):
r"""ClusterUpdate describes an update to the cluster. Exactly one update can
be applied to a cluster with each request, so at most one field can be
provided.
Fields:
concurrentNodeCount: Controls how many nodes to upgrade in parallel. A
maximum of 20 concurrent nodes is allowed. Deprecated. This feature will
be replaced by an equivalent new feature that gives better control over
concurrency. It is not planned to propagate this field to GA and it will
be eventually removed from the API.
desiredAddonsConfig: Configurations for the various addons available to
run in the cluster.
desiredBinaryAuthorization: The desired configuration options for the
Binary Authorization feature.
desiredCloudNatStatus: The desired status of Cloud NAT for this cluster.
Deprecated: use desired_default_snat_status instead.
desiredClusterAutoscaling: The desired cluster-level autoscaling
configuration.
desiredClusterTelemetry: The desired telemetry integration for the
cluster.
desiredCostManagementConfig: The desired configuration for the fine-
grained cost management feature.
desiredDatabaseEncryption: Configuration of etcd encryption.
desiredDefaultSnatStatus: The desired status of whether to disable default
sNAT for this cluster.
desiredImage: The desired name of the image to use for this node. This is
used to create clusters using a custom image.
desiredImageProject: The project containing the desired image to use for
this node. This is used to create clusters using a custom image.
desiredImageType: The desired image type for the node pool. NOTE: Set the
"desired_node_pool" field as well.
desiredIntraNodeVisibilityConfig: The desired config of Intra-node
visibility.
desiredLocations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located. Changing the locations a cluster is in will result in
nodes being either created or removed from the cluster, depending on
whether locations are being added or removed. This list must always
include the cluster's primary zone.
desiredLoggingService: The logging service the cluster should use to write
logs. Currently available options: *
"logging.googleapis.com/kubernetes" - the Google Cloud Logging service
with Kubernetes-native resource model * "logging.googleapis.com" - the
Google Cloud Logging service * "none" - no logs will be exported from
the cluster
desiredMasterAuthorizedNetworksConfig: The desired configuration options
for master authorized networks feature.
desiredMasterVersion: The Kubernetes version to change the master to.
Users may specify either explicit versions offered by Kubernetes Engine
or version aliases, which have the following behavior: - "latest":
picks the highest valid Kubernetes version - "1.X": picks the highest
valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest
valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an
explicit Kubernetes version - "-": picks the default Kubernetes version
desiredMonitoringService: The monitoring service the cluster should use to
write metrics. Currently available options: *
"monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring
service with Kubernetes-native resource model *
"monitoring.googleapis.com" - the Google Cloud Monitoring service *
"none" - no metrics will be exported from the cluster
desiredNodePoolAutoscaling: Autoscaler configuration for the node pool
specified in desired_node_pool_id. If there is only one pool in the
cluster and desired_node_pool_id is not provided then the change applies
to that single node pool.
desiredNodePoolId: The node pool to be upgraded. This field is mandatory
if "desired_node_version", "desired_image_family",
"desired_node_pool_autoscaling", or "desired_workload_metadata_config"
is specified and there is more than one node pool on the cluster.
desiredNodeVersion: The Kubernetes version to change the nodes to
(typically an upgrade). Users may specify either explicit versions
offered by Kubernetes Engine or version aliases, which have the
following behavior: - "latest": picks the highest valid Kubernetes
version - "1.X": picks the highest valid patch+gke.N patch in the 1.X
version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y
version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-":
picks the Kubernetes master version
desiredPodSecurityPolicyConfig: The desired configuration options for the
PodSecurityPolicy feature.
desiredPrivateClusterConfig: The desired private cluster configuration.
desiredPrivateIpv6Access: The desired status of Private IPv6 access for
this cluster.
desiredReleaseChannel: The desired release channel configuration.
desiredResourceUsageExportConfig: The desired configuration for exporting
resource usage.
desiredShieldedNodes: Configuration for Shielded Nodes.
desiredVerticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling
configuration.
desiredWorkloadIdentityConfig: Configuration for Workload Identity.
privateClusterConfig: The desired private cluster configuration.
securityProfile: User may change security profile during update
"""
concurrentNodeCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
desiredAddonsConfig = _messages.MessageField('AddonsConfig', 2)
desiredBinaryAuthorization = _messages.MessageField('BinaryAuthorization', 3)
desiredCloudNatStatus = _messages.MessageField('CloudNatStatus', 4)
desiredClusterAutoscaling = _messages.MessageField('ClusterAutoscaling', 5)
desiredClusterTelemetry = _messages.MessageField('ClusterTelemetry', 6)
desiredCostManagementConfig = _messages.MessageField('CostManagementConfig', 7)
desiredDatabaseEncryption = _messages.MessageField('DatabaseEncryption', 8)
desiredDefaultSnatStatus = _messages.MessageField('DefaultSnatStatus', 9)
desiredImage = _messages.StringField(10)
desiredImageProject = _messages.StringField(11)
desiredImageType = _messages.StringField(12)
desiredIntraNodeVisibilityConfig = _messages.MessageField('IntraNodeVisibilityConfig', 13)
desiredLocations = _messages.StringField(14, repeated=True)
desiredLoggingService = _messages.StringField(15)
desiredMasterAuthorizedNetworksConfig = _messages.MessageField('MasterAuthorizedNetworksConfig', 16)
desiredMasterVersion = _messages.StringField(17)
desiredMonitoringService = _messages.StringField(18)
desiredNodePoolAutoscaling = _messages.MessageField('NodePoolAutoscaling', 19)
desiredNodePoolId = _messages.StringField(20)
desiredNodeVersion = _messages.StringField(21)
desiredPodSecurityPolicyConfig = _messages.MessageField('PodSecurityPolicyConfig', 22)
desiredPrivateClusterConfig = _messages.MessageField('PrivateClusterConfig', 23)
desiredPrivateIpv6Access = _messages.MessageField('PrivateIPv6Status', 24)
desiredReleaseChannel = _messages.MessageField('ReleaseChannel', 25)
desiredResourceUsageExportConfig = _messages.MessageField('ResourceUsageExportConfig', 26)
desiredShieldedNodes = _messages.MessageField('ShieldedNodes', 27)
desiredVerticalPodAutoscaling = _messages.MessageField('VerticalPodAutoscaling', 28)
desiredWorkloadIdentityConfig = _messages.MessageField('WorkloadIdentityConfig', 29)
privateClusterConfig = _messages.MessageField('PrivateClusterConfig', 30)
securityProfile = _messages.MessageField('SecurityProfile', 31)
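# Illustrative sketch, not part of the generated bindings: because a
# ClusterUpdate may carry at most one change per request, an update that only
# moves the master to the highest valid Kubernetes version sets just
# desiredMasterVersion, using the "latest" alias documented above.
def _example_master_upgrade_update():
  return ClusterUpdate(desiredMasterVersion='latest')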
class CompleteIPRotationRequest(_messages.Message):
r"""CompleteIPRotationRequest moves the cluster master back into single-IP
mode.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to complete
IP rotation. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ConfigConnectorConfig(_messages.Message):
r"""Configuration options for the Config Connector add-on.
Fields:
enabled: Whether Cloud Connector is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class ConsumptionMeteringConfig(_messages.Message):
r"""Parameters for controlling consumption metering.
Fields:
enabled: Whether to enable consumption metering for this cluster. If
enabled, a second BigQuery table will be created to hold resource
consumption records.
"""
enabled = _messages.BooleanField(1)
class ContainerProjectsAggregatedUsableSubnetworksListRequest(_messages.Message):
r"""A ContainerProjectsAggregatedUsableSubnetworksListRequest object.
Fields:
filter: Filtering currently only supports equality on the networkProjectId
and must be in the form: "networkProjectId=[PROJECTID]", where
`networkProjectId` is the project which owns the listed subnetworks.
This defaults to the parent project ID.
pageSize: The max number of results per page that should be returned. If
the number of available results is larger than `page_size`, a
`next_page_token` is returned which can be used to get the next page of
results in subsequent requests. Acceptable values are 0 to 500,
inclusive. (Default: 500)
pageToken: Specifies a page token to use. Set this to the next_page_token
returned by previous list requests to get the next page of results.
parent: The parent project where subnetworks are usable. Specified in the
format 'projects/*'.
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
parent = _messages.StringField(4, required=True)
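# Illustrative sketch, not part of the generated bindings: paging through
# usable subnetworks with the request message above. `list_usable_subnetworks`
# is a hypothetical callable standing in for the generated service method; it
# is assumed to accept this request and return a ListUsableSubnetworksResponse.
# The project name is a placeholder.
def _example_list_all_usable_subnetworks(list_usable_subnetworks):
  request = ContainerProjectsAggregatedUsableSubnetworksListRequest(
      parent='projects/my-project', pageSize=500)
  subnetworks = []
  while True:
    response = list_usable_subnetworks(request)
    subnetworks.extend(response.subnetworks)
    if not response.nextPageToken:
      break
    request.pageToken = response.nextPageToken
  return subnetworks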
class ContainerProjectsLocationsClustersDeleteRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to delete. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersGetJwksRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersGetJwksRequest object.
Fields:
parent: The cluster (project, location, cluster id) to get keys for.
Specified in the format 'projects/*/locations/*/clusters/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsClustersGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to retrieve. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersListRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersListRequest object.
Fields:
parent: The parent (project and location) where the clusters will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones. This field has been deprecated and replaced by the
parent field.
"""
parent = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsLocationsClustersNodePoolsDeleteRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to delete. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to delete. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class ContainerProjectsLocationsClustersNodePoolsGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to get. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool. This field has been
deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class ContainerProjectsLocationsClustersNodePoolsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsListRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
parent: The parent (project, location, cluster id) where the node pools
will be listed. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest(_messages.Message):
r"""A
ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest
object.
Fields:
parent: The cluster (project, location, cluster id) to get the discovery
document for. Specified in the format
'projects/*/locations/*/clusters/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsGetServerConfigRequest(_messages.Message):
r"""A ContainerProjectsLocationsGetServerConfigRequest object.
Fields:
name: The name (project and location) of the server config to get,
specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsLocationsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsListRequest object.
Fields:
parent: Contains the name of the resource requested. Specified in the
format 'projects/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsOperationsGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsOperationsGetRequest object.
Fields:
name: The name (project, location, operation id) of the operation to get.
Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1, required=True)
operationId = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsOperationsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsOperationsListRequest object.
Fields:
parent: The parent (project and location) where the operations will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or `-`
for all zones. This field has been deprecated and replaced by the parent
field.
"""
parent = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsZonesClustersDeleteRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to delete. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesClustersGetRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to retrieve. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesClustersListRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersListRequest object.
Fields:
parent: The parent (project and location) where the clusters will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones. This field has been deprecated and replaced by the
parent field.
"""
parent = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersNodePoolsDeleteRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to delete. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to delete. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ContainerProjectsZonesClustersNodePoolsGetRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to get. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool. This field has been
deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ContainerProjectsZonesClustersNodePoolsListRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsListRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
parent: The parent (project, location, cluster id) where the node pools
will be listed. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1, required=True)
parent = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesGetServerconfigRequest(_messages.Message):
r"""A ContainerProjectsZonesGetServerconfigRequest object.
Fields:
name: The name (project and location) of the server config to get,
specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesOperationsGetRequest(_messages.Message):
r"""A ContainerProjectsZonesOperationsGetRequest object.
Fields:
name: The name (project, location, operation id) of the operation to get.
Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
operationId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesOperationsListRequest(_messages.Message):
r"""A ContainerProjectsZonesOperationsListRequest object.
Fields:
parent: The parent (project and location) where the operations will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or `-`
for all zones. This field has been deprecated and replaced by the parent
field.
"""
parent = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class CostManagementConfig(_messages.Message):
r"""Configuration for fine-grained cost management feature.
Fields:
enabled: Whether the feature is enabled or not.
"""
enabled = _messages.BooleanField(1)
class CreateClusterRequest(_messages.Message):
r"""CreateClusterRequest creates a cluster.
Fields:
cluster: A [cluster resource](/container-
engine/reference/rest/v1alpha1/projects.zones.clusters)
parent: The parent (project and location) where the cluster will be
created. Specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
cluster = _messages.MessageField('Cluster', 1)
parent = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
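# Illustrative sketch, not part of the generated bindings: creating a cluster
# using the non-deprecated parent field rather than projectId/zone. The
# project, location, and cluster values are placeholders.
def _example_create_cluster_request():
  return CreateClusterRequest(
      parent='projects/my-project/locations/us-central1',
      cluster=Cluster(name='example-cluster', initialNodeCount=3))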
class CreateNodePoolRequest(_messages.Message):
r"""CreateNodePoolRequest creates a node pool for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
nodePool: The node pool to create.
parent: The parent (project, location, cluster id) where the node pool
will be created. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1)
nodePool = _messages.MessageField('NodePool', 2)
parent = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class CustomImageConfig(_messages.Message):
r"""CustomImageConfig contains the information
Fields:
image: The name of the image to use for this node.
imageFamily: The name of the image family to use for this node.
imageProject: The project containing the image to use for this node.
"""
image = _messages.StringField(1)
imageFamily = _messages.StringField(2)
imageProject = _messages.StringField(3)
class DailyMaintenanceWindow(_messages.Message):
r"""Time window specified for daily maintenance operations.
Fields:
duration: [Output only] Duration of the time window, automatically chosen
to be smallest possible in the given scenario.
startTime: Time within the maintenance window to start the maintenance
operations. It must be in format "HH:MM", where HH : [00-23] and MM :
[00-59] GMT.
"""
duration = _messages.StringField(1)
startTime = _messages.StringField(2)
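# Illustrative sketch, not part of the generated bindings: a daily window
# starting at 03:00 GMT, following the "HH:MM" format documented above. The
# duration field is output only and is therefore not set.
def _example_daily_maintenance_window():
  return DailyMaintenanceWindow(startTime='03:00')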
class DatabaseEncryption(_messages.Message):
r"""Configuration of etcd encryption.
Enums:
StateValueValuesEnum: Denotes the state of etcd encryption.
Fields:
keyName: Name of CloudKMS key to use for the encryption of secrets in
etcd. Ex. projects/my-project/locations/global/keyRings/my-
ring/cryptoKeys/my-key
state: Denotes the state of etcd encryption.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Denotes the state of etcd encryption.
Values:
UNKNOWN: Should never be set
ENCRYPTED: Secrets in etcd are encrypted.
DECRYPTED: Secrets in etcd are stored in plain text (at etcd level) -
this is unrelated to Google Compute Engine level full disk encryption.
"""
UNKNOWN = 0
ENCRYPTED = 1
DECRYPTED = 2
keyName = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
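# Illustrative sketch, not part of the generated bindings: requesting etcd
# secret encryption with a CloudKMS key named in the format shown in the
# docstring above (the project, ring, and key names are placeholders).
def _example_database_encryption():
  return DatabaseEncryption(
      state=DatabaseEncryption.StateValueValuesEnum.ENCRYPTED,
      keyName=('projects/my-project/locations/global/keyRings/my-ring/'
               'cryptoKeys/my-key'))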
class DefaultSnatStatus(_messages.Message):
r"""DefaultSnatStatus contains the desired state of whether default sNAT
should be disabled on the cluster.
Fields:
disabled: Disables cluster default sNAT rules.
"""
disabled = _messages.BooleanField(1)
class DnsCacheConfig(_messages.Message):
r"""Configuration for NodeLocal DNSCache
Fields:
enabled: Whether NodeLocal DNSCache is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is an empty JSON object `{}`.
"""
class FeatureConfig(_messages.Message):
r"""FeatureConfig is the configuration for a specific feature including the
definition of the feature as well as the tier in which it resides.
Enums:
FeatureValueValuesEnum: The feature that is being configured with this
value.
TierValueValuesEnum: The tier in which the configured feature resides.
Fields:
feature: The feature that is being configured with this value.
tier: The tier in which the configured feature resides.
"""
class FeatureValueValuesEnum(_messages.Enum):
r"""The feature that is being configured with this value.
Values:
DEFAULT_FEATURE: DEFAULT_FEATURE is the default zero value of the
Feature. This value is valid.
VERTICAL_POD_AUTOSCALER: The vertical pod autoscaling feature.
NODE_AUTO_PROVISIONING: The node auto provisioning feature.
BINARY_AUTHORIZATION: The binary authorization feature.
RESOURCE_LABELS: The resource labels feature.
USAGE_METERING: The GKE usage metering feature.
CLOUD_RUN_ON_GKE: The Cloud Run on GKE feature.
"""
DEFAULT_FEATURE = 0
VERTICAL_POD_AUTOSCALER = 1
NODE_AUTO_PROVISIONING = 2
BINARY_AUTHORIZATION = 3
RESOURCE_LABELS = 4
USAGE_METERING = 5
CLOUD_RUN_ON_GKE = 6
class TierValueValuesEnum(_messages.Enum):
r"""The tier in which the configured feature resides.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
feature = _messages.EnumField('FeatureValueValuesEnum', 1)
tier = _messages.EnumField('TierValueValuesEnum', 2)
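# Illustrative sketch, not part of the generated bindings: configuring the
# vertical pod autoscaling feature in the standard tier.
def _example_feature_config():
  return FeatureConfig(
      feature=FeatureConfig.FeatureValueValuesEnum.VERTICAL_POD_AUTOSCALER,
      tier=FeatureConfig.TierValueValuesEnum.STANDARD)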
class GcePersistentDiskCsiDriverConfig(_messages.Message):
r"""Configuration for the GCE PD CSI driver. This option can only be enabled
at cluster creation time.
Fields:
enabled: Whether the GCE PD CSI driver is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class GetJSONWebKeysResponse(_messages.Message):
r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc
7517
Fields:
cacheHeader: OnePlatform automagically extracts this field and uses it to
set the HTTP Cache-Control header.
keys: The public component of the keys used by the cluster to sign token
requests.
"""
cacheHeader = _messages.MessageField('HttpCacheControlResponseHeader', 1)
keys = _messages.MessageField('Jwk', 2, repeated=True)
class GetOpenIDConfigResponse(_messages.Message):
r"""GetOpenIDConfigResponse is an OIDC discovery document for the cluster.
See the OpenID Connect Discovery 1.0 specification for details.
Fields:
cacheHeader: OnePlatform automagically extracts this field and uses it to
set the HTTP Cache-Control header.
claims_supported: Supported claims.
grant_types: Supported grant types.
id_token_signing_alg_values_supported: supported ID Token signing
Algorithms.
issuer: OIDC Issuer.
jwks_uri: JSON Web Key uri.
response_types_supported: Supported response types.
subject_types_supported: Supported subject types.
"""
cacheHeader = _messages.MessageField('HttpCacheControlResponseHeader', 1)
claims_supported = _messages.StringField(2, repeated=True)
grant_types = _messages.StringField(3, repeated=True)
id_token_signing_alg_values_supported = _messages.StringField(4, repeated=True)
issuer = _messages.StringField(5)
jwks_uri = _messages.StringField(6)
response_types_supported = _messages.StringField(7, repeated=True)
subject_types_supported = _messages.StringField(8, repeated=True)
class HorizontalPodAutoscaling(_messages.Message):
r"""Configuration options for the horizontal pod autoscaling feature, which
increases or decreases the number of replica pods a replication controller
has based on the resource usage of the existing pods.
Fields:
disabled: Whether the Horizontal Pod Autoscaling feature is enabled in the
cluster. When enabled, it ensures that metrics are collected into
Stackdriver Monitoring.
"""
disabled = _messages.BooleanField(1)
class HttpCacheControlResponseHeader(_messages.Message):
r"""RFC-2616: cache control support
Fields:
age: 14.6 response cache age, in seconds since the response is generated
directive: 14.9 request and response directives
expires: 14.21 response cache expires, in RFC 1123 date format
"""
age = _messages.IntegerField(1)
directive = _messages.StringField(2)
expires = _messages.StringField(3)
class HttpLoadBalancing(_messages.Message):
r"""Configuration options for the HTTP (L7) load balancing controller addon,
which makes it easy to set up HTTP load balancers for services in a cluster.
Fields:
disabled: Whether the HTTP Load Balancing controller is enabled in the
cluster. When enabled, it runs a small pod in the cluster that manages
the load balancers.
"""
disabled = _messages.BooleanField(1)
class IPAllocationPolicy(_messages.Message):
r"""Configuration for controlling how IPs are allocated in the cluster.
Fields:
allowRouteOverlap: If true, allow allocation of cluster CIDR ranges that
overlap with certain kinds of network routes. By default we do not allow
cluster CIDR ranges to intersect with any user declared routes. With
allow_route_overlap == true, we allow overlapping with CIDR ranges that
are larger than the cluster CIDR range. If this field is set to true,
then cluster and services CIDRs must be fully-specified (e.g.
`10.96.0.0/14`, but not `/14`), which means: 1) When `use_ip_aliases` is
true, `cluster_ipv4_cidr_block` and `services_ipv4_cidr_block` must
be fully-specified. 2) When `use_ip_aliases` is false,
`cluster.cluster_ipv4_cidr` must be fully-specified.
clusterIpv4Cidr: This field is deprecated, use cluster_ipv4_cidr_block.
clusterIpv4CidrBlock: The IP address range for the cluster pod IPs. If
this field is set, then `cluster.cluster_ipv4_cidr` must be left blank.
This field is only applicable when `use_ip_aliases` is true. Set to
blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
clusterSecondaryRangeName: The name of the secondary range to be used for
the cluster CIDR block. The secondary range will be used for pod IP
addresses. This must be an existing secondary range associated with the
cluster subnetwork. This field is only applicable if use_ip_aliases is
true and create_subnetwork is false.
createSubnetwork: Whether a new subnetwork will be created automatically
for the cluster. This field is only applicable when `use_ip_aliases` is
true.
nodeIpv4Cidr: This field is deprecated, use node_ipv4_cidr_block.
nodeIpv4CidrBlock: The IP address range of the instance IPs in this
cluster. This is applicable only if `create_subnetwork` is true. Set
to blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
servicesIpv4Cidr: This field is deprecated, use services_ipv4_cidr_block.
servicesIpv4CidrBlock: The IP address range of the services IPs in this
cluster. If blank, a range will be automatically chosen with the default
size. This field is only applicable when `use_ip_aliases` is true. Set
to blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
servicesSecondaryRangeName: The name of the secondary range to be used
for the services CIDR block. The secondary range will be used for
service ClusterIPs. This must be an existing secondary range associated
with the cluster subnetwork. This field is only applicable if
use_ip_aliases is true and create_subnetwork is false.
subnetworkName: A custom subnetwork name to be used if `create_subnetwork`
is true. If this field is empty, then an automatic name will be chosen
for the new subnetwork.
tpuIpv4CidrBlock: The IP address range of the Cloud TPUs in this cluster.
If unspecified, a range will be automatically chosen with the default
size. This field is only applicable when `use_ip_aliases` is true, and
it must not be specified when the `tpu_use_service_networking` is
`true`. Unspecified to have a range chosen with the default size `/20`.
Set to /netmask (e.g. `/14`) to have a range chosen with a specific
netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private
networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick
a specific range to use.
tpuUseServiceNetworking: Enable Cloud TPU's Service Networking mode. In
this mode, the CIDR blocks used by the Cloud TPUs will be allocated and
managed by Service Networking, instead of GKE. This field must be
`false` when `tpu_ipv4_cidr_block` is specified.
useIpAliases: Whether alias IPs will be used for pod IPs in the cluster.
This is used in conjunction with use_routes. It cannot be true if
use_routes is true. If both use_ip_aliases and use_routes are false,
then the server picks the default IP allocation mode.
"""
allowRouteOverlap = _messages.BooleanField(1)
clusterIpv4Cidr = _messages.StringField(2)
clusterIpv4CidrBlock = _messages.StringField(3)
clusterSecondaryRangeName = _messages.StringField(4)
createSubnetwork = _messages.BooleanField(5)
nodeIpv4Cidr = _messages.StringField(6)
nodeIpv4CidrBlock = _messages.StringField(7)
servicesIpv4Cidr = _messages.StringField(8)
servicesIpv4CidrBlock = _messages.StringField(9)
servicesSecondaryRangeName = _messages.StringField(10)
subnetworkName = _messages.StringField(11)
tpuIpv4CidrBlock = _messages.StringField(12)
tpuUseServiceNetworking = _messages.BooleanField(13)
useIpAliases = _messages.BooleanField(14)
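# Illustrative sketch, not part of the generated bindings: a VPC-native (alias
# IP) allocation policy that lets the server pick ranges of specific sizes by
# passing bare netmasks, as described in the docstring above.
def _example_ip_allocation_policy():
  return IPAllocationPolicy(
      useIpAliases=True,
      clusterIpv4CidrBlock='/14',
      servicesIpv4CidrBlock='/20')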
class IntraNodeVisibilityConfig(_messages.Message):
r"""IntraNodeVisibilityConfig contains the desired config of the intra-node
visibility on this cluster.
Fields:
enabled: Enables intra node visibility for this cluster.
"""
enabled = _messages.BooleanField(1)
class IstioConfig(_messages.Message):
r"""Configuration options for Istio addon.
Enums:
AuthValueValuesEnum: The specified Istio auth mode, either none, or mutual
TLS.
Fields:
auth: The specified Istio auth mode, either none, or mutual TLS.
csmMeshName: DEPRECATED: No longer used.
disabled: Whether Istio is enabled for this cluster.
"""
class AuthValueValuesEnum(_messages.Enum):
r"""The specified Istio auth mode, either none, or mutual TLS.
Values:
AUTH_NONE: auth not enabled
AUTH_MUTUAL_TLS: auth mutual TLS enabled
"""
AUTH_NONE = 0
AUTH_MUTUAL_TLS = 1
auth = _messages.EnumField('AuthValueValuesEnum', 1)
csmMeshName = _messages.StringField(2)
disabled = _messages.BooleanField(3)
class Jwk(_messages.Message):
r"""Jwk is a JSON Web Key as specified in RFC 7517
Fields:
alg: Algorithm.
crv: Used for ECDSA keys.
e: Used for RSA keys.
kid: Key ID.
kty: Key Type.
n: Used for RSA keys.
use: Permitted uses for the public keys.
x: Used for ECDSA keys.
y: Used for ECDSA keys.
"""
alg = _messages.StringField(1)
crv = _messages.StringField(2)
e = _messages.StringField(3)
kid = _messages.StringField(4)
kty = _messages.StringField(5)
n = _messages.StringField(6)
use = _messages.StringField(7)
x = _messages.StringField(8)
y = _messages.StringField(9)
class KalmConfig(_messages.Message):
r"""Configuration options for the KALM addon.
Fields:
enabled: Whether KALM is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class KubernetesDashboard(_messages.Message):
r"""Configuration for the Kubernetes Dashboard.
Fields:
disabled: Whether the Kubernetes Dashboard is enabled for this cluster.
"""
disabled = _messages.BooleanField(1)
class LegacyAbac(_messages.Message):
r"""Configuration for the legacy Attribute Based Access Control
authorization mode.
Fields:
enabled: Whether the ABAC authorizer is enabled for this cluster. When
enabled, identities in the system, including service accounts, nodes,
and controllers, will have statically granted permissions beyond those
provided by the RBAC configuration or IAM.
"""
enabled = _messages.BooleanField(1)
class LinuxNodeConfig(_messages.Message):
r"""Parameters that can be configured on Linux nodes.
Messages:
SysctlsValue: The Linux kernel parameters to be applied to the nodes and
all pods running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries
net.ipv4.tcp_tw_reuse net.ipv4.udp_mem net.ipv4.udp_rmem_min
net.ipv4.udp_wmem_min net.netfilter.nf_conntrack_generic_timeout
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
Fields:
sysctls: The Linux kernel parameters to be applied to the nodes and all
pods running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries
net.ipv4.tcp_tw_reuse net.ipv4.udp_mem net.ipv4.udp_rmem_min
net.ipv4.udp_wmem_min net.netfilter.nf_conntrack_generic_timeout
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class SysctlsValue(_messages.Message):
r"""The Linux kernel parameters to be applied to the nodes and all pods
running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_tw_reuse
net.ipv4.udp_mem net.ipv4.udp_rmem_min net.ipv4.udp_wmem_min
net.netfilter.nf_conntrack_generic_timeout net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
Messages:
AdditionalProperty: An additional property for a SysctlsValue object.
Fields:
additionalProperties: Additional properties of type SysctlsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a SysctlsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
sysctls = _messages.MessageField('SysctlsValue', 1)
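# Illustrative sketch, not part of the generated bindings: the sysctls map is
# encoded as AdditionalProperty entries whose keys must come from the
# supported parameter list above; the value below is a placeholder.
def _example_linux_node_config():
  return LinuxNodeConfig(sysctls=LinuxNodeConfig.SysctlsValue(
      additionalProperties=[
          LinuxNodeConfig.SysctlsValue.AdditionalProperty(
              key='net.core.somaxconn', value='2048'),
      ]))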
class ListClustersResponse(_messages.Message):
r"""ListClustersResponse is the result of ListClustersRequest.
Fields:
clusters: A list of clusters in the project in the specified zone, or
across all zones.
missingZones: If any zones are listed here, the list of clusters returned
may be missing those zones.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
missingZones = _messages.StringField(2, repeated=True)
class ListLocationsResponse(_messages.Message):
r"""ListLocationsResponse returns the list of all GKE locations and their
recommendation state.
Fields:
locations: A full list of GKE locations.
nextPageToken: Only return ListLocationsResponse that occur after the
page_token. This value should be populated from the
ListLocationsResponse.next_page_token if that response token was set
(which happens when listing more Locations than fit in a single
ListLocationsResponse). This is currently not used and will be honored
once we use pagination.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListNodePoolsResponse(_messages.Message):
r"""ListNodePoolsResponse is the result of ListNodePoolsRequest.
Fields:
nodePools: A list of node pools for a cluster.
"""
nodePools = _messages.MessageField('NodePool', 1, repeated=True)
class ListOperationsResponse(_messages.Message):
r"""ListOperationsResponse is the result of ListOperationsRequest.
Fields:
missingZones: If any zones are listed here, the list of operations
returned may be missing the operations from those zones.
operations: A list of operations in the project in the specified zone.
"""
missingZones = _messages.StringField(1, repeated=True)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListUsableSubnetworksResponse(_messages.Message):
r"""ListUsableSubnetworksResponse is the response of
ListUsableSubnetworksRequest.
Fields:
nextPageToken: This token allows you to get the next page of results for
list requests. If the number of results is larger than `page_size`, use
the `next_page_token` as a value for the query parameter `page_token` in
the next request. The value will become empty when there are no more
pages.
subnetworks: A list of usable subnetworks in the specified network
project.
"""
nextPageToken = _messages.StringField(1)
subnetworks = _messages.MessageField('UsableSubnetwork', 2, repeated=True)
class LocalSsdVolumeConfig(_messages.Message):
r"""LocalSsdVolumeConfig is comprised of three fields, count, type, and
format. Count is the number of ssds of this grouping requested, type is the
interface type and is either nvme or scsi, and format is whether the disk is
to be formatted with a filesystem or left for block storage
Enums:
FormatValueValuesEnum: Format of the local SSD (fs/block).
Fields:
count: Number of local SSDs to use
format: Format of the local SSD (fs/block).
type: Local SSD interface to use (nvme/scsi).
"""
class FormatValueValuesEnum(_messages.Enum):
r"""Format of the local SSD (fs/block).
Values:
FORMAT_UNSPECIFIED: Default value
FS: File system formatted
BLOCK: Raw block
"""
FORMAT_UNSPECIFIED = 0
FS = 1
BLOCK = 2
count = _messages.IntegerField(1, variant=_messages.Variant.INT32)
format = _messages.EnumField('FormatValueValuesEnum', 2)
type = _messages.StringField(3)
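# Illustrative sketch (not part of the generated module): constructing a
# LocalSsdVolumeConfig from the fields and enum defined above. The specific
# count and interface values are hypothetical.
#
#   ssd_volume = LocalSsdVolumeConfig(
#       count=2,
#       type='nvme',
#       format=LocalSsdVolumeConfig.FormatValueValuesEnum.FS)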
class Location(_messages.Message):
r"""Location returns the location name, and if the location is recommended
for GKE cluster scheduling.
Enums:
TypeValueValuesEnum: Contains the type of location this Location is for.
Regional or Zonal.
Fields:
name: Contains the name of the resource requested. Specified in the format
'projects/*/locations/*'.
recommended: Recommended is a bool combining the drain state of the
location (i.e., has the region been drained manually?), and the stockout
status of any zone according to Zone Advisor. This will be internal only
for use by pantheon.
type: Contains the type of location this Location is for. Regional or
Zonal.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Contains the type of location this Location is for. Regional or Zonal.
Values:
LOCATION_TYPE_UNSPECIFIED: LOCATION_TYPE_UNSPECIFIED means the location
type was not determined.
ZONE: A GKE Location where Zonal clusters can be created.
REGION: A GKE Location where Regional clusters can be created.
"""
LOCATION_TYPE_UNSPECIFIED = 0
ZONE = 1
REGION = 2
name = _messages.StringField(1)
recommended = _messages.BooleanField(2)
type = _messages.EnumField('TypeValueValuesEnum', 3)
class MaintenancePolicy(_messages.Message):
r"""MaintenancePolicy defines the maintenance policy to be used for the
cluster.
Fields:
resourceVersion: A hash identifying the version of this policy, so that
updates to fields of the policy won't accidentally undo intermediate
changes (and so that users of the API unaware of some fields won't
accidentally remove other fields). Make a <code>get()</code> request to
the cluster to get the current resource version and include it with
requests to set the policy.
window: Specifies the maintenance window in which maintenance may be
performed.
"""
resourceVersion = _messages.StringField(1)
window = _messages.MessageField('MaintenanceWindow', 2)
class MaintenanceWindow(_messages.Message):
r"""MaintenanceWindow defines the maintenance window to be used for the
cluster.
Messages:
MaintenanceExclusionsValue: Exceptions to maintenance window. Non-
emergency maintenance should not occur in these windows.
Fields:
dailyMaintenanceWindow: DailyMaintenanceWindow specifies a daily
maintenance operation window.
maintenanceExclusions: Exceptions to maintenance window. Non-emergency
maintenance should not occur in these windows.
recurringWindow: RecurringWindow specifies some number of recurring time
periods for maintenance to occur. The time windows may be overlapping.
If no maintenance windows are set, maintenance can occur at any time.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MaintenanceExclusionsValue(_messages.Message):
r"""Exceptions to maintenance window. Non-emergency maintenance should not
occur in these windows.
Messages:
AdditionalProperty: An additional property for a
MaintenanceExclusionsValue object.
Fields:
additionalProperties: Additional properties of type
MaintenanceExclusionsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MaintenanceExclusionsValue object.
Fields:
key: Name of the additional property.
value: A TimeWindow attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('TimeWindow', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
dailyMaintenanceWindow = _messages.MessageField('DailyMaintenanceWindow', 1)
maintenanceExclusions = _messages.MessageField('MaintenanceExclusionsValue', 2)
recurringWindow = _messages.MessageField('RecurringTimeWindow', 3)
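# Illustrative sketch (not part of the generated module): populating the
# MaintenanceExclusionsValue map defined above. It assumes TimeWindow (defined
# elsewhere in this module) exposes startTime/endTime string fields, as in the
# public container API; the exclusion name and timestamps are hypothetical.
#
#   exclusions = MaintenanceWindow.MaintenanceExclusionsValue(
#       additionalProperties=[
#           MaintenanceWindow.MaintenanceExclusionsValue.AdditionalProperty(
#               key='holiday-freeze',
#               value=TimeWindow(startTime='2019-12-24T00:00:00Z',
#                                endTime='2019-12-26T00:00:00Z'))])
#   window = MaintenanceWindow(maintenanceExclusions=exclusions)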
class MasterAuth(_messages.Message):
r"""The authentication information for accessing the master endpoint.
Authentication can be done using HTTP basic auth or using client
certificates.
Fields:
clientCertificate: [Output only] Base64-encoded public certificate used by
clients to authenticate to the cluster endpoint.
clientCertificateConfig: Configuration for client certificate
authentication on the cluster. For clusters before v1.12, if no
configuration is specified, a client certificate is issued.
clientKey: [Output only] Base64-encoded private key used by clients to
authenticate to the cluster endpoint.
clusterCaCertificate: [Output only] Base64-encoded public certificate that
is the root of trust for the cluster.
password: The password to use for HTTP basic authentication to the master
endpoint. Because the master endpoint is open to the Internet, you
should create a strong password. If a password is provided for cluster
creation, username must be non-empty.
username: The username to use for HTTP basic authentication to the master
endpoint. For clusters v1.6.0 and later, basic authentication can be
disabled by leaving username unspecified (or setting it to the empty
string).
"""
clientCertificate = _messages.StringField(1)
clientCertificateConfig = _messages.MessageField('ClientCertificateConfig', 2)
clientKey = _messages.StringField(3)
clusterCaCertificate = _messages.StringField(4)
password = _messages.StringField(5)
username = _messages.StringField(6)
class MasterAuthorizedNetworksConfig(_messages.Message):
r"""Configuration options for the master authorized networks feature.
Enabled master authorized networks will disallow all external traffic to
access Kubernetes master through HTTPS except traffic from the given CIDR
blocks, Google Compute Engine Public IPs and Google Prod IPs.
Fields:
cidrBlocks: cidr_blocks define up to 50 external networks that could
access Kubernetes master through HTTPS.
enabled: Whether or not master authorized networks is enabled.
"""
cidrBlocks = _messages.MessageField('CidrBlock', 1, repeated=True)
enabled = _messages.BooleanField(2)
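# Illustrative sketch (not part of the generated module): enabling master
# authorized networks. It assumes CidrBlock (defined elsewhere in this module)
# exposes a cidrBlock string field, as in the public container API; the CIDR
# range shown is hypothetical.
#
#   authorized_networks = MasterAuthorizedNetworksConfig(
#       enabled=True,
#       cidrBlocks=[CidrBlock(cidrBlock='203.0.113.0/24')])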
class MaxPodsConstraint(_messages.Message):
r"""Constraints applied to pods.
Fields:
maxPodsPerNode: Constraint enforced on the max num of pods per node.
"""
maxPodsPerNode = _messages.IntegerField(1)
class Metric(_messages.Message):
r"""Progress metric is (string, int|float|string) pair.
Fields:
doubleValue: For metrics with floating point value.
intValue: For metrics with integer value.
name: Required. Metric name, e.g., "nodes total", "percent done".
stringValue: For metrics with custom values (ratios, visual progress,
etc.).
"""
doubleValue = _messages.FloatField(1)
intValue = _messages.IntegerField(2)
name = _messages.StringField(3)
stringValue = _messages.StringField(4)
class NetworkConfig(_messages.Message):
r"""Parameters for cluster networking.
Fields:
disableDefaultSnat: Whether the cluster disables default in-node sNAT
rules. In-node sNAT rules will be disabled when this flag is true. When
set to false, default IP masquerade rules will be applied to the nodes
to prevent sNAT on cluster internal traffic. Deprecated. Use
default_snat_status instead
enableCloudNat: Whether GKE Cloud NAT is enabled for this cluster.
Requires that the cluster has already set
IPAllocationPolicy.use_ip_aliases to true. Deprecated: use
disable_default_snat instead.
enableIntraNodeVisibility: Whether Intra-node visibility is enabled for
this cluster. This enables flow logs for same node pod to pod traffic.
enablePrivateIpv6Access: Whether or not Private IPv6 access is enabled.
This enables direct connectivity from GKE pods to Google Cloud services
over gRPC.
enableSharedNetwork: Deprecated: This flag doesn't need to be flipped for
using shared VPC and it has no effect.
network: Output only. The relative name of the Google Compute Engine
network(/compute/docs/networks-and-firewalls#networks) to which the
cluster is connected. Example: projects/my-project/global/networks/my-
network
subnetwork: Output only. The relative name of the Google Compute Engine
[subnetwork](/compute/docs/vpc) to which the cluster is connected.
Example: projects/my-project/regions/us-central1/subnetworks/my-subnet
"""
disableDefaultSnat = _messages.BooleanField(1)
enableCloudNat = _messages.BooleanField(2)
enableIntraNodeVisibility = _messages.BooleanField(3)
enablePrivateIpv6Access = _messages.BooleanField(4)
enableSharedNetwork = _messages.BooleanField(5)
network = _messages.StringField(6)
subnetwork = _messages.StringField(7)
class NetworkPolicy(_messages.Message):
r"""Configuration options for the NetworkPolicy feature.
https://kubernetes.io/docs/concepts/services-networking/networkpolicies/
Enums:
ProviderValueValuesEnum: The selected network policy provider.
Fields:
enabled: Whether network policy is enabled on the cluster.
provider: The selected network policy provider.
"""
class ProviderValueValuesEnum(_messages.Enum):
r"""The selected network policy provider.
Values:
PROVIDER_UNSPECIFIED: Not set
CALICO: Tigera (Calico Felix).
"""
PROVIDER_UNSPECIFIED = 0
CALICO = 1
enabled = _messages.BooleanField(1)
provider = _messages.EnumField('ProviderValueValuesEnum', 2)
class NetworkPolicyConfig(_messages.Message):
r"""Configuration for NetworkPolicy. This only tracks whether the addon is
enabled or not on the Master, it does not track whether network policy is
enabled for the nodes.
Fields:
disabled: Whether the NetworkPolicy addon is disabled for this cluster.
"""
disabled = _messages.BooleanField(1)
class NodeConfig(_messages.Message):
r"""Parameters that describe the nodes in a cluster.
Messages:
LabelsValue: The map of Kubernetes labels (key/value pairs) to be applied
to each node. These will be added in addition to any default label(s) that
Kubernetes may apply to the node. In case of conflict in label keys, the
applied set may differ depending on the Kubernetes version -- it's best
to assume the behavior is undefined and conflicts should be avoided. For
more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
MetadataValue: The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
than 128 bytes in length. These are reflected as part of a URL in the
metadata server. Additionally, to avoid ambiguity, keys must not
conflict with any other metadata keys for the project or be one of the
reserved keys: "cluster-location" "cluster-name" "cluster-uid"
"configure-sh" "containerd-configure-sh" "enable-os-login" "gci-
ensure-gke-docker" "gci-metrics-enabled" "gci-update-strategy"
"instance-template" "kube-env" "startup-script" "user-data"
"disable-address-manager" "windows-startup-script-ps1" "common-psm1"
"k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" "serial-
port-logging-enable" Values are free-form strings, and only have meaning
as interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be less than
or equal to 32 KB. The total size of all keys and values must be less
than 512 KB.
Fields:
accelerators: A list of hardware accelerators to be attached to each node.
See https://cloud.google.com/compute/docs/gpus for more information
about support for GPUs.
bootDiskKmsKey: The Customer Managed Encryption Key used to encrypt the
boot disk attached to each node in the node pool. This should be of the
form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]
/cryptoKeys/[KEY_NAME]. For more information about protecting resources
with Cloud KMS Keys please see:
https://cloud.google.com/compute/docs/disks/customer-managed-encryption
diskSizeGb: Size of the disk attached to each node, specified in GB. The
smallest allowed disk size is 10GB. If unspecified, the default disk
size is 100GB.
diskType: Type of the disk attached to each node (e.g. 'pd-standard' or
'pd-ssd') If unspecified, the default disk type is 'pd-standard'
imageType: The image type to use for this node. Note that for a given
image type, the latest version of it will be used.
kubeletConfig: Node kubelet configs.
labels: The map of Kubernetes labels (key/value pairs) to be applied to
each node. These will be added in addition to any default label(s) that
Kubernetes may apply to the node. In case of conflict in label keys, the
applied set may differ depending on the Kubernetes version -- it's best
to assume the behavior is undefined and conflicts should be avoided. For
more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
linuxNodeConfig: Parameters that can be configured on Linux nodes.
localSsdCount: The number of local SSD disks to be attached to the node.
The limit for this value is dependent upon the maximum number of disks
available on a machine per zone. See:
https://cloud.google.com/compute/docs/disks/local-ssd for more
information.
localSsdVolumeConfigs: Parameters for using Local SSD with extra options
as hostpath or local volumes
machineType: The name of a Google Compute Engine [machine
type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If
unspecified, the default machine type is `n1-standard-1`.
metadata: The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
than 128 bytes in length. These are reflected as part of a URL in the
metadata server. Additionally, to avoid ambiguity, keys must not
conflict with any other metadata keys for the project or be one of the
reserved keys: "cluster-location" "cluster-name" "cluster-uid"
"configure-sh" "containerd-configure-sh" "enable-os-login" "gci-
ensure-gke-docker" "gci-metrics-enabled" "gci-update-strategy"
"instance-template" "kube-env" "startup-script" "user-data"
"disable-address-manager" "windows-startup-script-ps1" "common-psm1"
"k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" "serial-
port-logging-enable" Values are free-form strings, and only have meaning
as interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be less than
or equal to 32 KB. The total size of all keys and values must be less
than 512 KB.
minCpuPlatform: Minimum CPU platform to be used by this instance. The
instance may be scheduled on the specified or newer CPU platform.
Applicable values are the friendly names of CPU platforms, such as
<code>minCpuPlatform: "Intel Haswell"</code> or
<code>minCpuPlatform: "Intel Sandy Bridge"</code>. For more
information, read [how to specify min CPU
platform](https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform)
nodeGroup: The optional node group. Setting this field will assign
instances of this pool to run on the specified node group. This is
useful for running workloads on [sole tenant
nodes](/compute/docs/nodes/)
nodeImageConfig: The node image configuration to use for this node pool.
Note that this is only applicable for node pools using
image_type=CUSTOM.
oauthScopes: The set of Google API scopes to be made available on all of
the node VMs under the "default" service account. The following scopes
are recommended, but not required, and by default are not included: *
`https://www.googleapis.com/auth/compute` is required for mounting
persistent storage on your nodes. *
`https://www.googleapis.com/auth/devstorage.read_only` is required for
communicating with **gcr.io** (the [Google Container Registry
](/container-registry/)). If unspecified, no scopes are added, unless
Cloud Logging or Cloud Monitoring are enabled, in which case their
required scopes will be added.
preemptible: Whether the nodes are created as preemptible VM instances.
See: https://cloud.google.com/compute/docs/instances/preemptible for
more information about preemptible VM instances.
reservationAffinity: The optional reservation affinity. Setting this field
will apply the specified [Zonal Compute
Reservation](/compute/docs/instances/reserving-zonal-resources) to this
node pool.
sandboxConfig: Sandbox configuration for this node.
serviceAccount: The Google Cloud Platform Service Account to be used by
the node VMs. Specify the email address of the Service Account;
otherwise, if no Service Account is specified, the "default" service
account is used.
shieldedInstanceConfig: Shielded Instance options.
tags: The list of instance tags applied to all nodes. Tags are used to
identify valid sources or targets for network firewalls and are
specified by the client during cluster or node pool creation. Each tag
within the list must comply with RFC1035.
taints: List of kubernetes taints to be applied to each node. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
workloadMetadataConfig: The workload metadata configuration for this node.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""The map of Kubernetes labels (key/value pairs) to be applied to each
node. These will be added in addition to any default label(s) that Kubernetes
may apply to the node. In case of conflict in label keys, the applied set
may differ depending on the Kubernetes version -- it's best to assume the
behavior is undefined and conflicts should be avoided. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""The metadata key/value pairs assigned to instances in the cluster.
Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the reserved keys: "cluster-
location" "cluster-name" "cluster-uid" "configure-sh" "containerd-
configure-sh" "enable-os-login" "gci-ensure-gke-docker" "gci-metrics-
enabled" "gci-update-strategy" "instance-template" "kube-env"
"startup-script" "user-data" "disable-address-manager" "windows-
startup-script-ps1" "common-psm1" "k8s-node-setup-psm1" "install-ssh-
psm1" "user-profile-psm1" "serial-port-logging-enable" Values are free-
form strings, and only have meaning as interpreted by the image running in
the instance. The only restriction placed on them is that each value's
size must be less than or equal to 32 KB. The total size of all keys and
values must be less than 512 KB.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True)
bootDiskKmsKey = _messages.StringField(2)
diskSizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)
diskType = _messages.StringField(4)
imageType = _messages.StringField(5)
kubeletConfig = _messages.MessageField('NodeKubeletConfig', 6)
labels = _messages.MessageField('LabelsValue', 7)
linuxNodeConfig = _messages.MessageField('LinuxNodeConfig', 8)
localSsdCount = _messages.IntegerField(9, variant=_messages.Variant.INT32)
localSsdVolumeConfigs = _messages.MessageField('LocalSsdVolumeConfig', 10, repeated=True)
machineType = _messages.StringField(11)
metadata = _messages.MessageField('MetadataValue', 12)
minCpuPlatform = _messages.StringField(13)
nodeGroup = _messages.StringField(14)
nodeImageConfig = _messages.MessageField('CustomImageConfig', 15)
oauthScopes = _messages.StringField(16, repeated=True)
preemptible = _messages.BooleanField(17)
reservationAffinity = _messages.MessageField('ReservationAffinity', 18)
sandboxConfig = _messages.MessageField('SandboxConfig', 19)
serviceAccount = _messages.StringField(20)
shieldedInstanceConfig = _messages.MessageField('ShieldedInstanceConfig', 21)
tags = _messages.StringField(22, repeated=True)
taints = _messages.MessageField('NodeTaint', 23, repeated=True)
workloadMetadataConfig = _messages.MessageField('WorkloadMetadataConfig', 24)
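# Illustrative sketch (not part of the generated module): a minimal NodeConfig
# using the LabelsValue map pattern defined above. The machine type, disk size,
# scope and label values are hypothetical.
#
#   node_config = NodeConfig(
#       machineType='n1-standard-1',
#       diskSizeGb=100,
#       oauthScopes=['https://www.googleapis.com/auth/devstorage.read_only'],
#       labels=NodeConfig.LabelsValue(additionalProperties=[
#           NodeConfig.LabelsValue.AdditionalProperty(key='env', value='dev')]))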
class NodeKubeletConfig(_messages.Message):
r"""Node kubelet configs. NOTE: This is an Alpha only API.
Fields:
cpuCfsQuota: Enable CPU CFS quota enforcement for containers that specify
CPU limits. If this option is enabled, kubelet uses CFS quota
(https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt) to
enforce container CPU limits. Otherwise, CPU limits will not be enforced
at all. Disable this option to mitigate CPU throttling problems while
still having your pods to be in Guaranteed QoS class by specifying the
CPU limits. The default value is 'true' if unspecified.
cpuCfsQuotaPeriod: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
The string must be a sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms". Valid time units are "ns",
"us" (or "\xb5s"), "ms", "s", "m", "h". The value must be a positive
duration.
cpuManagerPolicy: Control the CPU management policy on the node. See
https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-
policies/ The following values are allowed. - "none": the default,
which represents the existing scheduling behavior. - "static": allows
pods with certain resource characteristics to be granted
increased CPU affinity and exclusivity on the node.
"""
cpuCfsQuota = _messages.BooleanField(1)
cpuCfsQuotaPeriod = _messages.StringField(2)
cpuManagerPolicy = _messages.StringField(3)
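# Illustrative sketch (not part of the generated module): a NodeKubeletConfig
# that opts into the static CPU manager policy described above. The quota
# period value is hypothetical but follows the documented duration format.
#
#   kubelet_config = NodeKubeletConfig(
#       cpuManagerPolicy='static',
#       cpuCfsQuota=True,
#       cpuCfsQuotaPeriod='100ms')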
class NodeManagement(_messages.Message):
r"""NodeManagement defines the set of node management services turned on for
the node pool.
Fields:
autoRepair: Whether the nodes will be automatically repaired.
autoUpgrade: Whether the nodes will be automatically upgraded.
upgradeOptions: Specifies the Auto Upgrade knobs for the node pool.
"""
autoRepair = _messages.BooleanField(1)
autoUpgrade = _messages.BooleanField(2)
upgradeOptions = _messages.MessageField('AutoUpgradeOptions', 3)
class NodePool(_messages.Message):
r"""NodePool contains the name and configuration for a cluster's node pool.
Node pools are a set of nodes (i.e., VMs) with a common configuration and
specification, under the control of the cluster master. They may have a set
of Kubernetes labels applied to them, which may be used to reference them
during pod scheduling. They may also be resized up or down, to accommodate
the workload.
Enums:
StatusValueValuesEnum: [Output only] The status of the nodes in this pool
instance.
Fields:
autoscaling: Autoscaler configuration for this NodePool. Autoscaler is
enabled only if a valid configuration is present.
conditions: Which conditions caused the current node pool state.
config: The node configuration of the pool.
initialNodeCount: The initial node count for the pool. You must ensure
that your Compute Engine <a href="/compute/docs/resource-
quotas">resource quota</a> is sufficient for this number of instances.
You must also have available firewall and routes quota.
instanceGroupUrls: [Output only] The resource URLs of the [managed
instance groups](/compute/docs/instance-groups/creating-groups-of-
managed-instances) associated with this node pool.
locations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the NodePool's nodes
should be located.
management: NodeManagement configuration for this NodePool.
maxPodsConstraint: The constraint on the maximum number of pods that can
be run simultaneously on a node in the node pool.
name: The name of the node pool.
podIpv4CidrSize: [Output only] The pod CIDR block size per node in this
node pool.
resourceVersion: Server-defined resource version (etag).
selfLink: [Output only] Server-defined URL for the resource.
status: [Output only] The status of the nodes in this pool instance.
statusMessage: [Output only] Additional information about the current
status of this node pool instance, if available. Deprecated, use the
field conditions instead.
upgradeSettings: Upgrade settings control disruption and speed of the
upgrade.
version: The Kubernetes version of this node.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""[Output only] The status of the nodes in this pool instance.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the node pool is being
created.
RUNNING: The RUNNING state indicates the node pool has been created and
is fully usable.
RUNNING_WITH_ERROR: The RUNNING_WITH_ERROR state indicates the node pool
has been created and is partially usable. Some error state has
occurred and some functionality may be impaired. Customer may need to
reissue a request or trigger a new update.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the node pool, such as upgrading node software. Details
can be found in the `statusMessage` field.
STOPPING: The STOPPING state indicates the node pool is being deleted.
ERROR: The ERROR state indicates the node pool may be unusable. Details
can be found in the `statusMessage` field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RUNNING_WITH_ERROR = 3
RECONCILING = 4
STOPPING = 5
ERROR = 6
autoscaling = _messages.MessageField('NodePoolAutoscaling', 1)
conditions = _messages.MessageField('StatusCondition', 2, repeated=True)
config = _messages.MessageField('NodeConfig', 3)
initialNodeCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(5, repeated=True)
locations = _messages.StringField(6, repeated=True)
management = _messages.MessageField('NodeManagement', 7)
maxPodsConstraint = _messages.MessageField('MaxPodsConstraint', 8)
name = _messages.StringField(9)
podIpv4CidrSize = _messages.IntegerField(10, variant=_messages.Variant.INT32)
resourceVersion = _messages.StringField(11)
selfLink = _messages.StringField(12)
status = _messages.EnumField('StatusValueValuesEnum', 13)
statusMessage = _messages.StringField(14)
upgradeSettings = _messages.MessageField('UpgradeSettings', 15)
version = _messages.StringField(16)
class NodePoolAutoscaling(_messages.Message):
r"""NodePoolAutoscaling contains information required by cluster autoscaler
to adjust the size of the node pool to the current cluster usage.
Fields:
autoprovisioned: Can this node pool be deleted automatically.
enabled: Is autoscaling enabled for this node pool.
maxNodeCount: Maximum number of nodes in the NodePool. Must be >=
min_node_count. There has to be enough quota to scale up the cluster.
minNodeCount: Minimum number of nodes in the NodePool. Must be >= 1 and <=
max_node_count.
"""
autoprovisioned = _messages.BooleanField(1)
enabled = _messages.BooleanField(2)
maxNodeCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
minNodeCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
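# Illustrative sketch (not part of the generated module): autoscaling limits
# for a node pool, with min_node_count <= max_node_count as required above.
# The specific bounds are hypothetical.
#
#   autoscaling = NodePoolAutoscaling(
#       enabled=True,
#       minNodeCount=1,
#       maxNodeCount=5)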
class NodeTaint(_messages.Message):
r"""Kubernetes taint is comprised of three fields: key, value, and effect.
Effect can only be one of three types: NoSchedule, PreferNoSchedule or
NoExecute. For more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Enums:
EffectValueValuesEnum: Effect for taint.
Fields:
effect: Effect for taint.
key: Key for taint.
value: Value for taint.
"""
class EffectValueValuesEnum(_messages.Enum):
r"""Effect for taint.
Values:
EFFECT_UNSPECIFIED: Not set
NO_SCHEDULE: NoSchedule
PREFER_NO_SCHEDULE: PreferNoSchedule
NO_EXECUTE: NoExecute
"""
EFFECT_UNSPECIFIED = 0
NO_SCHEDULE = 1
PREFER_NO_SCHEDULE = 2
NO_EXECUTE = 3
effect = _messages.EnumField('EffectValueValuesEnum', 1)
key = _messages.StringField(2)
value = _messages.StringField(3)
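# Illustrative sketch (not part of the generated module): a NodeTaint using the
# effect enum defined above. The key/value pair is hypothetical.
#
#   taint = NodeTaint(
#       key='dedicated',
#       value='gpu',
#       effect=NodeTaint.EffectValueValuesEnum.NO_SCHEDULE)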
class Operation(_messages.Message):
r"""This operation resource represents operations that may have happened or
are happening on the cluster. All fields are output only.
Enums:
OperationTypeValueValuesEnum: The operation type.
StatusValueValuesEnum: The current status of the operation.
Fields:
clusterConditions: Which conditions caused the current cluster state.
detail: Detailed operation progress, if available.
endTime: [Output only] The time the operation completed, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
location: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/regions-zones/regions-zones#available) or
[region](/compute/docs/regions-zones/regions-zones#available) in which
the cluster resides.
name: The server-assigned ID for the operation.
nodepoolConditions: Which conditions caused the current node pool state.
operationType: The operation type.
progress: Output only. [Output only] Progress information for an
operation.
selfLink: Server-defined URL for the resource.
startTime: [Output only] The time the operation started, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
status: The current status of the operation.
statusMessage: Output only. If an error has occurred, a textual
description of the error.
targetLink: Server-defined URL for the target of the operation.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation is taking
place. This field is deprecated, use location instead.
"""
class OperationTypeValueValuesEnum(_messages.Enum):
r"""The operation type.
Values:
TYPE_UNSPECIFIED: Not set.
CREATE_CLUSTER: Cluster create.
DELETE_CLUSTER: Cluster delete.
UPGRADE_MASTER: A master upgrade.
UPGRADE_NODES: A node upgrade.
REPAIR_CLUSTER: Cluster repair.
UPDATE_CLUSTER: Cluster update.
CREATE_NODE_POOL: Node pool create.
DELETE_NODE_POOL: Node pool delete.
SET_NODE_POOL_MANAGEMENT: Set node pool management.
AUTO_REPAIR_NODES: Automatic node pool repair.
AUTO_UPGRADE_NODES: Automatic node upgrade.
SET_LABELS: Set labels.
SET_MASTER_AUTH: Set/generate master auth materials
SET_NODE_POOL_SIZE: Set node pool size.
SET_NETWORK_POLICY: Updates network policy for a cluster.
SET_MAINTENANCE_POLICY: Set the maintenance policy.
UPDATE_IP_ALLOCATION_POLICY: Update cluster IP allocation policy.
"""
TYPE_UNSPECIFIED = 0
CREATE_CLUSTER = 1
DELETE_CLUSTER = 2
UPGRADE_MASTER = 3
UPGRADE_NODES = 4
REPAIR_CLUSTER = 5
UPDATE_CLUSTER = 6
CREATE_NODE_POOL = 7
DELETE_NODE_POOL = 8
SET_NODE_POOL_MANAGEMENT = 9
AUTO_REPAIR_NODES = 10
AUTO_UPGRADE_NODES = 11
SET_LABELS = 12
SET_MASTER_AUTH = 13
SET_NODE_POOL_SIZE = 14
SET_NETWORK_POLICY = 15
SET_MAINTENANCE_POLICY = 16
UPDATE_IP_ALLOCATION_POLICY = 17
class StatusValueValuesEnum(_messages.Enum):
r"""The current status of the operation.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
ABORTING: The operation is aborting.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
ABORTING = 4
clusterConditions = _messages.MessageField('StatusCondition', 1, repeated=True)
detail = _messages.StringField(2)
endTime = _messages.StringField(3)
location = _messages.StringField(4)
name = _messages.StringField(5)
nodepoolConditions = _messages.MessageField('StatusCondition', 6, repeated=True)
operationType = _messages.EnumField('OperationTypeValueValuesEnum', 7)
progress = _messages.MessageField('OperationProgress', 8)
selfLink = _messages.StringField(9)
startTime = _messages.StringField(10)
status = _messages.EnumField('StatusValueValuesEnum', 11)
statusMessage = _messages.StringField(12)
targetLink = _messages.StringField(13)
zone = _messages.StringField(14)
class OperationProgress(_messages.Message):
r"""Information about operation (or operation stage) progress.
Enums:
StatusValueValuesEnum: Status of an operation stage. Unset for single-
stage operations.
Fields:
metrics: Progress metric bundle, for example: metrics: [{name: "nodes
done", int_value: 15}, {name: "nodes total",
int_value: 32}] or metrics: [{name: "progress", double_value:
0.56}, {name: "progress scale", double_value: 1.0}]
name: A non-parameterized string describing an operation stage. Unset for
single-stage operations.
stages: Substages of an operation or a stage.
status: Status of an operation stage. Unset for single-stage operations.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""Status of an operation stage. Unset for single-stage operations.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
ABORTING: The operation is aborting.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
ABORTING = 4
metrics = _messages.MessageField('Metric', 1, repeated=True)
name = _messages.StringField(2)
stages = _messages.MessageField('OperationProgress', 3, repeated=True)
status = _messages.EnumField('StatusValueValuesEnum', 4)
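# Illustrative sketch (not part of the generated module): an OperationProgress
# carrying the metric bundle shown in the docstring example above.
#
#   progress = OperationProgress(
#       metrics=[Metric(name='nodes done', intValue=15),
#                Metric(name='nodes total', intValue=32)])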
class PodSecurityPolicyConfig(_messages.Message):
r"""Configuration for the PodSecurityPolicy feature.
Fields:
enabled: Enable the PodSecurityPolicy controller for this cluster. If
enabled, pods must be valid under a PodSecurityPolicy to be created.
"""
enabled = _messages.BooleanField(1)
class PremiumConfig(_messages.Message):
r"""PremiumConfig is the configuration for all premium features and tiers.
Fields:
features: The features that GKE provides.
tiers: The tiers that are part of the premium offering.
"""
features = _messages.MessageField('FeatureConfig', 1, repeated=True)
tiers = _messages.MessageField('TierConfig', 2, repeated=True)
class PrivateClusterConfig(_messages.Message):
r"""Configuration options for private clusters.
Fields:
enablePeeringRouteSharing: Whether to enable route sharing over the
network peering.
enablePrivateEndpoint: Whether the master's internal IP address is used as
the cluster endpoint.
enablePrivateNodes: Whether nodes have internal IP addresses only. If
enabled, all nodes are given only RFC 1918 private addresses and
communicate with the master via private networking.
masterIpv4CidrBlock: The IP range in CIDR notation to use for the hosted
master network. This range will be used for assigning internal IP
addresses to the master or set of masters, as well as the ILB VIP. This
range must not overlap with any other ranges in use within the cluster's
network.
peeringName: Output only. The peering name in the customer VPC used by
this cluster.
privateEndpoint: Output only. The internal IP address of this cluster's
endpoint.
publicEndpoint: Output only. The external IP address of this cluster's
endpoint.
"""
enablePeeringRouteSharing = _messages.BooleanField(1)
enablePrivateEndpoint = _messages.BooleanField(2)
enablePrivateNodes = _messages.BooleanField(3)
masterIpv4CidrBlock = _messages.StringField(4)
peeringName = _messages.StringField(5)
privateEndpoint = _messages.StringField(6)
publicEndpoint = _messages.StringField(7)
class PrivateIPv6Status(_messages.Message):
r"""PrivateIPv6Status contains the desired state of the IPv6 fast path on
this cluster. Private IPv6 access allows direct high speed communication
from GKE pods to gRPC Google cloud services over IPv6.
Fields:
enabled: Enables private IPv6 access to Google Cloud services for this
cluster.
"""
enabled = _messages.BooleanField(1)
class RecurringTimeWindow(_messages.Message):
r"""Represents an arbitrary window of time that recurs.
Fields:
recurrence: An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3)
for how this window recurs. They go on for the span of time between the
start and end time. For example, to have something repeat every
weekday, you'd use: <code>FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR</code> To
repeat some window daily (equivalent to the DailyMaintenanceWindow):
<code>FREQ=DAILY</code> For the first weekend of every month:
<code>FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU</code> This specifies how
frequently the window starts. E.g., if you wanted to have a 9-5 UTC-4
window every weekday, you'd use something like: <code> start time =
2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400
recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR </code> Windows can span
multiple days. E.g., to make the window encompass every weekend from
midnight Saturday till the last minute of Sunday UTC: <code> start
time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z
recurrence = FREQ=WEEKLY;BYDAY=SA </code> Note the start and end time's
specific dates are largely arbitrary except to specify duration of the
window and when it first starts. The FREQ values of HOURLY, MINUTELY,
and SECONDLY are not supported.
window: The window of the first recurrence.
"""
recurrence = _messages.StringField(1)
window = _messages.MessageField('TimeWindow', 2)
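# Illustrative sketch (not part of the generated module): the weekend window
# from the recurrence docstring above, expressed as a RecurringTimeWindow. It
# assumes TimeWindow exposes startTime/endTime string fields, as in the public
# container API.
#
#   weekend_window = RecurringTimeWindow(
#       recurrence='FREQ=WEEKLY;BYDAY=SA',
#       window=TimeWindow(startTime='2019-01-05T00:00:00Z',
#                         endTime='2019-01-07T23:59:00Z'))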
class ReleaseChannel(_messages.Message):
r"""ReleaseChannel indicates which release channel a cluster is subscribed
to. Release channels are arranged in order of risk and frequency of updates.
When a cluster is subscribed to a release channel, Google maintains both the
master version and the node version. Node auto-upgrade defaults to true and
cannot be disabled. Updates to version related fields (e.g.
current_master_version) return an error.
Enums:
ChannelValueValuesEnum: channel specifies which release channel the
cluster is subscribed to.
Fields:
channel: channel specifies which release channel the cluster is subscribed
to.
"""
class ChannelValueValuesEnum(_messages.Enum):
r"""channel specifies which release channel the cluster is subscribed to.
Values:
UNSPECIFIED: No channel specified.
RAPID: RAPID channel is offered on an early access basis for customers
who want to test new releases before they are qualified for production
use or general availability. New upgrades will occur roughly weekly.
WARNING: Versions available in the RAPID Channel may be subject to
unresolved issues with no known workaround and are not for use with
production workloads or subject to any SLAs.
REGULAR: Clusters subscribed to REGULAR receive versions that are
considered GA quality. REGULAR is intended for production users who
want to take advantage of new features. New upgrades will occur
roughly every few weeks.
STABLE: Clusters subscribed to STABLE receive versions that are known to
be stable and reliable in production. STABLE is intended for
production users who need stability above all else, or for whom
frequent upgrades are too risky. New upgrades will occur roughly every
few months.
"""
UNSPECIFIED = 0
RAPID = 1
REGULAR = 2
STABLE = 3
channel = _messages.EnumField('ChannelValueValuesEnum', 1)
class ReleaseChannelConfig(_messages.Message):
r"""ReleaseChannelConfig exposes configuration for a release channel.
Enums:
ChannelValueValuesEnum: The release channel this configuration applies to.
Fields:
availableVersions: List of available versions for the release channel.
channel: The release channel this configuration applies to.
defaultVersion: The default version for newly created clusters on the
channel.
"""
class ChannelValueValuesEnum(_messages.Enum):
r"""The release channel this configuration applies to.
Values:
UNSPECIFIED: No channel specified.
RAPID: RAPID channel is offered on an early access basis for customers
who want to test new releases before they are qualified for production
use or general availability. New upgrades will occur roughly weekly.
WARNING: Versions available in the RAPID Channel may be subject to
unresolved issues with no known workaround and are not for use with
production workloads or subject to any SLAs.
REGULAR: Clusters subscribed to REGULAR receive versions that are
considered GA quality. REGULAR is intended for production users who
want to take advantage of new features. New upgrades will occur
roughly every few weeks.
STABLE: Clusters subscribed to STABLE receive versions that are known to
be stable and reliable in production. STABLE is intended for
production users who need stability above all else, or for whom
frequent upgrades are too risky. New upgrades will occur roughly every
few months.
"""
UNSPECIFIED = 0
RAPID = 1
REGULAR = 2
STABLE = 3
availableVersions = _messages.MessageField('AvailableVersion', 1, repeated=True)
channel = _messages.EnumField('ChannelValueValuesEnum', 2)
defaultVersion = _messages.StringField(3)
class ReservationAffinity(_messages.Message):
r"""[ReservationAffinity](/compute/docs/instances/reserving-zonal-resources)
is the configuration of desired reservation which instances could take
capacity from.
Enums:
ConsumeReservationTypeValueValuesEnum: Corresponds to the type of
reservation consumption.
Fields:
consumeReservationType: Corresponds to the type of reservation
consumption.
key: Corresponds to the label key of a reservation resource. To target a
SPECIFIC_RESERVATION by name, specify "googleapis.com/reservation-name"
as the key and specify the name of your reservation as its value.
values: Corresponds to the label value(s) of reservation resource(s).
"""
class ConsumeReservationTypeValueValuesEnum(_messages.Enum):
r"""Corresponds to the type of reservation consumption.
Values:
UNSPECIFIED: Default value. This should not be used.
NO_RESERVATION: Do not consume from any reserved capacity.
ANY_RESERVATION: Consume any reservation available.
SPECIFIC_RESERVATION: Must consume from a specific reservation. Must
specify key value fields for specifying the reservations.
"""
UNSPECIFIED = 0
NO_RESERVATION = 1
ANY_RESERVATION = 2
SPECIFIC_RESERVATION = 3
consumeReservationType = _messages.EnumField('ConsumeReservationTypeValueValuesEnum', 1)
key = _messages.StringField(2)
values = _messages.StringField(3, repeated=True)
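# Illustrative sketch (not part of the generated module): targeting a specific
# reservation by name, per the key convention documented above. The reservation
# name is hypothetical.
#
#   affinity = ReservationAffinity(
#       consumeReservationType=(
#           ReservationAffinity.ConsumeReservationTypeValueValuesEnum
#           .SPECIFIC_RESERVATION),
#       key='googleapis.com/reservation-name',
#       values=['my-reservation'])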
class ResourceLimit(_messages.Message):
r"""Contains information about amount of some resource in the cluster. For
memory, value should be in GB.
Fields:
maximum: Maximum amount of the resource in the cluster.
minimum: Minimum amount of the resource in the cluster.
resourceType: Resource name "cpu", "memory" or gpu-specific string.
"""
maximum = _messages.IntegerField(1)
minimum = _messages.IntegerField(2)
resourceType = _messages.StringField(3)
class ResourceUsageExportConfig(_messages.Message):
r"""Configuration for exporting cluster resource usages.
Fields:
bigqueryDestination: Configuration to use BigQuery as usage export
destination.
consumptionMeteringConfig: Configuration to enable resource consumption
metering.
enableNetworkEgressMetering: Whether to enable network egress metering for
this cluster. If enabled, a daemonset will be created in the cluster to
meter network egress traffic.
"""
bigqueryDestination = _messages.MessageField('BigQueryDestination', 1)
consumptionMeteringConfig = _messages.MessageField('ConsumptionMeteringConfig', 2)
enableNetworkEgressMetering = _messages.BooleanField(3)
class RollbackNodePoolUpgradeRequest(_messages.Message):
r"""RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or
Failed NodePool upgrade. This will be an no-op if the last upgrade
successfully completed.
Fields:
clusterId: Deprecated. The name of the cluster to rollback. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to rollback upgrade. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to rollback. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SandboxConfig(_messages.Message):
r"""SandboxConfig contains configurations of the sandbox to use for the
node.
Enums:
TypeValueValuesEnum: Type of the sandbox to use for the node.
Fields:
sandboxType: Type of the sandbox to use for the node (e.g. 'gvisor')
type: Type of the sandbox to use for the node.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Type of the sandbox to use for the node.
Values:
UNSPECIFIED: Default value. This should not be used.
GVISOR: Run sandbox using gvisor.
"""
UNSPECIFIED = 0
GVISOR = 1
sandboxType = _messages.StringField(1)
type = _messages.EnumField('TypeValueValuesEnum', 2)
class SecurityProfile(_messages.Message):
r"""User selected security profile
Fields:
disableRuntimeRules: Don't apply runtime rules. When set to true, no
objects/deployments will be installed in the cluster to enforce runtime
rules. This is useful to work with config-as-code systems.
name: Name with version of the selected security profile. A security
profile name follows kebab-case (a-zA-Z*) and a version is like
MAJOR.MINOR-suffix, where suffix is ([a-zA-Z0-9\-_\.]+), e.g.
default-1.0-gke.0
"""
disableRuntimeRules = _messages.BooleanField(1)
name = _messages.StringField(2)
class ServerConfig(_messages.Message):
r"""Kubernetes Engine service configuration.
Fields:
channels: List of release channel configurations.
defaultClusterVersion: Version of Kubernetes the service deploys by
default.
defaultImageType: Default image type.
premiumConfig: Premium configuration for the service.
validImageTypes: List of valid image types.
validMasterVersions: List of valid master versions.
validNodeVersions: List of valid node upgrade target versions.
"""
channels = _messages.MessageField('ReleaseChannelConfig', 1, repeated=True)
defaultClusterVersion = _messages.StringField(2)
defaultImageType = _messages.StringField(3)
premiumConfig = _messages.MessageField('PremiumConfig', 4)
validImageTypes = _messages.StringField(5, repeated=True)
validMasterVersions = _messages.StringField(6, repeated=True)
validNodeVersions = _messages.StringField(7, repeated=True)
class SetAddonsConfigRequest(_messages.Message):
r"""SetAddonsRequest sets the addons associated with the cluster.
Fields:
addonsConfig: The desired configurations for the various addons available
to run in the cluster.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to set addons.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
addonsConfig = _messages.MessageField('AddonsConfig', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLabelsRequest(_messages.Message):
r"""SetLabelsRequest sets the Google Cloud Platform labels on a Google
Container Engine cluster, which will in turn set them for Google Compute
Engine resources used by that cluster
Messages:
ResourceLabelsValue: The labels to set for that cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
labelFingerprint: The fingerprint of the previous set of labels for this
resource, used to detect conflicts. The fingerprint is initially
generated by Kubernetes Engine and changes after every request to modify
or update labels. You must always provide an up-to-date fingerprint hash
when updating or changing labels. Make a <code>get()</code> request to
the resource to get the latest fingerprint.
name: The name (project, location, cluster id) of the cluster to set
labels. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
resourceLabels: The labels to set for that cluster.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ResourceLabelsValue(_messages.Message):
r"""The labels to set for that cluster.
Messages:
AdditionalProperty: An additional property for a ResourceLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ResourceLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResourceLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterId = _messages.StringField(1)
labelFingerprint = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
resourceLabels = _messages.MessageField('ResourceLabelsValue', 5)
zone = _messages.StringField(6)
class SetLegacyAbacRequest(_messages.Message):
r"""SetLegacyAbacRequest enables or disables the ABAC authorization
mechanism for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
enabled: Whether ABAC authorization will be enabled in the cluster.
name: The name (project, location, cluster id) of the cluster to set
legacy abac. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
enabled = _messages.BooleanField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLocationsRequest(_messages.Message):
r"""SetLocationsRequest sets the locations of the cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
locations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located. Changing the locations a cluster is in will result in
nodes being either created or removed from the cluster, depending on
whether locations are being added or removed. This list must always
include the cluster's primary zone.
name: The name (project, location, cluster) of the cluster to set
locations. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
locations = _messages.StringField(2, repeated=True)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLoggingServiceRequest(_messages.Message):
r"""SetLoggingServiceRequest sets the logging service of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
loggingService: The logging service the cluster should use to write
logs. Currently available options: * "logging.googleapis.com" - the
Google Cloud Logging service * "none" - no logs will be exported from
the cluster
name: The name (project, location, cluster) of the cluster to set logging.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
loggingService = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetMaintenancePolicyRequest(_messages.Message):
r"""SetMaintenancePolicyRequest sets the maintenance policy for a cluster.
Fields:
clusterId: The name of the cluster to update.
maintenancePolicy: The maintenance policy to be set for the cluster. An
empty field clears the existing maintenance policy.
name: The name (project, location, cluster id) of the cluster to set
maintenance policy. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: The Google Developers Console [project ID or project
number](https://support.google.com/cloud/answer/6158840).
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1)
maintenancePolicy = _messages.MessageField('MaintenancePolicy', 2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetMasterAuthRequest(_messages.Message):
r"""SetMasterAuthRequest updates the admin password of a cluster.
Enums:
ActionValueValuesEnum: The exact form of action to be taken on the master
auth.
Fields:
action: The exact form of action to be taken on the master auth.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to set auth.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
update: A description of the update.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
class ActionValueValuesEnum(_messages.Enum):
r"""The exact form of action to be taken on the master auth.
Values:
UNKNOWN: Operation is unknown and will error out.
SET_PASSWORD: Set the password to a user generated value.
GENERATE_PASSWORD: Generate a new password and set it to that.
SET_USERNAME: Set the username. If an empty username is provided, basic
authentication is disabled for the cluster. If a non-empty username
is provided, basic authentication is enabled, with either a provided
password or a generated one.
"""
UNKNOWN = 0
SET_PASSWORD = 1
GENERATE_PASSWORD = 2
SET_USERNAME = 3
action = _messages.EnumField('ActionValueValuesEnum', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
update = _messages.MessageField('MasterAuth', 5)
zone = _messages.StringField(6)
class SetMonitoringServiceRequest(_messages.Message):
r"""SetMonitoringServiceRequest sets the monitoring service of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
monitoringService: The monitoring service the cluster should use to write
metrics. Currently available options: * "monitoring.googleapis.com" -
the Google Cloud Monitoring service * "none" - no metrics will be
exported from the cluster
name: The name (project, location, cluster) of the cluster to set
monitoring. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
monitoringService = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetNetworkPolicyRequest(_messages.Message):
r"""SetNetworkPolicyRequest enables/disables network policy for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to set
networking policy. Specified in the format
'projects/*/locations/*/clusters/*'.
networkPolicy: Configuration options for the NetworkPolicy feature.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
networkPolicy = _messages.MessageField('NetworkPolicy', 3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetNodePoolAutoscalingRequest(_messages.Message):
r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of a node
pool.
Fields:
autoscaling: Autoscaling configuration for the node pool.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool) of the node pool to
set autoscaler settings. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to upgrade. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
autoscaling = _messages.MessageField('NodePoolAutoscaling', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class SetNodePoolManagementRequest(_messages.Message):
r"""SetNodePoolManagementRequest sets the node management properties of a
node pool.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
management: NodeManagement configuration for the node pool.
name: The name (project, location, cluster, node pool id) of the node pool
to set management properties. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to update. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
management = _messages.MessageField('NodeManagement', 2)
name = _messages.StringField(3)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class SetNodePoolSizeRequest(_messages.Message):
r"""SetNodePoolSizeRequest sets the size a node pool.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to set size. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodeCount: The desired node count for the pool.
nodePoolId: Deprecated. The name of the node pool to update. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840).
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
nodeCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class ShieldedInstanceConfig(_messages.Message):
r"""A set of Shielded Instance options.
Fields:
enableIntegrityMonitoring: Defines whether the instance has integrity
monitoring enabled.
enableSecureBoot: Defines whether the instance has Secure Boot enabled.
"""
enableIntegrityMonitoring = _messages.BooleanField(1)
enableSecureBoot = _messages.BooleanField(2)
class ShieldedNodes(_messages.Message):
r"""Configuration of Shielded Nodes feature.
Fields:
enabled: Whether Shielded Nodes features are enabled on all nodes in this
cluster.
"""
enabled = _messages.BooleanField(1)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class StartIPRotationRequest(_messages.Message):
r"""StartIPRotationRequest creates a new IP for the cluster and then
performs a node upgrade on each node pool to point to the new IP.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to start IP
rotation. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
rotateCredentials: Whether to rotate credentials during IP rotation.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
rotateCredentials = _messages.BooleanField(4)
zone = _messages.StringField(5)
class StatusCondition(_messages.Message):
r"""StatusCondition describes why a cluster or a node pool has a certain
status (e.g., ERROR or DEGRADED).
Enums:
CodeValueValuesEnum: Machine-friendly representation of the condition
Fields:
code: Machine-friendly representation of the condition
message: Human-friendly representation of the condition
"""
class CodeValueValuesEnum(_messages.Enum):
r"""Machine-friendly representation of the condition
Values:
UNKNOWN: UNKNOWN indicates a generic condition.
GCE_STOCKOUT: GCE_STOCKOUT indicates that Google Compute Engine
resources are temporarily unavailable.
GKE_SERVICE_ACCOUNT_DELETED: GKE_SERVICE_ACCOUNT_DELETED indicates that
the user deleted their robot service account.
GCE_QUOTA_EXCEEDED: Google Compute Engine quota was exceeded.
SET_BY_OPERATOR: Cluster state was manually changed by an SRE due to a
system logic error.
CLOUD_KMS_KEY_ERROR: Unable to perform an encrypt operation against the
CloudKMS key used for etcd level encryption. More codes TBA
"""
UNKNOWN = 0
GCE_STOCKOUT = 1
GKE_SERVICE_ACCOUNT_DELETED = 2
GCE_QUOTA_EXCEEDED = 3
SET_BY_OPERATOR = 4
CLOUD_KMS_KEY_ERROR = 5
code = _messages.EnumField('CodeValueValuesEnum', 1)
message = _messages.StringField(2)
class TierConfig(_messages.Message):
r"""TierConfig is the configuration for a tier offering. For example the
GKE standard or advanced offerings which contain different levels of
functionality and possibly cost.
Enums:
ParentValueValuesEnum: The tier from which the tier being configured
inherits. The configured tier will inherit all the features from its
parent tier.
TierValueValuesEnum: The tier that is being configured with this value.
Fields:
parent: The tier from which the tier being configured inherits. The
configured tier will inherit all the features from its parent tier.
tier: The tier that is being configured with this value.
"""
class ParentValueValuesEnum(_messages.Enum):
r"""The tier from which the tier being configured inherits. The
configured tier will inherit all the features from its parent tier.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
class TierValueValuesEnum(_messages.Enum):
r"""The tier that is being configured with this value.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
parent = _messages.EnumField('ParentValueValuesEnum', 1)
tier = _messages.EnumField('TierValueValuesEnum', 2)
class TierSettings(_messages.Message):
r"""Cluster tier settings.
Enums:
TierValueValuesEnum: Cluster tier.
Fields:
tier: Cluster tier.
"""
class TierValueValuesEnum(_messages.Enum):
r"""Cluster tier.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
tier = _messages.EnumField('TierValueValuesEnum', 1)
class TimeWindow(_messages.Message):
r"""Represents an arbitrary window of time.
Fields:
endTime: The time that the window ends. The end time should take place
after the start time.
startTime: The time that the window first starts.
"""
endTime = _messages.StringField(1)
startTime = _messages.StringField(2)
class UpdateClusterRequest(_messages.Message):
r"""UpdateClusterRequest updates the settings of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
update: A description of the update.
updatedCluster: The updated cluster object. This field must be empty if
'update' is set.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
update = _messages.MessageField('ClusterUpdate', 4)
updatedCluster = _messages.MessageField('Cluster', 5)
zone = _messages.StringField(6)
class UpdateMasterRequest(_messages.Message):
r"""UpdateMasterRequest updates the master of the cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
masterVersion: The Kubernetes version to change the master to. Users may
specify either explicit versions offered by Kubernetes Engine or version
aliases, which have the following behavior: - "latest": picks the
highest valid Kubernetes version - "1.X": picks the highest valid
patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid
gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit
Kubernetes version - "-": picks the default Kubernetes version
name: The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840).
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
masterVersion = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
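# Illustrative sketch only (not part of the generated API surface): the
# "latest" alias described above picks the highest valid Kubernetes version
# for the control plane. The cluster path is a made-up placeholder.
def _example_update_master_request():
  return UpdateMasterRequest(
      name='projects/my-project/locations/us-central1/clusters/my-cluster',
      masterVersion='latest')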
class UpdateNodePoolRequest(_messages.Message):
r"""SetNodePoolVersionRequest updates the version of a node pool.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
image: The desired name of the image name to use for this node. This is
used to create clusters using a custom image.
imageProject: The project containing the desired image to use for this
node pool. This is used to create clusters using a custom image.
imageType: The desired image type for the node pool.
locations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the node pool's nodes
should be located. Changing the locations for a node pool will result in
nodes being either created or removed from the node pool, depending on
whether locations are being added or removed.
name: The name (project, location, cluster, node pool) of the node pool to
update. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to upgrade. This field
has been deprecated and replaced by the name field.
nodeVersion: The Kubernetes version to change the nodes to (typically an
upgrade). Users may specify either explicit versions offered by
Kubernetes Engine or version aliases, which have the following behavior:
- "latest": picks the highest valid Kubernetes version - "1.X": picks
the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks
the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N":
picks an explicit Kubernetes version - "-": picks the Kubernetes master
version
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
updatedNodePool: The updated node pool object. This field must be empty if
any other node pool field is set (e.g. 'node_version', 'image_type',
'locations', etc.)
upgradeSettings: Upgrade settings control disruption and speed of the
upgrade.
workloadMetadataConfig: The desired workload metadata config for the node
pool.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
image = _messages.StringField(2)
imageProject = _messages.StringField(3)
imageType = _messages.StringField(4)
locations = _messages.StringField(5, repeated=True)
name = _messages.StringField(6)
nodePoolId = _messages.StringField(7)
nodeVersion = _messages.StringField(8)
projectId = _messages.StringField(9)
updatedNodePool = _messages.MessageField('NodePool', 10)
upgradeSettings = _messages.MessageField('UpgradeSettings', 11)
workloadMetadataConfig = _messages.MessageField('WorkloadMetadataConfig', 12)
zone = _messages.StringField(13)
class UpgradeSettings(_messages.Message):
r"""These upgrade settings control the level of parallelism and the level of
disruption caused by an upgrade. maxUnavailable controls the number of
nodes that can be simultaneously unavailable. maxSurge controls the number
of additional nodes that can be added to the node pool temporarily for the
time of the upgrade to increase the number of available nodes.
(maxUnavailable + maxSurge) determines the level of parallelism (how many
nodes are being upgraded at the same time). Note: upgrades inevitably
introduce some disruption since workloads need to be moved from old nodes to
new, upgraded ones. Even if maxUnavailable=0, this holds true. (Disruption
stays within the limits of PodDisruptionBudget, if it is configured.) For
example, a 5-node pool is created with maxSurge set to 2 and maxUnavailable
set to 1. During an upgrade, GKE creates 2 upgraded nodes, then brings down
up to 3 existing nodes after the upgraded nodes are ready. GKE will only
bring down 1 node at a time.
Fields:
maxSurge: The maximum number of nodes that can be created beyond the
current size of the node pool during the upgrade process.
maxUnavailable: The maximum number of nodes that can be simultaneously
unavailable during the upgrade process. A node is considered available
if its status is Ready.
"""
maxSurge = _messages.IntegerField(1, variant=_messages.Variant.INT32)
maxUnavailable = _messages.IntegerField(2, variant=_messages.Variant.INT32)
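# Illustrative sketch only (not part of the generated API surface): the surge
# upgrade example from the docstring above. With maxSurge=2 and
# maxUnavailable=1 on a 5-node pool, at most 2 + 1 = 3 nodes are in flight at
# any moment during the upgrade.
def _example_upgrade_settings():
  return UpgradeSettings(maxSurge=2, maxUnavailable=1)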
class UsableSubnetwork(_messages.Message):
r"""UsableSubnetwork resource returns the subnetwork name, its associated
network and the primary CIDR range.
Fields:
ipCidrRange: The range of internal addresses that are owned by this
subnetwork.
network: Network Name.
secondaryIpRanges: Secondary IP ranges.
statusMessage: A human readable status message representing the reasons
for cases where the caller cannot use the secondary ranges under the
subnet. For example if the secondary_ip_ranges is empty due to a
permission issue, an insufficient permission message will be given by
status_message.
subnetwork: Subnetwork Name.
"""
ipCidrRange = _messages.StringField(1)
network = _messages.StringField(2)
secondaryIpRanges = _messages.MessageField('UsableSubnetworkSecondaryRange', 3, repeated=True)
statusMessage = _messages.StringField(4)
subnetwork = _messages.StringField(5)
class UsableSubnetworkSecondaryRange(_messages.Message):
r"""Secondary IP range of a usable subnetwork.
Enums:
StatusValueValuesEnum: This field is to determine the status of the
secondary range programmably.
Fields:
ipCidrRange: The range of IP addresses belonging to this subnetwork
secondary range.
rangeName: The name associated with this subnetwork secondary range, used
when adding an alias IP range to a VM instance.
status: This field is to determine the status of the secondary range
programmably.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""This field is to determine the status of the secondary range
programmably.
Values:
UNKNOWN: UNKNOWN is the zero value of the Status enum. It's not a valid
status.
UNUSED: UNUSED denotes that this range is unclaimed by any cluster.
IN_USE_SERVICE: IN_USE_SERVICE denotes that this range is claimed by a
cluster for services. It cannot be used for other clusters.
IN_USE_SHAREABLE_POD: IN_USE_SHAREABLE_POD denotes this range was
created by the network admin and is currently claimed by a cluster for
pods. It can only be used by other clusters as a pod range.
IN_USE_MANAGED_POD: IN_USE_MANAGED_POD denotes this range was created by
Google Kubernetes Engine and is claimed for pods. It cannot be used
for other clusters.
"""
UNKNOWN = 0
UNUSED = 1
IN_USE_SERVICE = 2
IN_USE_SHAREABLE_POD = 3
IN_USE_MANAGED_POD = 4
ipCidrRange = _messages.StringField(1)
rangeName = _messages.StringField(2)
status = _messages.EnumField('StatusValueValuesEnum', 3)
class VerticalPodAutoscaling(_messages.Message):
r"""VerticalPodAutoscaling contains global, per-cluster information required
by Vertical Pod Autoscaler to automatically adjust the resources of pods
controlled by it.
Fields:
enabled: Enables vertical pod autoscaling.
"""
enabled = _messages.BooleanField(1)
class WorkloadIdentityConfig(_messages.Message):
r"""Configuration for the use of k8s Service Accounts in GCP IAM policies.
Fields:
identityNamespace: IAM Identity Namespace to attach all k8s Service
Accounts to.
workloadPool: The workload pool to attach all Kubernetes service accounts
to.
"""
identityNamespace = _messages.StringField(1)
workloadPool = _messages.StringField(2)
class WorkloadMetadataConfig(_messages.Message):
r"""WorkloadMetadataConfig defines the metadata configuration to expose to
workloads on the node pool.
Enums:
ModeValueValuesEnum: Mode is the configuration for how to expose metadata
to workloads running on the node pool.
NodeMetadataValueValuesEnum: NodeMetadata is the configuration for how to
expose metadata to the workloads running on the node.
Fields:
mode: Mode is the configuration for how to expose metadata to workloads
running on the node pool.
nodeMetadata: NodeMetadata is the configuration for how to expose metadata
to the workloads running on the node.
"""
class ModeValueValuesEnum(_messages.Enum):
r"""Mode is the configuration for how to expose metadata to workloads
running on the node pool.
Values:
MODE_UNSPECIFIED: Not set.
GCE_METADATA: Expose all GCE metadata to pods.
GKE_METADATA: Run the GKE Metadata Server on this node. The GKE Metadata
Server exposes a metadata API to workloads that is compatible with the
V1 Compute Metadata APIs exposed by the Compute Engine and App Engine
Metadata Servers. This feature can only be enabled if Workload
Identity is enabled at the cluster level.
"""
MODE_UNSPECIFIED = 0
GCE_METADATA = 1
GKE_METADATA = 2
class NodeMetadataValueValuesEnum(_messages.Enum):
r"""NodeMetadata is the configuration for how to expose metadata to the
workloads running on the node.
Values:
UNSPECIFIED: Not set.
SECURE: Prevent workloads not in hostNetwork from accessing certain VM
metadata, specifically kube-env, which contains Kubelet credentials,
and the instance identity token. Metadata concealment is a temporary
security solution available while the bootstrapping process for
cluster nodes is being redesigned with significant security
improvements. This feature is scheduled to be deprecated in the
future and later removed.
EXPOSE: Expose all VM metadata to pods.
GKE_METADATA_SERVER: Run the GKE Metadata Server on this node. The GKE
Metadata Server exposes a metadata API to workloads that is compatible
with the V1 Compute Metadata APIs exposed by the Compute Engine and
App Engine Metadata Servers. This feature can only be enabled if
Workload Identity is enabled at the cluster level.
"""
UNSPECIFIED = 0
SECURE = 1
EXPOSE = 2
GKE_METADATA_SERVER = 3
mode = _messages.EnumField('ModeValueValuesEnum', 1)
nodeMetadata = _messages.EnumField('NodeMetadataValueValuesEnum', 2)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
|
[
"e1517234@soka-u.jp"
] |
e1517234@soka-u.jp
|
164f7e179ec264ee49337f55cfdcec1944421c2b
|
685e1a25f56109de935d1ad443372d3fff8a2264
|
/lesson8/main.py
|
852514b91d0a45e92292f03dc3c701221fcd5b92
|
[] |
no_license
|
osydorchuk/ITEA2
|
8a8afdcfc08aa96aae3182ff19bc9b173d043a67
|
7e64e9d9843017413705367c1e742c3f83b76d14
|
refs/heads/master
| 2020-06-24T16:38:15.625652
| 2019-09-07T13:58:24
| 2019-09-07T13:58:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
print(__name__)
print(globals())
print(locals())
def check_locals():
a = 0
b ="q"
print(locals())
check_locals()
|
[
"a.sydorchuk@gmail.com"
] |
a.sydorchuk@gmail.com
|
4c8316dcfdb30ccba4b2ac6a9c266ca950e5db88
|
7ad63f456925594105573cdf3eebdf719b19a1e1
|
/python/code_challenges/hashmap-repeated-word/hashmap_repeated_word/hashmap_repeated_word.py
|
c3099cd4b8bf4706efd1eea98ff4b79ab93fcd6b
|
[] |
no_license
|
laithfayizhussein/data-structures-and-algorithm
|
18425437b238a9fe1060daec13d3c6aa378093d4
|
c0ef81bc7e0aa04627d0b2a08a2070fbb3b01b65
|
refs/heads/master
| 2023-08-03T15:29:52.697073
| 2021-09-14T14:47:10
| 2021-09-14T14:47:10
| 373,604,346
| 1
| 0
| null | 2021-09-14T14:47:12
| 2021-06-03T18:25:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,196
|
py
|
import re
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList:
    def __init__(self):
        self.head = None
    def add(self, data):
        """Append `data` to the end of the list."""
        node = Node(data)
        if not self.head:
            self.head = node
        else:
            current = self.head
            while current.next:
                current = current.next
            current.next = node
    def __str__(self):
        values = []
        current = self.head
        while current:
            values.append(current.data)
            current = current.next
        return f'{values}'
class Hash_table:
    def __init__(self, size):
        self.size = size
        self.map = [None] * size
    def hash(self, key):
        """Hash `key` to a bucket index by summing its character codes."""
        ascii = 0
        for ch in key:
            ascii_ch = ord(ch)
            ascii += ascii_ch
        temp_value = ascii * 19
        hashed_key = temp_value % self.size
        return hashed_key
    def add(self, key, value):
        """Store (key, value), chaining collisions in a linked list bucket."""
        hashed_key = self.hash(key)
        if not self.map[hashed_key]:
            self.map[hashed_key] = LinkedList()
        self.map[hashed_key].add((key, value))
    def contains(self, key):
        """Return True if `key` has been stored in the table."""
        hashed_key = self.hash(key)
        if self.map[hashed_key]:
            current = self.map[hashed_key].head
            while current:
                if current.data[0] == key:
                    return True
                current = current.next
        return False
    def get(self, key):
        """Return the value stored under `key`, or None if it is absent."""
        hashed_key = self.hash(key)
        if self.map[hashed_key]:
            current = self.map[hashed_key].head
            while current:
                if current.data[0] == key:
                    return current.data[1]
                current = current.next
        return None
def repeated_word(book=None):
    """Return the first word that appears twice in `book`.
    Falls through to None when every word is unique."""
    if book is None:
        return 'book is empty'
    hash_table = Hash_table(1024)
    # Normalize: strip punctuation, lowercase, and split into words.
    book = re.sub(r'\W+', ' ', book).lower().split()
for word in book:
if hash_table.contains(word):
return word
else:
hash_table.add(word, True)
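# Minimal usage sketch (assumed, not part of the original exercise): the text
# below is made up purely to demonstrate the expected behaviour.
if __name__ == '__main__':
    sample_text = 'It was the best of times, it was the worst of times.'
    print(repeated_word(sample_text))  # -> 'it' (first word seen twice)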
|
[
"laithalsanory9919@gmail.com"
] |
laithalsanory9919@gmail.com
|
bfc47b482deb0ccf1f3e645d49665369758987ff
|
3a3e823f6b94b7eae8a363b0b51b036d2b0a1669
|
/metvae/dataset/biom.py
|
aa3196a0a38243f360389493a4983f3f36972811
|
[] |
no_license
|
mortonjt/metvae
|
8a28bbbd72ee79d66992bd31bd82af65b83ea819
|
f2f241fdedd2f4c045a088727df1f155b9ce9a20
|
refs/heads/main
| 2022-12-31T16:24:26.014394
| 2020-10-20T23:38:50
| 2020-10-20T23:38:50
| 305,812,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,780
|
py
|
import os
import re
import biom
import math
import logging
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from typing import List
logger = logging.getLogger(__name__)
class BiomDataset(Dataset):
"""Loads a `.biom` file.
Parameters
----------
filename : Path
Filepath to biom table
metadata_file : Path
Filepath to sample metadata
batch_category : str
        Column name for batch indices
"""
def __init__(
self,
table: biom.Table,
metadata: pd.DataFrame = None,
batch_category: str = None,
):
super(BiomDataset).__init__()
self.table = table
self.metadata = metadata
self.batch_category = batch_category
self.populate()
def populate(self):
logger.info("Preprocessing dataset")
if self.metadata is not None:
# match the metadata with the table
ids = set(self.table.ids()) & set(self.metadata.index)
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='sample')
self.metadata = self.metadata.loc[self.table.ids()]
if self.metadata.index.name is None:
raise ValueError('`Index` must have a name either'
'`sampleid`, `sample-id` or #SampleID')
self.index_name = self.metadata.index.name
self.metadata = self.metadata.reset_index()
self.batch_indices = None
if self.batch_category is not None and self.metadata is not None:
batch_cats = np.unique(self.metadata[self.batch_category].values)
batch_cats = pd.Series(
np.arange(len(batch_cats)), index=batch_cats)
self.batch_indices = np.array(
list(map(lambda x: batch_cats.loc[x],
self.metadata[self.batch_category].values)))
logger.info("Finished preprocessing dataset")
def __len__(self) -> int:
return len(self.table.ids())
def __getitem__(self, i):
""" Returns all of the samples for a given subject
Returns
-------
counts : np.array
OTU counts for specified samples.
batch_indices : np.array
Membership ids for batch samples. If not specified, return None.
"""
sample_idx = self.table.ids()[i]
if self.batch_indices is not None:
batch_indices = self.batch_indices[i]
else:
batch_indices = None
counts = self.table.data(id=sample_idx, axis='sample')
return counts, batch_indices
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
start = 0
end = self.__len__()
if worker_info is None: # single-process data loading
for i in range(end):
yield self.__getitem__(i)
else:
            worker_id = worker_info.id
            w = float(worker_info.num_workers)
            per_worker = int(math.ceil((end - start) / w))
iter_start = start + worker_id * per_worker
iter_end = min(iter_start + per_worker, end)
for i in range(iter_start, iter_end):
yield self.__getitem__(i)
class BiomBatchDataset(BiomDataset):
"""Loads a `.biom` file.
Parameters
----------
filename : Path
Filepath to biom table
metadata_file : Path
Filepath to sample metadata
batch_differentials : str
Pre-trained batch differentials effects
batch_category : str
Column name in metadata for batch indices
Notes
-----
Important, periods cannot be handled in the labels
in the batch_category. Make sure that these are converted to
hyphens or underscores.
"""
def __init__(
self,
table: biom.Table,
metadata: pd.DataFrame,
batch_differentials : pd.DataFrame,
batch_category: str,
format_columns=True,
):
super(BiomBatchDataset).__init__()
self.table = table
self.metadata = metadata
self.batch_category = batch_category
self.batch_differentials = batch_differentials
self.format_columns = format_columns
self.populate()
def populate(self):
logger.info("Preprocessing dataset")
# Match the metadata with the table
ids = set(self.table.ids()) & set(self.metadata.index)
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='sample')
self.metadata = self.metadata.loc[self.table.ids()]
if self.metadata.index.name is None:
raise ValueError('`Index` must have a name either'
'`sampleid`, `sample-id` or #SampleID')
self.index_name = self.metadata.index.name
self.metadata = self.metadata.reset_index()
# Clean up the batch indexes
if self.format_columns:
if (self.metadata[self.batch_category].dtypes == np.float64 or
self.metadata[self.batch_category].dtypes == np.int64):
# format the batch category column
m = self.metadata[self.batch_category].astype(np.int64)
                self.metadata[self.batch_category] = m.astype(str)
cols = self.batch_differentials.columns
def regex_f(x):
return re.findall(r"\[([A-Za-z0-9_]+).*\]", x)[0]
cols = list(map(regex_f, cols))
            logger.debug('Batch differential columns after renaming: %s', cols)
self.batch_differentials.columns = cols
# Retrieve batch labels
batch_cats = np.unique(self.metadata[self.batch_category].values)
batch_cats = pd.Series(
np.arange(len(batch_cats)), index=batch_cats)
self.batch_indices = np.array(
list(map(lambda x: batch_cats.loc[x],
self.metadata[self.batch_category].values)))
# Clean up batch differentials
table_features = set(self.table.ids(axis='observation'))
batch_features = set(self.batch_differentials.index)
ids = table_features & batch_features
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='observation')
table_obs = self.table.ids(axis='observation')
self.batch_differentials = self.batch_differentials.loc[table_obs]
logger.info("Finished preprocessing dataset")
def __getitem__(self, i):
""" Returns all of the samples for a given subject.
Returns
-------
counts : np.array
OTU counts for specified samples.
batch_indices : np.array
Membership ids for batch samples.
"""
sample_idx = self.table.ids()[i]
batch_index = self.batch_indices[i]
counts = self.table.data(id=sample_idx, axis='sample')
batch_diffs = self.batch_differentials
        assert batch_index < batch_diffs.shape[1], (
            f'Batch index {batch_index} out of range for '
            f'{batch_diffs.shape[1]} batch differential columns')
batch_diffs = np.array(batch_diffs.iloc[:, batch_index].values)
return counts, batch_diffs
def collate_single_f(batch):
counts_list = np.vstack([b[0] for b in batch])
counts = torch.from_numpy(counts_list).float()
return counts
def collate_batch_f(batch):
counts_list = np.vstack([b[0] for b in batch])
batch_diffs = np.vstack([b[1] for b in batch])
counts = torch.from_numpy(counts_list).float()
batch_diffs = torch.from_numpy(batch_diffs).float()
return counts, batch_diffs
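# Minimal usage sketch (assumed; the file path and batch size below are
# placeholders, not part of this module):
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    table = biom.load_table('feature-table.biom')  # placeholder path
    dataset = BiomDataset(table)
    loader = DataLoader(dataset, batch_size=32, collate_fn=collate_single_f)
    for counts in loader:
        print(counts.shape)  # (batch_size, n_features)
        break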
|
[
"jamietmorton@gmail.com"
] |
jamietmorton@gmail.com
|
6176590b086fa51c97cf9f07166346416c151b32
|
c1a8dd3a5379caa8124ff0c20f4a0b775874c614
|
/venv/bin/pip3
|
0c0400dbeb62afdbd7d795b71041e7d20d471cef
|
[] |
no_license
|
ssm5/illini
|
25a40833be60c125cf91485d78aaa0506bf3b5c9
|
9ca880e9603790e16b7439ece54502884a2a171d
|
refs/heads/master
| 2021-08-15T03:48:12.666900
| 2017-11-17T08:16:55
| 2017-11-17T08:16:55
| 108,466,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
#!/Users/johnqian/Documents/College/CS196/illini/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"johnlongqian@gmail.com"
] |
johnlongqian@gmail.com
|
|
a5da3fc38c2b91b2122f0fd2cb7e5d2e1f764ad9
|
9dc3ae479c1b5c6941681917151fcb0379f9173d
|
/CanvasFeatureFlag.py
|
7a8e37d3b28a61f52fb91ba58b6f1eb53cf1381a
|
[] |
no_license
|
cthacker-udel/Python-Canvas-API-Wrapper
|
bf2400b42b644791f45bbda7ed42e2c03a8d97b2
|
0263c591a2b02197529559346558b9be02f592c3
|
refs/heads/master
| 2023-08-25T12:01:48.417204
| 2021-10-09T10:49:51
| 2021-10-09T10:49:51
| 388,362,237
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
from CanvasClient import CanvasClient
class CanvasFeatureFlags(CanvasClient):
def __init__(self):
self.course_id = None
self.account_id = None
self.user_id = None
self.feature_id = None
self.state = None
def generate_queries(self):
body = {}
if self.state is not None:
body['state'] = self.state
return body
def clear_queries(self):
self.course_id = None
self.account_id = None
self.user_id = None
self.feature_id = None
self.state = None
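# Minimal usage sketch (assumed): shows how the optional `state` attribute is
# turned into a query body and then cleared again.
if __name__ == '__main__':
    flags = CanvasFeatureFlags()
    flags.state = 'on'
    print(flags.generate_queries())  # {'state': 'on'}
    flags.clear_queries()
    print(flags.generate_queries())  # {}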
|
[
"cthacker@udel.edu"
] |
cthacker@udel.edu
|
6e0ae3e9c859c2ff133011147002083abb1e1ecf
|
6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f
|
/simp_py_examples/course/S1800/t105.py
|
14b64f55e86d1ce9d76af5b273b6ada48bd93378
|
[
"MIT"
] |
permissive
|
kcfkwok2003/Simp_py
|
11d6813fac83ab6309eb8efc22fcd8edde5b19b8
|
f75e66da01b45dc8688dda602f8b33d4258f0c31
|
refs/heads/master
| 2021-05-11T00:36:36.872754
| 2018-12-19T01:41:15
| 2018-12-19T01:41:15
| 118,306,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from simp_py import tft
lcd = tft.tft
lcd.clear()
import time
cnt = 10
while cnt >= 0:
    lcd.text(10, 10, 'count: %s ' % cnt)
    cnt -= 1
    time.sleep(1)
|
[
"kcfkwok@gmail.com"
] |
kcfkwok@gmail.com
|
b1b504761ef386bea3c5ec22159ec1973a0ac635
|
d4c47276c8fbd15240aa228eda04ee8e338caf02
|
/Python/Python Lesson/Second/Lesson9/Sample8.py
|
447d9972d35e1c1f96525406233e419f925a3a61
|
[] |
no_license
|
developer579/Practice
|
a745384450172fb327913c130303ab76492096f1
|
54084468af83afcc44530e757800c8c3678147c1
|
refs/heads/main
| 2023-05-06T01:36:06.222554
| 2021-06-02T07:04:03
| 2021-06-02T07:04:03
| 324,312,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
import re
# '.' in a pattern matches any single character, so e.g. "TXT.." needs two
# extra characters after "TXT".
patterns = ["TXT", "TXT..", ".TXT", "..TXT"]
strings = ["TXT", "TXTT", "TXTTT", "TTXT", "TTTXT"]
for valueptr in patterns:
    print("------")
    pattern = re.compile(valueptr)
    for valuestr in strings:
        res = pattern.search(valuestr)
        if res is not None:
            m = "o"
        else:
            m = "x"
        mrs = "(pattern)" + valueptr + "(string)" + valuestr + "(match)" + m
        print(mrs)
|
[
"69954570+developer579@users.noreply.github.com"
] |
69954570+developer579@users.noreply.github.com
|
50ac7fee9fba9158cdaa1d59c98b29131acafa31
|
234c0ce6a3c867b882f5aa6c8eb260f1a48c70ac
|
/mysite/blog/migrations/0003_auto_20190304_1654.py
|
542615bc94cb88de9e5182da038b20998688ab20
|
[] |
no_license
|
mubarakmaddy/MySite
|
b32e064f3d09a1d2898f6e0cb07f316ab1436079
|
5650a8c108e2cabf990a8e0cfd2e66b69d68d839
|
refs/heads/master
| 2020-04-23T21:46:11.204773
| 2019-06-27T09:02:22
| 2019-06-27T09:02:22
| 171,480,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# Generated by Django 2.1.7 on 2019-03-04 11:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0002_postmodel_author_email'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='PostModel',
),
]
|
[
"mubarakalis1@gmail.com"
] |
mubarakalis1@gmail.com
|
d02fb0c15d67504305264787a3321d77fe9822f8
|
068ac6386ff76431e308b7d7b69d8f8c8ae4f724
|
/jmj/wsgi.py
|
bccbd5fdc6024710a40b741290eb0bce529d8b94
|
[] |
no_license
|
Cesarcalles1/proyecto
|
67cf0a618e34c728bcf51ec54015170446997ba4
|
6417126c57ace7854b25ad5a042e8080bbd52f82
|
refs/heads/master
| 2021-05-04T05:30:38.363080
| 2018-02-05T16:58:47
| 2018-02-05T16:58:47
| 120,339,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
"""
WSGI config for jmj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jmj.settings")
application = get_wsgi_application()
|
[
"ridiazx@gmail.com"
] |
ridiazx@gmail.com
|