blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cfd8ae9767066953e1ea707ce6f3ec6793552d06 | e1e08ca2df1caadc30b5b62263fa1e769d4904d8 | /stream/models/db_wizard_auth.py | 531b7c4e5d9eaad89effd490b6e181cf48a09428 | [
"LicenseRef-scancode-public-domain"
] | permissive | tiench189/ClassbookStore | 509cedad5cc4109b8fb126ad59e25b922dfae6be | 4fff9bc6119d9ec922861cbecf23a3f676551485 | refs/heads/master | 2020-12-02T07:48:26.575023 | 2017-07-10T02:45:09 | 2017-07-10T02:45:09 | 96,728,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,873 | py | # -*- coding: utf-8 -*-
"""
#-----------------------------------------------------------------------------
# Name: db_wizard_auth
#
# Purpose:
#
# Version: 1.1
#
# Author: manhtd
#
# Created: 1/14/14
# Updated: 1/14/14
#
# Copyright: (c) Tinh Vân Books
#
# Todo:
#-----------------------------------------------------------------------------
"""
# Lookup table of categories used to group auth20 functions in the admin UI;
# 'format' controls how rows are rendered in reference/select widgets.
db.define_table('auth20_function_category',
                Field('name', type='string', notnull=True, label=T('Category Name')),
                Field('category_order', type='integer', notnull=True, default=0, label=T('Category Order')),
                Field('description', type='text', label=T('Function Description')),
                auth.signature, format='%(name)s')
# One row per permission-controlled controller action, identified by the
# (aname, cname, fname) = (application, controller, function) triple.
# NOTE(review): the 'Agruments' label typo is a user-visible runtime string,
# deliberately left unchanged here.
db.define_table('auth20_function',
                Field('name', type='string', notnull=True, label=T('Function Name')),
                Field('category', type='reference auth20_function_category', label=T('Function Category')),
                Field('aname', type='string', notnull=True, label=T('Application Name')),
                Field('cname', type='string', notnull=True, label=T('Controller Name')),
                Field('fname', type='string', notnull=True, default='index', label=T('Function Name')),
                Field('args', type='list:string', notnull=True, label=T('Function Agruments')),
                Field('vars', type='list:string', notnull=True, label=T('Function Variables')),
                Field('description', type='text', label=T('Function Description')),
                auth.signature, format='%(name)s')
#########################################
# Named actions (e.g. 'View') that permissions can grant on a function.
db.define_table('auth20_action',
                Field('name', type='string', notnull=True, unique=True, label=T('Action Name')),
                Field('description', type='text', label=T('Action Description')),
                auth.signature, format='%(name)s')
#########################################
# Named row-level data scopes: a target table plus a DAL condition string
# (default 'id>0', i.e. all rows) restricting which records a permission covers.
db.define_table('auth20_data',
                Field('name', type='string', notnull=True, unique=True, label=T('Data Name')),
                Field('table_name', type='string', notnull=True, label=T('Table Data')),
                Field('data_condition', type='string', notnull=True, default='id>0', label=T('Data Condition')),
                auth.signature, format='%(name)s')
#########################################
# Grants: for an auth group, which actions are allowed on which functions,
# optionally restricted to an auth20_data row-level scope.
# NOTE(review): data_id uses '%(id)s' as its display format (shows the raw id,
# not the name) - confirm this is intentional.
db.define_table('auth20_permission',
                Field('group_id', type='reference auth_group', label=T('Auth Group')),
                Field('actions', type='list:reference auth20_action', label=T('Auth Actions'),
                      requires=IS_IN_DB(db, 'auth20_action.id', '%(name)s', multiple=(1, 10000), sort=True)),
                Field('functions', type='list:reference auth20_function', label=T('Auth Functions'),
                      requires=IS_IN_DB(db, 'auth20_function.id',
                                        lambda r: '%s > %s > %s' % (r.aname, r.category.name, r.name),
                                        multiple=(1, 10000), sort=True)),
                Field('data_id', type='reference auth20_data', label=T('Auth Data'),
                      requires=IS_EMPTY_OR(IS_IN_DB(db, 'auth20_data.id', '%(id)s'))),
                auth.signature)
def __authorize():
    """Decorator factory enforcing the auth20 permission model on an action.

    The wrapped action runs only when the logged-in user belongs to a group
    whose auth20_permission grants the 'View' action on the current
    application/controller/function.  The matching permissions (actions plus
    row-level data scopes) are attached to ``auth.user.permissions`` before
    the action executes.  Unauthorized RESTful requests get HTTP 401; normal
    web requests are redirected to default/index with a flash message.
    """
    from functools import wraps

    def decorate(action):
        @wraps(action)  # replaces the manual __doc__/__name__/__dict__ copying
        def f(*a, **b):
            if auth.user:
                permissions = list()
                for group_id in auth.user_groups.keys():
                    # Join the group's permissions with the function/action
                    # tables for the request currently being served.
                    query = db(db.auth20_permission.group_id == group_id)
                    query = query(db.auth20_permission.functions.contains(db.auth20_function.id))
                    query = query(db.auth20_function.aname == request.application)
                    query = query(db.auth20_function.cname == request.controller)
                    query = query(db.auth20_function.fname == request.function)
                    query = query(db.auth20_permission.actions.contains(db.auth20_action.id))
                    roles = query(db.auth20_action.name == 'View').select(db.auth20_permission.actions,
                                                                          db.auth20_permission.data_id)
                    for role in roles:
                        actions = db(db.auth20_action.id.belongs(role.actions))
                        actions = actions.select(db.auth20_action.name).as_list()
                        data = db(db.auth20_data.id == role.data_id).select(db.auth20_data.table_name,
                                                                            db.auth20_data.data_condition).as_list()
                        permissions.append(dict(actions=actions, data=data))
                if permissions:
                    auth.user.permissions = permissions
                    return action(*a, **b)
            # Not logged in, or no matching 'View' permission was found.
            if request.is_restful:
                raise HTTP(401)
            else:
                session.flash = "You don't have permission to access!"
                redirect(URL(c='default', f='index'))
        return f
    return decorate
auth.requires_authorize = __authorize
def __authorize_token(pos=0, total=1):
    """Decorator factory validating an API token passed positionally.

    *pos* is the index of the token among the action's positional arguments
    and *total* the expected positional-argument count.  On success the token
    argument is replaced by the matching auth_user id, last_login is bumped,
    and the action runs.  On failure a dict(result=False, reason, message)
    is returned: reason 0 = bad arity, 1 = unknown token, 2 = expired token,
    -1 = unexpected error.
    """
    from functools import wraps

    def decorate(action):
        @wraps(action)  # replaces the manual __doc__/__name__/__dict__ copying
        def f(*a, **b):
            messages = Storage()
            messages.token_invalid = "Token is invalid!"
            messages.token_expired = "Token is expired!"
            messages.parameter_invalid = "Parameters are invalid!"
            messages.error = "Error occur!"
            # Reject calls whose positional arity does not match the contract.
            if len(a) != total or pos >= total:
                return dict(result=False, reason=0, message=messages.parameter_invalid)
            from datetime import datetime
            cur_time = datetime.today()
            token = a[pos]
            try:
                query = db(db.auth_user.token == token)
                user = query.select(db.auth_user.last_login, db.auth_user.id)
                if len(user) == 0:
                    return dict(result=False, reason=1, message=messages.token_invalid)
                elif (cur_time - user.first().last_login).total_seconds() > auth.settings.expiration:
                    return dict(result=False, reason=2, message=messages.token_expired)
                # Swap the token argument for the resolved user id before delegating.
                a = [(x if not i == pos else user.first().id) for i, x in enumerate(a)]
                db(db.auth_user.token == token).update(last_login=cur_time)
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
                import traceback
                traceback.print_exc()
                return dict(result=False, reason=-1, message=messages.error)
            return action(*a, **b)
        return f
    return decorate
auth.requires_token = __authorize_token
| [
"caotien189@gmail.com"
] | caotien189@gmail.com |
809de9e29748f706ece9fd93d10433c46825738b | 1a7ccda7c8a5daa7b4b17e0472dd463cc3227e62 | /solve_1/39.combination-sum.py | cef4667dac1d4d8adf2945ef8b1bc38e6676a449 | [] | no_license | yoshikipom/leetcode | c362e5b74cb9e21382bf4c6275cc13f7f51b2497 | 08f93287d0ef4348ebbeb32985d3a544ecfee24c | refs/heads/main | 2023-09-02T18:46:08.346013 | 2023-08-31T23:07:55 | 2023-08-31T23:07:55 | 323,344,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #
# @lc app=leetcode id=39 lang=python3
#
# [39] Combination Sum
#
# @lc code=start
from typing import List
class Solution:
    """Classic unbounded-combination backtracking (LeetCode 39)."""

    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return every distinct multiset of candidates summing to target.

        A candidate may be reused any number of times; combinations are
        produced in the same depth-first order as the candidate list.
        """
        self.result = []

        def explore(chosen: List[int], remaining: int, start: int) -> None:
            if remaining == 0:
                # Copy: `chosen` keeps mutating as we backtrack.
                self.result.append(list(chosen))
                return
            if remaining < 0:
                return
            for idx in range(start, len(candidates)):
                chosen.append(candidates[idx])
                # Pass `idx` (not idx + 1) so the same value may be reused.
                explore(chosen, remaining - candidates[idx], idx)
                chosen.pop()

        explore([], target, 0)
        return self.result
# @lc code=end
| [
"yoshiki.shino.tech@gmail.com"
] | yoshiki.shino.tech@gmail.com |
def login_disponivel(nome, lista):
    """Return `nome` if it is not taken in `lista`; otherwise return the first
    available login of the form nome1, nome2, nome3, ...

    Fixes the original cumulative-suffix bug where suffixes were appended
    without removing the previous one ("ana" could become "ana12" instead
    of "ana2").
    """
    if nome not in lista:
        return nome
    sufixo = 1
    while nome + str(sufixo) in lista:
        sufixo += 1
    return nome + str(sufixo)
"you@example.com"
] | you@example.com |
f532912962feb01c6b4b29fddde3864c6f0ff158 | 3169b5a8191f45140eeeea5422db5ebf7b73efd3 | /Projects/BadProjects/ALL/testJoy.py | 45870a36494be3e33eb60d803660cdaf8b8730d7 | [] | no_license | ArtemZaZ/OldAllPython-projects | e56fdae544a26418414e2e8717fe3616f28d73f9 | 01dc77f0342b8f6403b9532a13f3f89cd42d2b06 | refs/heads/master | 2021-09-07T19:17:03.607785 | 2018-02-27T20:34:03 | 2018-02-27T20:34:03 | 105,284,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import time
import RTCjoystic
# Hardware demo: connect to a Linux joystick device via RTCjoystic and poll it.
J = RTCjoystic.Joystick()
J.connect("/dev/input/js0")
J.info()
time.sleep(2)
J.start()
def hand():
    # Callback fired when the 'trigger' button is pressed.
    print("IT'S ALIVE")
J.connectButton('trigger', hand)
while(True):
    # Poll and print the z-axis value ten times per second.
    print(J.Axis.get('z'))
    #print(J.Buttons.get('trigger'))
    time.sleep(0.1)
J.exit()  # NOTE(review): unreachable - the loop above never breaks
| [
"temka.911@mail.ru"
] | temka.911@mail.ru |
bb12fdf31aff0eabe212b1d61dca6979087dd933 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02690/s266356249.py | 51ac877f9ea77fc515e2789fa39600f138aeb94e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import sys
input = sys.stdin.readline  # shadows the builtin for faster reads (contest idiom)
X = int(input())
# Brute-force search for integers A, B with A**5 - B**5 == X.
A = 1
B = 0
while True:
    # Decreasing B makes -B**5 grow, so this raises A**5 - B**5 to at least X.
    while A**5 - B**5 < X:
        B -= 1
    if A**5 - B**5 == X:
        print(A,B)
        exit()
    # No B works for this A: restart B at A and move on to the next A.
    B = A
A += 1 | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7b481e4611b931de492ac27e94a94a9b2433a087 | cca6bcec6528417842ce4cc9aee2b891c37fa421 | /pogo/proto/Networking/Responses/EchoResponse_pb2.py | 1368215e1da1ecd57acf583e22bef01d0adfed59 | [] | no_license | p0psicles/pokemongo-api | 2c1b219dcc6441399a787280e3df9446761d2230 | c1e20ae5892b045ac0b035b0f50254d94a6ac077 | refs/heads/master | 2021-01-16T23:08:42.501756 | 2016-07-20T20:51:54 | 2016-07-20T20:51:54 | 63,850,559 | 2 | 0 | null | 2016-07-21T08:17:44 | 2016-07-21T08:17:44 | null | UTF-8 | Python | false | true | 2,129 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Networking/Responses/EchoResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='Networking/Responses/EchoResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n\'Networking/Responses/EchoResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\x1f\n\x0c\x45\x63hoResponse\x12\x0f\n\x07\x63ontext\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ECHORESPONSE = _descriptor.Descriptor(
name='EchoResponse',
full_name='POGOProtos.Networking.Responses.EchoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='context', full_name='POGOProtos.Networking.Responses.EchoResponse.context', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=107,
)
DESCRIPTOR.message_types_by_name['EchoResponse'] = _ECHORESPONSE
EchoResponse = _reflection.GeneratedProtocolMessageType('EchoResponse', (_message.Message,), dict(
DESCRIPTOR = _ECHORESPONSE,
__module__ = 'Networking.Responses.EchoResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.EchoResponse)
))
_sym_db.RegisterMessage(EchoResponse)
# @@protoc_insertion_point(module_scope)
| [
"contact@dylanmadisetti.com"
] | contact@dylanmadisetti.com |
87ecc947f22dcc7fbfb81ec478ee7e66c27ae353 | 07b7d6244732a3fd52d431e5d3d1ab7f651b4ab0 | /src/exemplos/05_EstruturasDeDados/06_Arquivo/05-texto.py | e40ece22b49f17df2ca33cb4c3c7fd5b715ffaba | [] | no_license | GTaumaturgo/CIC-APC | 19832050efe94dd29a78bde8b6e121b990ccd6b9 | cc3b7b132218855ad50ddbe63bbdd6f94f273c54 | refs/heads/master | 2020-04-05T22:54:13.414180 | 2017-01-18T17:20:25 | 2017-01-18T17:20:25 | 42,275,384 | 0 | 0 | null | 2016-04-10T14:25:16 | 2015-09-10T23:05:54 | C | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
# @package: 05-texto.py
# @author: Guilherme N. Ramos (gnramos@unb.br)
# @disciplina: Algoritmos e Programação de Computadores
#
# Exemplo de uso de arquivo texto.
if __name__ == '__main__':
    # Demo of text-file writing and seeking: write a line, rewind, overwrite
    # the first character with 'D'.
    arquivo = 'apc.python.txt'
    try:
        with open(arquivo, 'w') as f:
            f.write('disse o corvo, \'Nunca mais\'.\n')
            f.seek(0)  # There is no 'rewind' function, but this is
                       # equivalent behaviour
            f.write('D')
    except OSError:
        # Narrowed from a bare `except:` so only I/O failures are reported;
        # programming errors now surface normally.
        print('Erro!')
| [
"ramos@gnramos.com"
] | ramos@gnramos.com |
536f861ee0f71641bd33ae7c7e32c1d559773a41 | 280a9dda130e27869c5ba791e7fbf502d5ca075c | /linskel.py | c7881d32f5f45fffda182ffa1d0b35b098652ad7 | [] | no_license | clbarnes/linear_skeleton | d6a9ad9ef565251143f9e5e419544a820d9cee80 | 96599a89ddd7509029069c93983388c323e27703 | refs/heads/master | 2021-06-02T11:09:53.069883 | 2018-07-11T16:56:06 | 2018-07-11T16:56:06 | 140,604,257 | 0 | 0 | null | 2021-04-28T21:54:58 | 2018-07-11T16:49:27 | Python | UTF-8 | Python | false | false | 3,001 | py | from collections import deque
import networkx as nx
from skimage.morphology import skeletonize
import numpy as np
from scipy.ndimage.filters import convolve
# 3x3 kernel assigning each of a pixel's 8 neighbours a distinct power of two,
# so a single convolution yields a bitmask of occupied neighbours per pixel.
kernel = 2 ** np.array([
    [4, 5, 6],
    [3, 0, 7],
    [2, 1, 0]
])
kernel[1, 1] = 0  # the centre pixel itself must not contribute to the mask

# int_reprs[v] holds, per bit of v (MSB first), either 0 or that bit's weight
# 8..1 - used as indices into neighbour_locs (index 0 = "no neighbour").
int_reprs = np.zeros((256, 8), dtype=np.uint8)
for i in range(256):  # was range(255): the all-neighbours mask 255 was left as zeros
    int_reprs[i] = [int(c) for c in np.binary_repr(i, 8)]
int_reprs *= np.array([8, 7, 6, 5, 4, 3, 2, 1], dtype=np.uint8)

# (dy, dx) offsets of the 8 neighbours; entry 0 is the "no neighbour" sentinel.
neighbour_locs = np.array([
    (0, 0),
    (-1, -1),
    (-1, 0),
    (-1, 1),
    (0, 1),
    (1, 1),
    (1, 0),
    (1, -1),
    (0, -1)
])
def linearise_img(bin_im):
    """
    Skeletonise a binary image and yield the multilinestrings present in it.
    Yields one item per connected component of the skeleton.
    Each connected component is represented by a list with one item per linestring.
    Each linestring is a list of points; each point is a (y, x) tuple.
    i.e. to get the y coordinate of the first point in the first linestring of the
    first connected component, use ``next(iter(result))[0][0]``.
    N.B. does not close rings.
    :param bin_im: 2D array whose values are exactly {0, 1} (asserted below)
    :return: generator of per-component lists of linestrings
    """
    assert np.allclose(np.unique(bin_im), np.array([0, 1]).astype(bin_im.dtype))
    skeletonized = skeletonize(bin_im.astype(np.uint8)).astype(np.uint8)
    # Convolving with the power-of-two kernel encodes each pixel's occupied
    # neighbours as a bitmask; multiplying by the skeleton zeroes non-skeleton pixels.
    convolved = (
        convolve(skeletonized, kernel, mode="constant", cval=0, origin=[0, 0]) * skeletonized
    ).astype(np.uint8)
    ys, xs = convolved.nonzero()  # n length
    location_bits = int_reprs[convolved[ys, xs]]  # n by 8
    diffs = neighbour_locs[location_bits]  # n by 8 by 2
    # Build a graph of skeleton pixels, connecting each pixel to its occupied
    # neighbours with edges weighted by Euclidean distance.
    g = nx.Graph()
    for yx, this_diff in zip(zip(ys, xs), diffs):
        # Drop the (0, 0) sentinel rows before computing neighbour coordinates.
        nonself = this_diff[np.abs(this_diff).sum(axis=1) > 0]
        partners = nonself + yx
        for partner in partners:
            g.add_edge(
                yx, tuple(partner),
                weight=np.linalg.norm(partner - yx)
            )
    # Minimum spanning forest removes cycles so each component becomes a tree.
    msf = nx.minimum_spanning_tree(g)
    paths = dict(nx.all_pairs_shortest_path(msf))
    for nodes in nx.connected_components(msf):
        mst = msf.subgraph(nodes)
        lines = []
        # Degree-1 nodes are the line endpoints; the smallest is the shared source.
        src, *leaves = sorted(node for node, deg in mst.degree if deg == 1)
        visited = set()
        for leaf in leaves:
            path = paths[src][leaf]
            # Split each source->leaf path into the prefix already emitted and
            # the new suffix, re-attaching the last shared point so lines join up.
            existing_path = []
            new_path = []
            for item in path:
                if item in visited:
                    existing_path.append(item)
                else:
                    new_path.append(item)
            new_path = existing_path[-1:] + new_path
            lines.append(new_path)
            visited.update(new_path)
        yield lines
if __name__ == '__main__':
    import imageio
    from timeit import timeit
    # Smoke test / micro-benchmark on a sample image (grey values scaled to {0, 1}).
    im = imageio.imread("img/two_lines.png", pilmode='L') // 255
    n = 50
    # Average seconds per full linearisation over n runs.
    time = timeit("list(linearise_img(im))", number=n, globals=globals()) / n
    coords = list(linearise_img(im))
    print(time)
| [
"barnesc@janelia.hhmi.org"
] | barnesc@janelia.hhmi.org |
f3534da8e15a9fda51d59846cc158f9e04c9e5f9 | 38372fcc2ca58798176267360ff07f886400bc7b | /core_scheduled_tasks/functions.py | be85100a4bd8f11b158c46009f7b925f70c2b6a7 | [] | no_license | portman-asset-finance/_GO_PAF | 4eb22c980aae01e0ad45095eb5e55e4cb4eb5189 | ee93c49d55bb5717ff1ce73b5d2df6c8daf7678f | refs/heads/master | 2020-09-21T05:22:10.555710 | 2019-11-28T16:44:17 | 2019-11-28T16:44:17 | 224,691,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py |
from datetime import datetime, timedelta
#####################
# #
# FUNCTION SKELETON #
# #
#####################
#
# def function_name_as_in_go_scheduler_table(job_qs, job_parameters):
#
# # Step 1: Import any function-specific modules
# # --------------------------------------------
# from a_module import a_function
#
# # Step 2: Execute your function
# # -----------------------------
# a_function.a_task(**job_parameters)
#
# # Step 3: Check the next run date
# # -------------------------------
# job_qs.next_run = job_qs.next_run + timedelta(days=1)
# job_qs.save()
#
#
# * EXISTING FUNCTIONS BELOW DO NOT WORK WITHOUT DEPENDENCIES NOT PROVIDED AS PART OF THE WORK PACKAGE*
def lazybatch(job_qs, job_params):
    """Scheduled job: build lazybatch batches, then reschedule for tomorrow.
    :param job_qs: go_scheduler row for this job; its next_run is updated here
    :param job_params: kwargs forwarded verbatim to create_batches
    """
    from core_lazybatch.functions import create_batches
    # Step 1: Create batches.
    # =======================
    create_batches(**job_params)
    # Step 2: Change next run date to tomorrow.
    # =========================================
    job_qs.next_run = datetime.now() + timedelta(days=1)
    job_qs.save()
def sagewisdom(job_qs, job_params):
    """Scheduled job: turn unprocessed Sage batch headers into Sage transactions.
    NOTE(review): unlike the other jobs, next_run is never updated (the
    rescheduling lines below are commented out) - confirm this is intentional.
    """
    from core_sage_export.models import SageBatchHeaders
    from core_sage_export.functions import build_sage_transactions_from_batch
    # Step 1: Pull in unprocessed Sage Batch Headers
    # ==============================================
    sb_recs = SageBatchHeaders.objects.filter(processed__isnull=True)
    # Step 2: Process.
    # ================
    for sb_rec in sb_recs:
        # Only rows that actually carry a batch header payload are exported.
        if sb_rec.batch_header:
            build_sage_transactions_from_batch(sb_rec)
    # Step 3: Change next run date to tomorrow.
    # =========================================
    # job_qs.next_run = job_qs.next_run + timedelta(minutes=10)
    # job_qs.save()
def companyinspector(job_qs, job_params):
    """Scheduled job: refresh Companies House comparison data, then reschedule.
    :param job_qs: go_scheduler row for this job; its next_run is updated here
    :param job_params: kwargs forwarded verbatim to Compare_Company_House_Data
    """
    from core_companies_house.functions import Compare_Company_House_Data
    # Step 1: Company information.
    # ============================
    Compare_Company_House_Data(**job_params)
    # Step 2: Reschedule for tomorrow (the original comment said "an hour",
    # but the code adds a day).
    # ========================================
    job_qs.next_run = datetime.now() + timedelta(days=1)
    job_qs.save()
| [
"portman-asset-finance@outlook.com"
] | portman-asset-finance@outlook.com |
b512093d11373376c94d052c0bfe28753e2b2185 | 9ebeb33e168798d41b54a8ab474b00c160de43a2 | /orders/admin.py | df86378c01955d423140cb8c8bebc5fec8b3fc35 | [] | no_license | danielspring-crypto/tritrade | 0c1f961138b9e4892d53ece98b54094be0e4c4b9 | 6fc7c644c1657a7744703cd144be7fbb5320397c | refs/heads/master | 2022-12-04T13:21:07.761942 | 2020-08-28T00:02:36 | 2020-08-28T00:02:36 | 290,908,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | from django.contrib import admin
from .models import Order, OrderItem
import csv
import datetime
from django.http import HttpResponse
from django.urls import reverse
from django.utils.safestring import mark_safe
class OrderItemInline(admin.TabularInline):
    """Tabular inline for editing an order's line items on the Order admin page."""
    model = OrderItem
    raw_id_fields = ['product']
def export_to_csv(modeladmin, request, queryset):
    """Admin action: stream the selected rows as a CSV file attachment.

    Only concrete single-valued fields are exported (many-to-many and
    one-to-many relations are skipped); datetimes are rendered dd/mm/yyyy.
    """
    opts = modeladmin.model._meta
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment;filename={}.csv'.format(opts.verbose_name)
    writer = csv.writer(response)
    export_fields = [f for f in opts.get_fields()
                     if not f.many_to_many and not f.one_to_many]
    # Header row uses the human-readable field labels.
    writer.writerow([f.verbose_name for f in export_fields])
    for obj in queryset:
        row = []
        for f in export_fields:
            cell = getattr(obj, f.name)
            if isinstance(cell, datetime.datetime):
                cell = cell.strftime('%d/%m/%Y')
            row.append(cell)
        writer.writerow(row)
    return response
export_to_csv.short_description = 'Export to CSV'
def order_detail(obj):
    """Render a 'View' link to the custom admin order-detail page for `obj`."""
    return mark_safe('<a href="{}">View</a>'.format(reverse('orders:admin_order_detail', args=[obj.id])))
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    """Order admin: inline line items, CSV export action, per-row detail link."""
    list_display = ['id', 'first_name', 'last_name', 'email', 'address', 'postal_code', 'city', 'paid', 'created', 'updated', order_detail]
    list_filter = ['paid', 'created', 'updated']
    inlines = [OrderItemInline]
    actions = [export_to_csv]
| [
"you@example.com"
] | you@example.com |
19bdaab92ff9acf46406c787601bdbd5030cbce5 | d88397be1c6a31985bc2283280e743fd3b988dd1 | /nncf/common/pruning/utils.py | 1a4881128bf37c0d29153e2ab5e79af087528b48 | [
"Apache-2.0"
] | permissive | sshyran/openvino-nncf-pytorch | f5e09066a216fa786927937a91a0e6742f347660 | fd02652950cd803a36f5283f5a5df999bb45433b | refs/heads/develop | 2023-04-18T06:58:54.646669 | 2021-03-12T15:41:39 | 2021-03-12T15:41:39 | 347,374,166 | 0 | 0 | Apache-2.0 | 2023-04-03T23:52:21 | 2021-03-13T13:11:32 | null | UTF-8 | Python | false | false | 8,826 | py | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from functools import partial
from typing import List, Tuple, Optional
from nncf.common.graph.graph import NNCFGraph
from nncf.common.graph.graph import NNCFNode
from nncf.common.graph.module_attributes import ConvolutionModuleAttributes
from nncf.common.utils.registry import Registry
def is_grouped_conv(node: NNCFNode) -> bool:
    """Return True if `node` is a convolution with more than one group."""
    return isinstance(node.module_attributes, ConvolutionModuleAttributes) \
           and node.module_attributes.groups != 1
def get_sources_of_node(nncf_node: NNCFNode, graph: NNCFGraph, sources_types: List[str]) -> List[NNCFNode]:
    """
    A source of `nncf_node` is a node whose type is in `sources_types` such that a path
    exists from it to `nncf_node` with no intermediate node of a type from `sources_types`.
    :param sources_types: list of source node types
    :param nncf_node: NNCFNode to get the sources of
    :param graph: NNCF graph to work with
    :return: list of all source nodes
    """
    visited = {node_id: False for node_id in graph.get_all_node_idxs()}
    partial_traverse_function = partial(traverse_function, type_check_fn=lambda x: x in sources_types,
                                        visited=visited)
    nncf_nodes = [nncf_node]
    # If the start node is itself of a source type, begin from its predecessors.
    if nncf_node.node_type in sources_types:
        nncf_nodes = graph.get_previous_nodes(nncf_node)
    source_nodes = []
    for node in nncf_nodes:
        # The trailing False presumably walks the graph backwards (towards the inputs).
        source_nodes.extend(graph.traverse_graph(node, partial_traverse_function, False))
    return source_nodes
def find_next_nodes_not_of_types(graph: NNCFGraph, nncf_node: NNCFNode, types: List[str]) -> List[NNCFNode]:
    """
    Traverse the graph from `nncf_node` to find the first nodes that are NOT of a type
    from `types`.  "First nodes with some condition" means nodes:
    - for which the condition is true
    - reachable from `nncf_node` such that no other node on the path from `nncf_node`
    to them fulfils the condition
    :param graph: graph to work with
    :param nncf_node: NNCFNode to start the search from
    :param types: list of types
    :return: list of next nodes of `nncf_node` whose type is not in `types`
    """
    visited = {node_id: False for node_id in graph.get_all_node_idxs()}
    partial_traverse_function = partial(traverse_function, type_check_fn=lambda x: x not in types,
                                        visited=visited)
    nncf_nodes = [nncf_node]
    # If the start node already satisfies the condition, begin from its successors.
    if nncf_node.node_type not in types:
        nncf_nodes = graph.get_next_nodes(nncf_node)
    next_nodes = []
    for node in nncf_nodes:
        next_nodes.extend(graph.traverse_graph(node, partial_traverse_function))
    return next_nodes
def get_next_nodes_of_types(graph: NNCFGraph, nncf_node: NNCFNode, types: List[str]) -> List[NNCFNode]:
    """
    Find nodes with a type from `types` reachable from `nncf_node` such that no other
    node on the path from `nncf_node` to them has a type from `types`.
    :param graph: graph to work with
    :param nncf_node: NNCFNode to start the search from
    :param types: list of types to find
    :return: list of next nodes of `nncf_node` with a type from `types`
    """
    sources_types = types
    visited = {node_id: False for node_id in graph.get_all_node_idxs()}
    partial_traverse_function = partial(traverse_function, type_check_fn=lambda x: x in sources_types,
                                        visited=visited)
    nncf_nodes = [nncf_node]
    # If the start node itself matches, begin from its successors instead.
    if nncf_node.node_type in sources_types:
        nncf_nodes = graph.get_next_nodes(nncf_node)
    next_nodes = []
    for node in nncf_nodes:
        next_nodes.extend(graph.traverse_graph(node, partial_traverse_function))
    return next_nodes
def get_rounded_pruned_element_number(total: int, sparsity_rate: float, multiple_of: int = 8) -> int:
    """
    Calculates how many of `total` elements to zero out so that the number of
    remaining elements is a multiple of `multiple_of` and the realised sparsity
    approximates `sparsity_rate`.  The remaining-element count is always
    rounded up, so the returned pruned count never exceeds the exact target.
    :param total: total number of elements
    :param sparsity_rate: proportion of zero elements in total
    :param multiple_of: granularity the kept-element count must align to
    :return: number of elements to be zeroed
    """
    kept = total - total * sparsity_rate
    kept_aligned = math.ceil(kept / multiple_of) * multiple_of
    pruned = total - kept_aligned
    return pruned if pruned > 0 else 0
def traverse_function(node: NNCFNode, output: List[NNCFNode], type_check_fn, visited) \
        -> Tuple[bool, List[NNCFNode]]:
    """
    Visitor used with graph traversal: collects nodes whose type satisfies
    `type_check_fn` into `output`, marking each visited node in `visited`.
    Returns (keep_going, output); a node that fails the check returns False.
    """
    if visited[node.node_id]:
        return True, output
    visited[node.node_id] = True
    if type_check_fn(node.node_type):
        output.append(node)
        return True, output
    return False, output
def get_first_nodes_of_type(graph: NNCFGraph, op_types: List[str]) -> List[NNCFNode]:
    """
    Find the first nodes in the graph with a type from `op_types`.
    "First" means: a node of such a type reachable from a graph input via a path
    containing no other node whose type is in `op_types`.
    :param op_types: types of modules to track
    :param graph: graph to work with
    :return: list of all first nodes with a type from `op_types`
    """
    graph_roots = graph.get_input_nodes()  # NNCFNodes here
    visited = {node_id: False for node_id in graph.get_all_node_idxs()}
    partial_traverse_function = partial(traverse_function,
                                        type_check_fn=lambda x: x in op_types,
                                        visited=visited)
    first_nodes_of_type = []
    for root in graph_roots:
        first_nodes_of_type.extend(graph.traverse_graph(root, partial_traverse_function))
    return first_nodes_of_type
def get_last_nodes_of_type(graph: NNCFGraph, op_types: List[str]) -> List[NNCFNode]:
    """
    Find the last nodes in the graph with a type from `op_types`.
    "Last" means: a node of such a type from which a path to a model output exists
    containing no other node whose type is in `op_types`.
    :param op_types: types of modules to track
    :param graph: graph to work with
    :return: list of all last pruned nodes
    """
    graph_outputs = graph.get_graph_outputs()  # NNCFNodes here
    visited = {node_id: False for node_id in graph.get_all_node_idxs()}
    partial_traverse_function = partial(traverse_function,
                                        type_check_fn=lambda x: x in op_types,
                                        visited=visited)
    last_nodes_of_type = []
    for output in graph_outputs:
        # The trailing False presumably walks backwards from the outputs.
        last_nodes_of_type.extend(graph.traverse_graph(output, partial_traverse_function, False))
    return last_nodes_of_type
def get_previous_conv(graph: NNCFGraph, nncf_node: NNCFNode,
                      pruning_types: List[str], stop_propagation_ops: List[str]) -> Optional[NNCFNode]:
    """
    Return the single source convolution of `nncf_node`.
    If the node has a source of another type, or more than one source, return None.
    """
    sources = get_sources_of_node(nncf_node, graph, pruning_types + stop_propagation_ops)
    if len(sources) == 1 and sources[0].node_type in pruning_types:
        return sources[0]
    return None
class PruningOperationsMetatypeRegistry(Registry):
    """Registry mapping framework op names to their pruning metatype classes."""
    def __init__(self, name):
        super().__init__(name)
        # Maps a version-agnostic op name to the metatype class registered for it.
        self._op_name_to_op_class = {}
    def register(self, name=None):
        """Class decorator: register a metatype under `name` (default: the class
        name) and additionally index it by every op alias the metatype declares."""
        name_ = name
        super_register = super()._register
        def wrap(obj):
            cls_name = name_
            if cls_name is None:
                cls_name = obj.__name__
            super_register(obj, cls_name)
            op_names = obj.get_all_op_aliases()
            for name in op_names:
                name = self.get_version_agnostic_name(name)
                if name not in self._op_name_to_op_class:
                    self._op_name_to_op_class[name] = obj
                else:
                    # Re-registering an alias is only legal for the same metatype.
                    assert self._op_name_to_op_class[name] == obj, \
                        "Inconsistent operator type registry - single patched op name maps to multiple metatypes!"
            return obj
        return wrap
    def get_operator_metatype_by_op_name(self, op_name: str):
        """Return the registered metatype for `op_name`, or None if unknown."""
        if op_name in self._op_name_to_op_class:
            return self._op_name_to_op_class[op_name]
        return None
    @staticmethod
    def get_version_agnostic_name(name):
        # Framework-specific subclasses normalise versioned op names here.
        raise NotImplementedError
| [
"noreply@github.com"
] | sshyran.noreply@github.com |
d20e0e97a7ef4f224d978a799662d41217cf965c | 12b5711e8bafc24216de2fec68e0d0d8875453d6 | /app.py | aef872ec1ee72a88fdd5b2cb1e2acdf99a028989 | [
"MIT"
] | permissive | miguelgrinberg/microflack_ui | 1167888e3fd7de40150c7e48f43e550f0b9430b6 | 2a46f4b3010d80c516b3937273ac4939e03aad67 | refs/heads/master | 2023-09-01T22:28:48.945408 | 2019-10-22T09:02:48 | 2019-10-22T09:02:48 | 89,034,662 | 19 | 12 | MIT | 2022-05-25T01:35:29 | 2017-04-22T00:52:44 | JavaScript | UTF-8 | Python | false | false | 515 | py | import os
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
import config
app = Flask(__name__)
# Pick the config class from config.py based on FLASK_CONFIG, e.g. 'dev' -> DevConfig.
config_name = os.environ.get('FLASK_CONFIG', 'dev')
app.config.from_object(getattr(config, config_name.title() + 'Config'))
Bootstrap(app)
@app.route('/')
def index():
    """Serve the client-side application shell (templates/index.html)."""
    return render_template('index.html',
                           use_socketio=not app.config['NO_SOCKETIO'])
if __name__ == '__main__':
    app.run() # pragma: no cover
| [
"miguel.grinberg@gmail.com"
] | miguel.grinberg@gmail.com |
32609732a13129cd185a37d0a5726b983c199eb9 | 14675f0c66fb4f4eeaa6ad1e8e691b9edf8f0bdb | /All other combo programs/List_Set_Dict_Generator_Comprehensions.py | 4ba1a3a2f0b5912a27273c61ebc118395b6c377d | [] | no_license | abhishekjoshi1991/Python_Learning | 9a94529643eac7394615289e2ecd96106e70ddb8 | a74293d0776304638b5cf976b3534481e57b17f2 | refs/heads/master | 2023-04-16T02:21:30.588052 | 2021-04-21T13:58:29 | 2021-04-21T13:58:29 | 360,176,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | #List, Set, Dict, Generator Comprehensions
#-------------------------------------------------------------
#1. List Comprehension
#-------------------------------------------------------------
'''
uses [] symbol to define
syntax: [expression for each in iterables]
syntax: [expression for each in iterables if condition]
if there is one condition it can be mentioned after for and
if there are two conditions then it has to be mentioned
before for
List comprehensions are used for creating new lists
from other iterables like tuples, strings, arrays,
lists, etc.
'''
print('\n')
print('list comprehension')
#Program-1
#Squares of numbers from 1 to 10
print([x**2 for x in range(1,11)])
#Program-2
#squares of even numbers from 1 to 10
print([x**2 for x in range(1,11) if x%2==0])
#Program-3
#squares of even numbers and cubes of odd nums from 1 to 10
print([x**2 if x%2==0 else x**3 for x in range(1,11)])
#Program-3
#to print hello world many times as per len of string
print(['hello' for x in 'ethans'])
#Program-4
#[11,33,50]-->113350
l1=[11,33,50]
print(int(''.join(str(i) for i in l1)))
#Program-5
#Program to print table of 5
print([i*5 for i in range(1,11)])
#Program-6
#Nested list comprehension
print([[j for j in range(1,5)] for i in range(0,3)])
#Program-7
#list comprehension with lambda
#to display table of 6
print(list(map(lambda x:x*6,[x for x in range(1,11)])))
#Program-8
#Reverse each string in tuple
print([i[::-1] for i in ('Geeks', 'for', 'Geeks')])
#-------------------------------------------------------------
#2. Set Comprehension
#-------------------------------------------------------------
'''
gives u ique elements and uses {} brackets
'''
print('\n')
print('set comprehension')
l1=[1,2,3,2,2,3,5,3,5,3,5]
print({x**2 for x in l1})
#-------------------------------------------------------------
#3. generator Comprehension
#-------------------------------------------------------------
'''
uses () brackets.
it throws the object at o/p, data can be genereated through
it whenever required by iterating over it or type cast it
into suitable data type
'''
print('\n')
print('generator comprehension')
print((x**2 for x in range(1,10)))#throws object
a=(x**2 for x in range(1,10))
print(list(a))
#-------------------------------------------------------------
#4. dict Comprehension
#-------------------------------------------------------------
'''
it also uses {} brackets but it contains two expression
one for key and other for value.
two expressions are seperated by colon:
syntax:{expression1:expression2 for each in iterable}
'''
print('\n')
print('dict comprehension')
print({x:x**2 for x in range(1,11)})
#program to print {1:'A',2:'B'...}
print({x:chr(x+64) for x in range(1,27)})
#Program to inverse the given dict
d1={1:'A',2:'B',3:'C',4:'D'}
print({y:x for x,y in d1.items()})
print({d1[i]:i for i in d1})
#Program to find occurances of elements from list
l=[1,2,2,3,4,2,2,3,3,4,4,5,6]
print({i:l.count(i) for i in l})#as duplicated keys can not be present
#optimized program of above
print({i:l.count(i) for i in set(l)})
| [
"abhijsh61@gmail.com"
] | abhijsh61@gmail.com |
d1e3a7aff30186e5ac6890b52818949e7ad24857 | 9d74cbd676e629f8acdc68a4bac3dea0a98b9776 | /yc200/734.py | f47a58edc75b643d7171e490dd42a510a9e53f58 | [
"MIT"
] | permissive | c-yan/yukicoder | 01166de35e8059eaa8e3587456bba52f35bd0e44 | dcfd89b0a03759156dcea8c2e61a7705543dc0d4 | refs/heads/master | 2022-03-20T06:50:48.225922 | 2022-02-25T15:48:50 | 2022-02-25T15:48:50 | 237,735,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | A, B, C = map(int, input().split())
A *= 60
C *= 3600
t = A - B
if t <= 0:
print(-1)
else:
print((C + t - 1) // t)
| [
"c-yan@users.noreply.github.com"
] | c-yan@users.noreply.github.com |
a83a6406c180911bbf530d33d506e5cfbe3c240b | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/greedy/1383_Maximum_Performance_of_a_Team.py | 2a07bfc817f1708de9c9eecd51851bd7153eec00 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | import heapq
from typing import List
class Solution:
def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:
ef_sp = zip(efficiency, speed)
# it is important step. We will have sorted by efficiency in Desc order array
ef_sp = sorted(ef_sp, key=lambda x: x[0], reverse=True)
print(ef_sp)
speed_heap = []
perf = 0
sum_speed = 0
for e, s in ef_sp:
# since we first check and only then add to the queue, we use k-1 here
# once we have a team of k members, before adding a new member
if len(speed_heap) > k - 1:
# we extract the member with the lowest speed
sum_speed -= heapq.heappop(speed_heap)
heapq.heappush(speed_heap, s)
sum_speed += s
perf = max(perf, sum_speed * e)
return perf % (10 ** 9 + 7)
if __name__ == '__main__':
s = Solution()
s.maxPerformance(6,[2,10,3,1,5,8],[5,4,3,9,7,2],2) | [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
6135c68dd55c44f72e47a858216e8c329d8d7419 | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /mlflow/pipelines/__init__.py | 303c3ad04f3f5bd62219cd57ad2a4e454ab9bc32 | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 1,738 | py | # pylint: disable=line-too-long
"""
MLflow Pipelines is an opinionated framework for structuring MLOps workflows that simplifies and
standardizes machine learning application development and productionization. MLflow Pipelines
makes it easy for data scientists to follow best practices for creating production-ready ML
deliverables, allowing them to focus on developing excellent models. MLflow Pipelines also enables
ML engineers and DevOps teams to seamlessly deploy these models to production and incorporate them
into applications.
MLflow Pipelines provides production-quality :ref:`Pipeline Templates <pipeline-templates>` for
common ML problem types, such as regression & classification, and MLOps tasks, such as batch
scoring. Pipelines are structured as git repositories with YAML-based configuration files and
Python code, offering developers a declarative approach to ML application development that reduces
boilerplate.
MLflow Pipelines also implements a cache-aware executor for pipeline steps, ensuring that steps
are only executed when associated
:py:ref:`code or configurations <pipeline-repositories-key-concept>` have changed. This enables
data scientists, ML engineers, and DevOps teams to iterate very quickly within their domains of
expertise. MLflow offers |run() APIs| for executing pipelines, as well as an
|mlflow pipelines run CLI|.
For more information, see the :ref:`MLflow Pipelines Overview <pipelines>`.
.. |mlflow pipelines run CLI| replace:: :ref:`mlflow pipelines run <cli>` CLI
.. |run() APIs| replace:: :py:func:`run() <mlflow.pipelines.regression.v1.pipeline.RegressionPipeline.run>` APIs
"""
# pylint: enable=line-too-long
from mlflow.pipelines.pipeline import Pipeline
__all__ = ["Pipeline"]
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
79d103fb39c6bbed9c3fefba0bd1f83375a6608c | 8f24e443e42315a81028b648e753c50967c51c78 | /python/ray/tune/config_parser.py | bae26454b0392acb0dcec18d469fc0de2144e326 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 205 | py | from ray.tune._structure_refactor import warn_structure_refactor
from ray.tune.experiment.config_parser import * # noqa: F401, F403
warn_structure_refactor(__name__, "ray.tune.experiment.config_parser")
| [
"noreply@github.com"
] | simon-mo.noreply@github.com |
c89ad2f2c715d426be96676525cbe2cbbe7e083d | 6622c0cd289ec73078d5cf1cb88d9246160087ef | /src/day12二叉树问题/test.py | bfff24930aaced0e0f2c8f870420955dd998217d | [] | no_license | chifeng111/python_demo | 366540e8b284b4d3f2ac2377a9187a4be45192b5 | af3404935aa7148b7eb41e63b5bb782d5995e01b | refs/heads/master | 2021-01-02T08:53:53.033682 | 2017-10-14T12:34:22 | 2017-10-14T12:34:22 | 98,880,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | # coding: utf-8
'''
{0x00, 0x01},
{0x02, 0x03},
{0x03, 0x04},
{0x05, 0x06}
'''
import sys
def AddDependency(m1, m2, _map):
if m1 not in _map:
_map[m1] = [m2]
else:
_map[m1].append(m2)
if m2 not in _map:
_map[m2] = []
def ModulesCycleDependency(m, _map):
d = _map[m]
if not d:
return False
if m in d:
return True
while d:
v = d.pop()
if ModulesCycleDependency(v, _map):
return True
return False
def myprint(dependencyMap):
k = dependencyMap.keys()
k = list(k)
for i in range(len(k) - 1):
if dependencyMap[k[i]]:
print("{" + "{}, {}".format(k[i], 'true') + "},")
else:
print("{" + "{}, {}".format(k[i], 'false') + "},")
if dependencyMap[k[len(k) - 1]]:
print("{" + "{}, {}".format(k[len(k) - 1], 'true') + "}")
else:
print("{" + "{}, {}".format(k[len(k) - 1], 'false') + "}")
if __name__ == '__main__':
_map = {}
while True:
s = sys.stdin.readline().strip()
if s[-1] != ",":
m1, m2 = s[1:-1].split(",")[0], s[1:-1].split(",")[1]
m2 = m2[1:]
AddDependency(m1, m2, _map)
break
else:
m1, m2 = s[1:-2].split(",")[0], s[1:-2].split(",")[1]
m2 = m2[1:]
AddDependency(m1, m2, _map)
dependencyMap = {}
for i in _map.keys():
if ModulesCycleDependency(i, _map):
dependencyMap[i] = True
else:
dependencyMap[i] = False
myprint(dependencyMap)
| [
"liaozhenhua1129@gmail.com"
] | liaozhenhua1129@gmail.com |
7070b76cab38dbd62ff206a9f2c312c8e9a3b96e | 4266e9b1c59ddef83eede23e0fcbd6e09e0fa5cb | /vs/gyp/test/mac/gyptest-app-error.py | 8371bb26870f6403beff80717b411c765231fd6d | [
"BSD-3-Clause"
] | permissive | barrystudy/study | b3ba6ed652d1a0bcf8c2e88a2a693fa5f6bf2115 | 96f6bb98966d3633b47aaf8e533cd36af253989f | refs/heads/master | 2020-12-24T14:53:06.219236 | 2017-10-23T02:22:28 | 2017-10-23T02:22:28 | 41,944,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
expected_error = 'Old-style plist parser: missing semicolon in dictionary'
saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
def match(a, b):
if a == b:
return True
if not TestCmd.is_List(a):
a = a.split('\n')
if not TestCmd.is_List(b):
b = b.split('\n')
if expected_error in '\n'.join(a) + '\n'.join(b):
saw_expected_error[0] = True
return True
return False
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
test.run_gyp('test-error.gyp', chdir='app-bundle')
test.build('test-error.gyp', test.ALL, chdir='app-bundle')
# Ninja pipes stderr of subprocesses to stdout.
if test.format == 'ninja' and expected_error in test.stdout():
saw_expected_error[0] = True
if saw_expected_error[0]:
test.pass_test()
else:
test.fail_test()
| [
"2935973620@qq.com"
] | 2935973620@qq.com |
aa95213f5dfd8f12244b3ad4c0357b0ec1ab1210 | 90673e9b40a95a4f33b22172339cc67fd7b3cc1d | /boostedhiggs/nanoevents.py | b4050b4b3ea4f9f08587b02c90a36ec6ce6975b4 | [] | no_license | SangeonPark/boostedhiggs | f2b86bb9724e2c188692a9a319cc6ea16f2a78fd | bb2f0f6c111dd67a3aa2af215e8fe412cff71548 | refs/heads/master | 2023-04-11T00:59:23.618041 | 2019-12-11T23:16:37 | 2019-12-11T23:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,595 | py | import numpy as np
import awkward as ak
from .methods import (
METVector,
LorentzVector,
Candidate,
Electron,
Muon,
Photon,
Tau,
)
def _mixin(methods, awkwardtype):
'''Like ak.Methods.mixin but also captures methods in dir() and propagate docstr'''
newtype = type(methods.__name__ + 'Array', (methods, awkwardtype), {})
newtype.__dir__ = lambda self: dir(methods) + awkwardtype.__dir__(self)
newtype.__doc__ = methods.__doc__
return newtype
class NanoCollection(ak.VirtualArray):
@classmethod
def _lazyflatten(cls, array):
return array.array.content
@classmethod
def from_arrays(cls, arrays, name, methods=None):
'''
arrays : object
An object with attributes: columns, __len__, and __getitem__
where the latter returns virtual arrays or virtual jagged arrays
'''
jagged = 'n' + name in arrays.columns
columns = {k[len(name) + 1:]: arrays[k] for k in arrays.columns if k.startswith(name + '_')}
if len(columns) == 0:
# single-item collection, just forward lazy array (possibly jagged)
if name not in arrays.columns:
raise RuntimeError('Could not find collection %s in dataframe' % name)
if methods:
ArrayType = _mixin(methods, type(arrays[name]))
return ArrayType(arrays[name])
return arrays[name]
elif not jagged:
if methods is None:
Table = ak.Table
else:
Table = _mixin(methods, ak.Table)
table = Table.named(name)
for k, v in columns.items():
table[k] = v
return table
else: # jagged
if methods:
cls = _mixin(methods, cls)
tabletype = ak.type.TableType()
for k, array in columns.items():
tabletype[k] = array.type.to.to
counts = arrays['n' + name]
out = cls(
cls._lazyjagged,
(name, counts, columns, methods),
type=ak.type.ArrayType(len(arrays), float('inf'), tabletype),
)
out.__doc__ = counts.__doc__
return out
@classmethod
def _lazyjagged(cls, name, counts, columns, methods=None):
offsets = ak.JaggedArray.counts2offsets(counts.array)
if methods is None:
JaggedArray = ak.JaggedArray
Table = ak.Table
else:
JaggedArray = _mixin(methods, ak.JaggedArray)
Table = _mixin(methods, ak.Table)
table = Table.named(name)
for k, v in columns.items():
if not isinstance(v, ak.VirtualArray):
raise RuntimeError
col = type(v)(NanoCollection._lazyflatten, (v,), type=ak.type.ArrayType(offsets[-1], v.type.to.to))
col.__doc__ = v.__doc__
table[k] = col
out = JaggedArray.fromoffsets(offsets, table)
out.__doc__ = counts.__doc__
return out
def _lazyindexed(self, indices, destination):
if not isinstance(destination.array, ak.JaggedArray):
raise RuntimeError
if not isinstance(self.array, ak.JaggedArray):
raise NotImplementedError
content = np.zeros(len(self.array.content) * len(indices), dtype=ak.JaggedArray.INDEXTYPE)
for i, k in enumerate(indices):
content[i::len(indices)] = np.array(self.array.content[k])
globalindices = ak.JaggedArray.fromoffsets(
self.array.offsets,
content=ak.JaggedArray.fromoffsets(
np.arange((len(self.array.content) + 1) * len(indices), step=len(indices)),
content,
)
)
globalindices = globalindices[globalindices >= 0] + destination.array.starts
out = globalindices.copy(
content=type(destination.array).fromoffsets(
globalindices.content.offsets,
content=destination.array.content[globalindices.flatten().flatten()]
)
)
return out
def __setitem__(self, key, value):
if self.ismaterialized:
super(NanoCollection, self).__setitem__(key, value)
_, _, columns, _ = self._args
columns[key] = value
self._type.to.to[key] = value.type.to.to
def __delitem__(self, key):
if self.ismaterialized:
super(NanoCollection, self).__delitem__(key)
_, _, columns, _ = self._args
del columns[key]
del self._type.to.to[key]
class NanoEvents(ak.Table):
collection_methods = {
'CaloMET': METVector,
'ChsMET': METVector,
'GenMET': METVector,
'MET': METVector,
'METFixEE2017': METVector,
'PuppiMET': METVector,
'RawMET': METVector,
'TkMET': METVector,
# pseudo-lorentz: pt, eta, phi, mass=0
'IsoTrack': LorentzVector,
'SoftActivityJet': LorentzVector,
'TrigObj': LorentzVector,
# True lorentz: pt, eta, phi, mass
'FatJet': LorentzVector,
'GenDressedLepton': LorentzVector,
'GenJet': LorentzVector,
'GenJetAK8': LorentzVector,
'GenPart': LorentzVector,
'Jet': LorentzVector,
'LHEPart': LorentzVector,
'SV': LorentzVector,
'SubGenJetAK8': LorentzVector,
'SubJet': LorentzVector,
# Candidate: LorentzVector + charge
'Electron': Electron,
'Muon': Muon,
'Photon': Photon,
'Tau': Tau,
'GenVisTau': Candidate,
}
@classmethod
def from_arrays(cls, arrays, collection_methods_overrides={}):
events = cls.named('event')
collections = {k.split('_')[0] for k in arrays.columns}
collections -= {k for k in collections if k.startswith('n') and k[1:] in collections}
allmethods = {**cls.collection_methods, **collection_methods_overrides}
for name in collections:
methods = allmethods.get(name, None)
events[name] = NanoCollection.from_arrays(arrays, name, methods)
# finalize
del events.Photon['mass']
embedded_subjets = type(events.SubJet)(
events.FatJet._lazyindexed,
args=(['subJetIdx1', 'subJetIdx2'], events.SubJet),
type=ak.type.ArrayType(len(events), float('inf'), float('inf'), events.SubJet.type.to.to),
)
embedded_subjets.__doc__ = events.SubJet.__doc__
events.FatJet['subjets'] = embedded_subjets
return events
| [
"nick.smith@cern.ch"
] | nick.smith@cern.ch |
8b199a38bfca4e56b6fc689c0255d55c4c2c5db7 | 51fd9e45e48bd1cea58207f6d3d472e83b419194 | /src/scripts/2diff.py | 7f32a67190a46683b63c2c03df8a100caa0bbc3e | [
"Apache-2.0"
] | permissive | helioxgroup/deepspeech-reconstruction | 6076f4405dd1287723436b558c694f5ece415179 | 72f28d1e9064d221b3421c302a8725a8c71859ee | refs/heads/main | 2023-04-04T07:25:20.488237 | 2021-04-15T21:24:33 | 2021-04-15T21:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # Test 2nd derivative computation
import numpy as np
import tensorflow as tf
np.random.seed(0)
# loss = 'ctc'
loss = 'ce'
bs = 32
fdim = 26
ilen = 100
olen = 10
nlabels = 28
x = tf.Variable(np.random.rand(bs, ilen, fdim), dtype=tf.float32)
W = tf.Variable(np.random.rand(fdim, nlabels), dtype=tf.float32)
with tf.GradientTape() as g1:
with tf.GradientTape() as g2:
logits = tf.linalg.matmul(x, W)
if loss == 'ctc':
logits = tf.transpose(logits, [1, 0, 2])
y = tf.Variable(np.random.randint(0, nlabels, (bs, olen)))
loss = tf.reduce_mean(tf.nn.ctc_loss(y, logits, [olen] * bs, [ilen] * bs))
elif loss == 'ce':
y = tf.Variable(np.random.rand(bs, ilen, nlabels), dtype=tf.float32)
loss = tf.nn.log_poisson_loss(y, logits)
g2.watch(W)
dl_dW = g2.gradient(loss, W)
d = tf.linalg.norm(dl_dW)
dd_dx = g1.gradient(d, x)
print(dd_dx) | [
"trungv.dang@outlook.com"
] | trungv.dang@outlook.com |
4fbeac8a60c377c4ab8bb5b1c063ee1960165f4b | ef1458fae5fbd6b7a9281ccd4d9bc8289f3dd38b | /tests/test_samplestats.py | 17c502dcdc229ebcbd94c60f9ab5f831a554c7ce | [
"BSD-3-Clause"
] | permissive | vt100/mystic | a42910537c3de90d1c2a5637bad5d866308e8863 | 7589eee4b9a7cb6056114ee6770579d173d9007b | refs/heads/master | 2021-01-17T22:28:57.743493 | 2015-07-17T15:25:35 | 2015-07-17T15:25:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,392 | py | import numpy as np
from mystic.math.measures import _k
if __name__ == '__main__':
# even-length
w = [3,1,1,1,3,3]
assert _k(w) == w
# even-length clipping
assert (np.array(_k(w,(10,10),clip=True)) > 0).tolist() == [1,1,1,1,1,1]
assert (np.array(_k(w,(25,25),clip=True)) > 0).tolist() == [0,1,1,1,1,0]
assert (np.array(_k(w,(50,50),clip=True)) > 0).tolist() == [0,0,0,1,1,0]
assert (np.array(_k(w,(49,50),clip=True)) > 0).tolist() == [0,0,0,1,0,0]
assert (np.array(_k(w,(50,49),clip=True)) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(49,49),clip=True)) > 0).tolist() == [0,0,0,1,1,0]
assert (np.array(_k(w,(25,75),clip=True)) > 0).tolist() == [1,1,0,0,0,0]
assert (np.array(_k(w,(24,75),clip=True)) > 0).tolist() == [1,0,0,0,0,0]
assert (np.array(_k(w,(25,74),clip=True)) > 0).tolist() == [0,1,0,0,0,0]
assert (np.array(_k(w,(24,74),clip=True)) > 0).tolist() == [1,1,0,0,0,0]
assert (np.array(_k(w,(75,25),clip=True)) > 0).tolist() == [0,0,0,0,1,1]
assert (np.array(_k(w,(74,25),clip=True)) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(75,24),clip=True)) > 0).tolist() == [0,0,0,0,0,1]
assert (np.array(_k(w,(74,24),clip=True)) > 0).tolist() == [0,0,0,0,1,1]
# even-length trimming
assert (np.array(_k(w,(10,10))) > 0).tolist() == [1,1,1,1,1,1]
assert (np.array(_k(w,(25,25))) > 0).tolist() == [0,1,1,1,1,0]
assert (np.array(_k(w,(50,50))) > 0).tolist() == [0,0,0,0,0,0]
assert (np.array(_k(w,(49,50))) > 0).tolist() == [0,0,0,1,0,0]
assert (np.array(_k(w,(50,49))) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(49,49))) > 0).tolist() == [0,0,0,1,1,0]
assert (np.array(_k(w,(25,75))) > 0).tolist() == [0,0,0,0,0,0]
assert (np.array(_k(w,(24,75))) > 0).tolist() == [1,0,0,0,0,0]
assert (np.array(_k(w,(25,74))) > 0).tolist() == [0,1,0,0,0,0]
assert (np.array(_k(w,(24,74))) > 0).tolist() == [1,1,0,0,0,0]
assert (np.array(_k(w,(75,25))) > 0).tolist() == [0,0,0,0,0,0]
assert (np.array(_k(w,(74,25))) > 0).tolist() == [0,0,0,0,1,0]
assert (np.array(_k(w,(75,24))) > 0).tolist() == [0,0,0,0,0,1]
assert (np.array(_k(w,(74,24))) > 0).tolist() == [0,0,0,0,1,1]
# odd-length
w = [4,2,4,2,4]
assert _k(w) == w
# odd-length clipping
assert (np.array(_k(w,(10,10),clip=True)) > 0).tolist() == [1,1,1,1,1]
assert (np.array(_k(w,(25,25),clip=True)) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(50,50),clip=True)) > 0).tolist() == [0,0,1,0,0]
assert (np.array(_k(w,(37.5,37.5),clip=True)) > 0).tolist() == [0,0,1,0,0]
assert (np.array(_k(w,(37.4,37.5),clip=True)) > 0).tolist() == [0,1,1,0,0]
assert (np.array(_k(w,(37.5,37.4),clip=True)) > 0).tolist() == [0,0,1,1,0]
assert (np.array(_k(w,(37.4,37.4),clip=True)) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(25,75),clip=True)) > 0).tolist() == [1,1,0,0,0]
assert (np.array(_k(w,(24,75),clip=True)) > 0).tolist() == [1,0,0,0,0]
assert (np.array(_k(w,(25,74),clip=True)) > 0).tolist() == [0,1,0,0,0]
assert (np.array(_k(w,(24,74),clip=True)) > 0).tolist() == [1,1,0,0,0]
assert (np.array(_k(w,(75,25),clip=True)) > 0).tolist() == [0,0,0,1,1]
assert (np.array(_k(w,(74,25),clip=True)) > 0).tolist() == [0,0,0,1,0]
assert (np.array(_k(w,(75,24),clip=True)) > 0).tolist() == [0,0,0,0,1]
# odd-length trimming
assert (np.array(_k(w,(10,10))) > 0).tolist() == [1,1,1,1,1]
assert (np.array(_k(w,(25,25))) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(50,50))) > 0).tolist() == [0,0,0,0,0]
assert (np.array(_k(w,(37.5,37.5))) > 0).tolist() == [0,0,1,0,0]
assert (np.array(_k(w,(37.4,37.5))) > 0).tolist() == [0,1,1,0,0]
assert (np.array(_k(w,(37.5,37.4))) > 0).tolist() == [0,0,1,1,0]
assert (np.array(_k(w,(37.4,37.4))) > 0).tolist() == [0,1,1,1,0]
assert (np.array(_k(w,(25,75))) > 0).tolist() == [0,0,0,0,0]
assert (np.array(_k(w,(24,75))) > 0).tolist() == [1,0,0,0,0]
assert (np.array(_k(w,(25,74))) > 0).tolist() == [0,1,0,0,0]
assert (np.array(_k(w,(24,74))) > 0).tolist() == [1,1,0,0,0]
assert (np.array(_k(w,(75,25))) > 0).tolist() == [0,0,0,0,0]
assert (np.array(_k(w,(74,25))) > 0).tolist() == [0,0,0,1,0]
assert (np.array(_k(w,(75,24))) > 0).tolist() == [0,0,0,0,1]
assert (np.array(_k(w,(74,24))) > 0).tolist() == [0,0,0,1,1]
# EOF
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
c209285b831f71b03207b5130742b461b5e9cdad | 8e3eb5fa2cf80e2f6d265faaa410cf850ca01242 | /화물 도크.py | 7156bac8cd7dcaf79511872ede0783776c203861 | [] | no_license | first0506/Algorithm-Problem-Solving | 7a35f5fc1ea5dc0c06e3fc4b96abcbaf85fd13b1 | 4ef67297ead3eba0711de0f49b8c099ffaa29bf8 | refs/heads/master | 2022-11-06T17:18:16.360292 | 2020-07-06T11:38:12 | 2020-07-06T11:38:12 | 263,899,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | T = int(input())
for test_case in range(1, T+1):
N = int(input())
times = []
for _ in range(N):
times.append(list(map(int, input().split())))
times = sorted(times, key=lambda x:x[1])
end = times[0][0]
cnt = 1
for i in range(1, N):
if end <= times[i][0]:
end = times[i][1]
cnt += 1
print('#{} {}'.format(test_case, cnt)) | [
"first0506@naver.com"
] | first0506@naver.com |
b8be43f3b61e0e0b5c7da42de25d1e1c492e733b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/knq.py | 05c0177bca06f6b7c9ebc61d60edabef92ba9a73 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'kNQ':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
25f627eefb8b0a812a8709085b737eedc77257f3 | 65bea3dc57eb4d6bc27fe53418c8c4bbcd8f0ca3 | /profiles/migrations/0001_initial.py | d6fc76c35694baff5230c27308233e5bcdbdfac3 | [
"MIT"
] | permissive | ezekieltech/eduTech-backend | e339e1eae12529ae414a9220b67b428afdaa057f | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | refs/heads/main | 2023-03-25T22:31:01.704584 | 2021-03-21T04:12:31 | 2021-03-21T04:12:31 | 326,237,291 | 0 | 0 | MIT | 2021-03-17T16:38:54 | 2021-01-02T17:39:46 | Python | UTF-8 | Python | false | false | 2,087 | py | # Generated by Django 3.1.5 on 2021-03-13 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EduconsultantProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('bio', models.TextField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MenteeProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('bio', models.TextField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MentorProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('bio', models.TextField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'abstract': False,
},
),
]
| [
"ezekielobhafuoso@gmail.com"
] | ezekielobhafuoso@gmail.com |
8df77a27d2b747cb1e1fafc772f2d5e5fad088d6 | 5ae615019b126421a9ccd66fd6c9052af9a27923 | /opem/Test/test_Functions.py | b7aae03591b49561955dd7de66798d7a99167193 | [
"MIT"
] | permissive | guoyanyanyun/opem | 62b401f12d990309b19b08f0637782dd408accce | 02e946397f132802b27a9384d7ff7ba4a7fca580 | refs/heads/master | 2020-04-09T11:12:00.413549 | 2018-11-12T09:44:14 | 2018-11-12T09:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | # -*- coding: utf-8 -*-
'''
>>> from opem.Functions import *
>>> data=[i for i in range(100)]
>>> integrate(data,1)
4867.666666666666
>>> data[0]=None
>>> integrate(data,1)
>>> linear_plot([1,2,3],[2,4,6])
[[2.0, 4.0, 6.0], 0.0, 2.0]
>>> isfloat("2")
True
>>> isfloat("2.02")
True
>>> isfloat('ss')
False
>>> filter_lambda({"lambda":24})
[Warning] Opem Automatically Set Lambda To Maximum Value (23)
{'lambda': 23}
>>> filter_alpha({"alpha":2})
[Warning] Opem Automatically Set Alpha To Maximum Value (1)
{'alpha': 1}
>>> filter_lambda({"lambda":13})
[Warning] Opem Automatically Set Lambda To Minimum Value (14)
{'lambda': 23}
>>> filter_alpha({"alpha":-0.1})
[Warning] Opem Automatically Set Alpha To Maximum Value (0)
{'alpha': 0}
>>> Input_dict=Get_Input({"T": "Cell Operation Temperature [K]", "PH2": "Partial Pressure [atm]", "PO2": "Partial Pressure [atm]"},input_item=input_test)
>>> Input_keys=list(Input_dict.keys())
>>> Input_keys.sort()
>>> print(Input_keys)
['Name', 'PH2', 'PO2', 'T']
>>> description_print("Model1",{"Model1":"Test"})
###########
<BLANKLINE>
<BLANKLINE>
Test
<BLANKLINE>
<BLANKLINE>
###########
>>> check_update(1)
>>> check_update(0.1)
###########
New Version (0.9) Is Available!
Website : http://www.ecsim.ir/opem
###########
'''
| [
"sepand.haghighi@yahoo.com"
] | sepand.haghighi@yahoo.com |
c947f71d2d1777f4abb4dfb167d066818ab7a3ff | 09120532659f7eb134163f92ac2f65423a04dc03 | /zproject/django/survey/teacher/migrations/0001_initial.py | 2b17accc1d2d1a6230850e396f2749e5e35e5155 | [] | no_license | hoboland21/survey | 7b2dafd76db0e9317037a0cec163a97c0ec9a8ec | 93e71f3304b381a6be03c8f813d2ba3a0b6eb218 | refs/heads/master | 2023-01-28T07:38:38.934710 | 2019-05-13T08:55:41 | 2019-05-13T08:55:41 | 182,874,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,853 | py | # Generated by Django 2.2 on 2019-04-30 06:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImportFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=256)),
('document', models.FileField(upload_to='uploads/')),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('format', models.CharField(blank=True, max_length=30)),
('question', models.CharField(max_length=2048)),
('label', models.CharField(blank=True, max_length=120)),
('group', models.CharField(blank=True, max_length=120)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('requester', models.CharField(max_length=256)),
('description', models.CharField(max_length=1024)),
('subject', models.CharField(max_length=256)),
('label', models.CharField(blank=True, max_length=120)),
('group', models.CharField(blank=True, max_length=120)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.CharField(blank=True, max_length=120)),
('label', models.CharField(blank=True, max_length=120)),
('test_code', models.CharField(max_length=32)),
('name', models.CharField(max_length=128)),
('created', models.DateTimeField(auto_now=True)),
('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Survey')),
],
),
migrations.CreateModel(
name='Items',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(blank=True, max_length=120)),
('group', models.CharField(blank=True, max_length=120)),
('sequence', models.SmallIntegerField()),
('page', models.SmallIntegerField(default=1)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Question')),
('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Survey')),
],
),
migrations.CreateModel(
name='Answers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.CharField(max_length=10)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Question')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.Student')),
],
),
]
| [
"jc@saipantech.com"
] | jc@saipantech.com |
ff10a5df981de1a7bb30b7f765cf5730c05fa6db | 7ccb528e4bc0874cf30256ac50869c0d4f07b459 | /search algorithms/aifrontmodified.py | d344ea84cf4b0d66e746a92a69a9638debc5acb0 | [] | no_license | pramod3009/Algorithms | bba9f58608249c475725c594e39f7eb147af635a | 41016079ea975617f41fb29316a43dc506b21b6c | refs/heads/master | 2022-11-10T11:04:07.764896 | 2020-06-19T21:18:20 | 2020-06-19T21:18:20 | 273,407,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,426 | py | import tkinter as tk
from tkinter import *
from ttk import Button, Style
from PIL import ImageTk, Image
import os
import astarf
import bfs
import dfs
import djikstra
import dynamic
TITLE_FONT = ("Helvetica", 18, "bold")
class SampleApp(tk.Tk):
    """Root window: instantiates every page frame once, stacks them in the
    same grid cell, and raises the requested one on demand."""
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # the container is where we'll stack a bunch of frames
        # on top of each other, then the one we want visible
        # will be raised above the others
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        # maps page class name -> its (already constructed) frame instance
        self.frames = {}
        for F in (StartPage, PageOne, PageTwo, PageThree, PageFour):
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = frame
            # put all of the pages in the same location;
            # the one on the top of the stacking order
            # will be the one that is visible.
            frame.grid(row=0, column=0, sticky="nsew")
            frame.configure(bg="#a1dbcd")
        self.show_frame("StartPage")
    def show_frame(self, page_name):
        '''Show a frame for the given page name (a key of ``self.frames``).'''
        frame = self.frames[page_name]
        frame.tkraise()
class StartPage(tk.Frame):
    """Landing page: each row pairs a search-strategy button with a button
    naming its main property; both buttons open the same demo page."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        Style().configure("TButton", padding=(0, 5, 0, 5), font='serif 10')
        # banner label across the top of the window
        self.instr1 = Label(self, bg='gray12', fg="white", width=125, justify=RIGHT, height=6, text="AGENT PERCEPT")
        self.instr1.configure(font=("Times New Roman", 14, "bold"))
        self.instr1.grid(row=0, column=1, columnspan=4, sticky=W + E)
        self.pack(fill=BOTH, expand=True)
        # vertical padding between the button rows
        self.rowconfigure(2, pad=15)
        self.rowconfigure(4, pad=15)
        self.rowconfigure(8, pad=15)
        self.rowconfigure(6, pad=15)
        self.rowconfigure(10, pad=25)
        self.rowconfigure(20, pad=25)
        self.obtn = Button(self, text="BREADTH SEARCH", command=self.contains2)
        self.obtn.grid(row=2, column=2, columnspan=3, sticky=W)
        self.obtn = Button(self, text="LEVEL SEARCH", command=self.contains2)
        self.obtn.grid(row=2, column=3, columnspan=3, sticky=W)
        # label typo fixes: "DEAPTH " -> "DEPTH", "UNIIFORM" -> "UNIFORM"
        self.obtn = Button(self, text="DEPTH", command=self.contains3)
        self.obtn.grid(row=4, column=2, columnspan=3, sticky=W)
        self.obtn = Button(self, text="FINITE GRAPH", command=self.contains3)
        self.obtn.grid(row=4, column=3, columnspan=3, sticky=W)
        self.obtn = Button(self, text="ONLY PATH LENGTH", command=self.contains4)
        self.obtn.grid(row=6, column=2, columnspan=3, sticky=W)
        self.obtn = Button(self, text="UNIFORM COST", command=self.contains4)
        self.obtn.grid(row=6, column=3, columnspan=3, sticky=W)
        self.obtn = Button(self, text="PATH LENGTH AND HEURISTIC VALUE", command=self.contains1)
        self.obtn.grid(row=8, column=2, columnspan=3, sticky=W)
        self.obtn = Button(self, text="BEST PATH", command=self.contains1)
        self.obtn.grid(row=8, column=3, columnspan=3, sticky=W)
        self.obtn = Button(self, text="USER INPUT", command=self.contains5)
        self.obtn.grid(row=20, column=4, columnspan=4, sticky=W)
        self.pack()

    def contains2(self):
        """Open the breadth-first-search demo page."""
        self.controller.show_frame("PageTwo")
        return

    def contains3(self):
        """Open the depth-first-search demo page."""
        self.controller.show_frame("PageThree")
        return

    def contains4(self):
        """Open the uniform-cost (Dijkstra) demo page."""
        self.controller.show_frame("PageFour")
        return

    def contains1(self):
        """Open the A* demo page."""
        self.controller.show_frame("PageOne")
        return

    def contains5(self):
        """Launch the user-defined-graph tool in its own window."""
        dynamic.start()
        return
class PageOne(tk.Frame):
    """A* demo page: shows the reference graph image, takes start/goal
    coordinates, and prints the path returned by ``astarf.start``."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        Style().configure("TButton", padding=(0, 5, 0, 5), font='serif 10')
        self.columnconfigure(5, pad=5)
        '''''
        self.columnconfigure(1, pad=5)
        self.columnconfigure(2, pad=5)
        self.columnconfigure(3, pad=5)
        self.columnconfigure(4, pad=5)
        self.rowconfigure(0, pad=15)
        self.rowconfigure(1, pad=15)
        self.rowconfigure(2, pad=15)
        self.rowconfigure(3, pad=15)
        self.rowconfigure(4, pad=15)
        self.rowconfigure(18, pad=15)
        self.rowconfigure(46, pad=15)""""""
        '''
        # keep a reference on self so the PhotoImage is not garbage collected
        imagehead = Image.open("astar.png")
        tkimage = ImageTk.PhotoImage(imagehead)
        self.tkimage = tkimage
        panel1 = Label(self, image=tkimage, width=600, height=500)
        panel1.grid(row=10, column=0, sticky=E)
        # large read-only textbox for the result path
        self.tb8 = Text(self, width=55, height=8, font=("Helvetica", 11), wrap=WORD)
        self.tb8.grid(row=10, column=20, columnspan=2, sticky=W)
        # small input textboxes for the start and goal coordinates
        self.tb1 = Text(self, width=30, height=5)
        self.tb1.grid(row=0, column=0, sticky=W)
        self.tb1.insert(0.0, "insert start state")
        self.tb2 = Text(self, width=30, height=5)
        self.tb2.insert(0.0, "insert goal state")
        self.tb2.grid(row=0, column=1, sticky=W)
        # navigation / action buttons
        self.hbtn = Button(self, text="BACK", command=lambda: controller.show_frame("StartPage"))
        self.hbtn.grid(row=1, column=0, columnspan=2, sticky=W)
        self.obtn = Button(self, text="SUBMIT", command=lambda: self.info())
        self.obtn.grid(row=1, column=1, columnspan=2, sticky=W)
    def info(self):
        """Parse "x y" pairs from both textboxes, run A*, and display the
        path.  Assumes each box holds two whitespace-separated integers."""
        point1 = self.tb1.get("1.0", "end-1c")
        point2 = self.tb2.get("1.0", "end-1c")
        point1 = point1.split()
        point2 = point2.split()
        x1 = int(point1[0])
        y1 = int(point1[1])
        x2 = int(point2[0])
        y2 = int(point2[1])
        s = astarf.start(x1, y1, x2, y2)
        # build the "start-->n1-->n2..." display string
        x = "start"
        for i in s:
            x = x + "-->" + str(i)
        self.tb8.configure(state=NORMAL)
        self.tb8.delete(1.0, END)
        self.tb8.insert(0.0, x)
        self.tb8.configure(state=DISABLED)
class PageTwo(tk.Frame):
    """Breadth-first-search demo page: takes a goal node (start is node 0)
    and displays the traversal returned by ``bfs.start``."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        Style().configure("TButton", padding=(0, 5, 0, 5), font='serif 10')
        self.columnconfigure(5, pad=5)
        '''''
        self.columnconfigure(1, pad=5)
        self.columnconfigure(2, pad=5)
        self.columnconfigure(3, pad=5)
        self.columnconfigure(4, pad=5)
        self.rowconfigure(0, pad=15)
        self.rowconfigure(1, pad=15)
        self.rowconfigure(2, pad=15)
        self.rowconfigure(3, pad=15)
        self.rowconfigure(4, pad=15)
        self.rowconfigure(18, pad=15)
        self.rowconfigure(46, pad=15)""""""
        '''
        # keep a reference on self so the PhotoImage is not garbage collected
        imagehead = Image.open("bfs_dfs.png")
        tkimage = ImageTk.PhotoImage(imagehead)
        self.tkimage = tkimage
        panel1 = Label(self, image=tkimage, width=650, height=500)
        panel1.grid(row=10, column=0, sticky=E)
        # large read-only textbox for the result
        self.tb8 = Text(self, width=55, height=8, font=("Helvetica", 11), wrap=WORD)
        self.tb8.grid(row=10, column=20, columnspan=2, sticky=W)
        # single input textbox: the goal node
        self.tb1 = Text(self, width=30, height=5)
        self.tb1.grid(row=0, column=0, sticky=W)
        self.tb1.insert(0.0, "insert goal state assuming start node is 0")
        # self.tb2 = Text(self, width=30, height=5)
        # self.tb2.insert(0.0, "insert goal state")
        # self.tb2.grid(row=0, column=1, sticky=W)
        # navigation / action buttons
        self.hbtn = Button(self, text="BACK", command=lambda: controller.show_frame("StartPage"))
        self.hbtn.grid(row=1, column=0, columnspan=2, sticky=W)
        self.obtn = Button(self, text="SUBMIT", command=lambda: self.info())
        self.obtn.grid(row=1, column=1, columnspan=2, sticky=W)
    def info(self):
        """Read the goal node, run BFS from node 0, show the result."""
        point1 = self.tb1.get("1.0", "end-1c")
        point1 = int(point1)
        x = bfs.start(point1)
        self.tb8.configure(state=NORMAL)
        self.tb8.delete(1.0, END)
        self.tb8.insert(0.0, x)
        self.tb8.configure(state=DISABLED)
class PageThree(tk.Frame):
    """Depth-first-search demo page; layout is identical to PageTwo, only
    the algorithm invoked differs (``dfs.start``)."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        Style().configure("TButton", padding=(0, 5, 0, 5), font='serif 10')
        self.columnconfigure(5, pad=5)
        '''''
        self.columnconfigure(1, pad=5)
        self.columnconfigure(2, pad=5)
        self.columnconfigure(3, pad=5)
        self.columnconfigure(4, pad=5)
        self.rowconfigure(0, pad=15)
        self.rowconfigure(1, pad=15)
        self.rowconfigure(2, pad=15)
        self.rowconfigure(3, pad=15)
        self.rowconfigure(4, pad=15)
        self.rowconfigure(18, pad=15)
        self.rowconfigure(46, pad=15)""""""
        '''
        # keep a reference on self so the PhotoImage is not garbage collected
        imagehead = Image.open("bfs_dfs.png")
        tkimage = ImageTk.PhotoImage(imagehead)
        self.tkimage = tkimage
        panel1 = Label(self, image=tkimage, width=650, height=500)
        panel1.grid(row=10, column=0, sticky=E)
        # large read-only textbox for the result
        self.tb8 = Text(self, width=55, height=8, font=("Helvetica", 11), wrap=WORD)
        self.tb8.grid(row=10, column=20, columnspan=2, sticky=W)
        # single input textbox: the goal node
        self.tb1 = Text(self, width=30, height=5)
        self.tb1.grid(row=0, column=0, sticky=W)
        self.tb1.insert(0.0, "insert goal state assuming start node is 0")
        # self.tb2 = Text(self, width=30, height=5)
        # self.tb2.insert(0.0, "insert goal state")
        # self.tb2.grid(row=0, column=1, sticky=W)
        # navigation / action buttons
        self.hbtn = Button(self, text="BACK", command=lambda: controller.show_frame("StartPage"))
        self.hbtn.grid(row=1, column=0, columnspan=2, sticky=W)
        self.obtn = Button(self, text="SUBMIT", command=lambda: self.info())
        self.obtn.grid(row=1, column=1, columnspan=2, sticky=W)
    def info(self):
        """Read the goal node, run DFS from node 0, show the result."""
        point1 = self.tb1.get("1.0", "end-1c")
        point1 = int(point1)
        x = dfs.start(point1)
        self.tb8.configure(state=NORMAL)
        self.tb8.delete(1.0, END)
        self.tb8.insert(0.0, x)
        self.tb8.configure(state=DISABLED)
class PageFour(tk.Frame):
    """Uniform-cost (Dijkstra) demo page: takes start and goal nodes and
    displays the result of ``djikstra.start``."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        Style().configure("TButton", padding=(0, 5, 0, 5), font='serif 10')
        self.columnconfigure(5, pad=5)
        '''''
        self.columnconfigure(1, pad=5)
        self.columnconfigure(2, pad=5)
        self.columnconfigure(3, pad=5)
        self.columnconfigure(4, pad=5)
        self.rowconfigure(0, pad=15)
        self.rowconfigure(1, pad=15)
        self.rowconfigure(2, pad=15)
        self.rowconfigure(3, pad=15)
        self.rowconfigure(4, pad=15)
        self.rowconfigure(18, pad=15)
        self.rowconfigure(46, pad=15)""""""
        '''
        # keep a reference on self so the PhotoImage is not garbage collected
        imagehead = Image.open("ucs.png")
        tkimage = ImageTk.PhotoImage(imagehead)
        self.tkimage = tkimage
        panel1 = Label(self, image=tkimage, width=600, height=500)
        panel1.grid(row=10, column=0, sticky=E)
        # large read-only textbox for the result
        self.tb8 = Text(self, width=55, height=8, font=("Helvetica", 11), wrap=WORD)
        self.tb8.grid(row=10, column=20, columnspan=2, sticky=W)
        # small input textboxes for the start and goal nodes
        self.tb1 = Text(self, width=30, height=5)
        self.tb1.grid(row=0, column=0, sticky=W)
        self.tb1.insert(0.0, "insert start state")
        self.tb2 = Text(self, width=30, height=5)
        self.tb2.insert(0.0, "insert goal state")
        self.tb2.grid(row=0, column=1, sticky=W)
        # navigation / action buttons
        self.hbtn = Button(self, text="BACK", command=lambda: controller.show_frame("StartPage"))
        self.hbtn.grid(row=1, column=0, columnspan=2, sticky=W)
        self.obtn = Button(self, text="SUBMIT", command=lambda: self.info())
        self.obtn.grid(row=1, column=1, columnspan=2, sticky=W)
    def info(self):
        """Read start/goal nodes, run Dijkstra, show the result."""
        self.tb8.configure(state=NORMAL)
        self.tb8.delete(1.0, END)
        point1 = self.tb1.get("1.0", "end-1c")
        point2 = self.tb2.get("1.0", "end-1c")
        x = int(point1)
        y = int(point2)
        a = djikstra.start(x, y)
        self.tb8.insert(0.0, a)
        self.tb8.configure(state=DISABLED)
if __name__ == "__main__":
    # Build the application window and enter the Tk event loop.
    app = SampleApp()
    app.mainloop()
"="
] | = |
e6cdf753fc9c2aca1f92d5bb34b67009aee6d712 | cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6 | /communities_CK_calculations_threshold_summary_GC_tops_withins_comm.py | 74b385d99726212799dca71afbed440d7153744b | [] | no_license | juliettapc/CalorieKing | 9cb9f35ae9b239d2284175b0802cf2c60dc79d1d | 5f80bffb65fe4644a81ae2ab0b1738861e028331 | refs/heads/master | 2022-02-10T07:52:24.133379 | 2022-02-08T01:25:18 | 2022-02-08T01:25:18 | 153,174,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,035 | py | """
Created by Julia Poncela on January 2011.
It reads all the graph files at once (months, quarters, year) from the 2, 5 and 10-point folders and calculates the modulary, number and size of communities, ..., just for the giant component of each network.
It prints out the results in a general 'summary_modularity_analysis' file, and also in a bunch of other files 'path/graph_name'+'_list_modularity_analysis', one for each network.
It doesn't take any arguments.
"""
import subprocess as sp
import networkx as nx
# Python 2 script.  Create the single summary file and write its header,
# then build the list of input graph-file paths (months, quarters, full year
# for each of the 2/5/10-point folders).
file1 = open('summary_modularity_analysis_GC','wt') # one summary file for everything
print >> file1, "data: #_points time_scale GC_size <k> k(hub) modularity #_communities <community_size> max_community_size\n"
file1.close()
name_list=[] # list of names of the input files
scale_list=[2,5,10]
for scale in scale_list:
    for index in range(0,11): #months
        name_list.append(str(scale)+'_points_network/data/friend_graph_month'+str(index))
    for index in range(0,4): # quarters
        name_list.append(str(scale)+'_points_network/data/friend_graph_quarter'+str(index))
    # year
    name_list.append(str(scale)+'_points_network/data/friend_graph_all0')
map (str, name_list) # NOTE(review): result is discarded, so this line is a no-op (the entries are already strings)
for name in name_list: # loop to go over files (== over networks)
calculations=[] # list of atributes for every network (modularity, number of communities, averages,...)
list_of_data_list=[] #list of atributes that are lists (5top hubs, communitiy sizes,...)
print "\n\nfile: "+name
edge_data = open(name).readlines()
partir=name.split("_points_network/data/friend_graph_") #parto el nombre del fichero en una lista con dos componentes: num_ptos y time_scale
num_points=partir[0]
time_scale=partir[1]
H=nx.read_edgelist(name) # create the network from the original input file
components=nx.connected_component_subgraphs(H)
G=components[0] # i take just the GC as a subgraph to perform the community ID algorithm
# G is a list of tuples: [(n1,n2),(n3,n4),(n2,n3),...]
calculations.append("\n") # just to separate from the next set of data
calculations.append(num_points)
calculations.append(time_scale)
calculations.append(len(G))
#print len(G) # N
#print G.number_of_nodes()# N
new_edge_data = [] #this list is what i will pass to Roger's code
for e in G.edges(): # e is a list of two neighbors: [n1,n2]
#i have to convert e to str because it is in some other format and the algorithm may not recognise it
new_edge_data.append(" ".join(map(str,e))) # i join the two neighbors, separating them just by a space, so now they are just one element of the edge_list, which is: [n1 n2, n3 n4, n2 n3,...]
degree_values=sorted(nx.degree(G).values())
most_connected=[]
for i in range (1,11):
most_connected.append(degree_values[-i])
list_of_data_list.append(most_connected) # save the connectivity values of the 5 top highest connected nodes
average_network_degree=int(round(sum(G.degree().values())/float(len(G)),0) )
calculations.append(average_network_degree)
calculations.append(degree_values[-1])
p = sp.Popen(["/opt/communityID"], stdin=sp.PIPE, stdout=sp.PIPE)
output, error = p.communicate("\n".join(new_edge_data))
community_lines = output.split("part")
modularity = float(community_lines[0])
partition_lines = community_lines[1].split("\n")
modules = []
calculations.append(modularity)
most_connected_within_all_network=[]
max_max_degree=0
max_size=0
average_size=0
average_max_degree=0
size_list=[]
max_conect_list=[]
average_k_list=[]
for p in partition_lines:
this_module = p.split("---")
if len(this_module) > 1:
this_module = this_module[1] # 'this_module' is the list of nodes in the current module
this_module = map(int, this_module.split())
label_and_k=[]#lista de conectividades y label_nodos en una communidad
modules.append(this_module) # list of modules (list of lists)
size=0
conect_list=[]
averageK=0
degree_values_within=[]#list of k within the current community
for node in this_module: # loop over the nodes of the current module
node=str(node)
conect_list.append(G.degree(node)) #create a connectivity list for the nodes in the module
averageK=averageK+G.degree(node)
size=size+1
degree_values_within.append(G.degree(node)) #ESTOY GUARDANDO K'S NO LAS LABEL DE LOS NODOS!!!!!
pair=[]
pair.append(G.degree(node))
pair.append(node)
label_and_k.append(pair) #guardo parejas de [k_nodo,label_nodo]
# end loop nodes of a given module.
label_and_k=sorted(label_and_k) # i sorted the nodes belonging to the current community by connectivity
most_connected_within=[]#list of top-10 most connected nodes in the community
if len(this_module) >= 10:
for i in range (1,11):
most_connected_within.append(label_and_k[-i][1])
elif len(this_module) >= 5:
for i in range (1,6):
most_connected_within.append(label_and_k[-i][1])
else:
for i in range(len(this_module)):
most_connected_within.append(label_and_k[-i][1])
most_connected_within_all_network.append(most_connected_within)#list lists of top-10 most connected nodes in the community
size_list.append(size)# list of community sizes
averageK=averageK/float(size)
average_k_list.append(int(round(averageK,0)))
if max_size < size:
max_size = size
if max_max_degree < max(conect_list):
max_max_degree = max(conect_list)
average_size=average_size+size
average_max_degree=average_max_degree+max(conect_list)
max_conect_list.append(max(conect_list))
# end of loop over communities
#average over communities
average_size=average_size/len(modules)
average_max_degree=average_max_degree/len(modules)
calculations.append(len(modules)) #number of cummunities
calculations.append(average_size) # average sizes of communities
calculations.append(max_size) # maximum size of communities
list_of_data_list.append(max_conect_list) # list of maximum conectivity per each community
list_of_data_list.append(average_k_list) # list of average conectivity per each community
list_of_data_list.append(size_list) # list of community sizes
#print the results
print "N:",len(G)," number of communities detected:"+str(len(modules))
print "average_size:", average_size," average_max_degree:",average_max_degree
print "max_size:", max_size," max_max_degree:",max_max_degree
output_string = "modularity: " + str(modularity) +"\n" #print modularity
for s in modules:
module_string = ",".join(map(str,s))
output_string += module_string + ";\n" # print the elements of every community
#print output_string
# write the output files
file2 = open(name+'_list_modularity_analysis_GC','wt') #one output file per each input file
print >> file2, "data: list_10top_hubs list_max(k)_each_comm list_<k>_each_comm list_community_sizes\n"
for item in list_of_data_list:
print >> file2, item
print >> file2, "\n"
file2.close()
file1 = open('summary_modularity_analysis_GC','at') # one summary file for everything
for calculation in calculations:
print >> file1, calculation, # with a comma at the end, there is not \n between values
file1.close()
if name== '5_points_network/data/friend_graph_all0':
file3 = open('list_of_communities_'+str(num_points)+'points_'+str(time_scale),'wt')
print >> file3, modules # imprimo lista de listas (==nodos en cada comunidad)
file3.close()
file4 =open('list_of_top10_hubs_withing_communities_'+str(num_points)+'points_'+str(time_scale),'wt')
print >> file4, most_connected_within_all_network
file4.close()
| [
"julia@chem-eng.northwestern.edu"
] | julia@chem-eng.northwestern.edu |
9667b44160f348981ee70530ce3320ba0a586f90 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_8422.py | 1495d5f0126aea6c5d6f5b0bb9d9a93e04099200 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | # Problems with Celery & Redis Backend
CELERY_ALWAYS_EAGER = True
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
e70928a610cf94f7cb28e1ce8d31ce8d6dd954ef | df42b0d05038a2940606591d548637bc51d6573d | /mounth02/day11/demo02.py | fed207512a913987794058f8243ffb92c9eece52 | [] | no_license | ThreePointFive/aid1907_0814 | 440113f5ae2df28e53a088bd3ea420d5558214b4 | 99eea9aafdf8211278425c33aba2e64d5eb2500b | refs/heads/master | 2022-12-03T19:31:21.085608 | 2019-11-09T06:25:33 | 2019-11-09T06:25:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | '''property
Use a property to control how an attribute is read and written.
'''
class Wife:
    """Demonstrates controlling attribute access with ``property``."""

    def __init__(self, name, age):
        self.name = name
        self.age = age  # routed through the property setter below

    def get_age(self):
        """Return the stored age."""
        return self.__age

    def set_age(self, age):
        """Store a new age."""
        self.__age = age

    # BUG FIX: the original wrote ``property=(get_age, set_age)``, which only
    # bound a tuple to a class attribute named ``property`` and never created
    # a descriptor.  ``age`` is now a real property backed by ``__age``.
    age = property(get_age, set_age)
# Demo: create a Wife, change the age through the setter, print it.
w01=Wife('caster',25)
w01.set_age(268)
print(w01.age)
| [
"760373741@qq.com"
] | 760373741@qq.com |
cadb44f3e2911eb21b4fb5601d9ae1730971a64f | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /pychron/lasers/laser_managers/laser_script_executor.py | c72107cc7928ef0326d5f0a36493cd8a05122f43 | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 20,799 | py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
import os
from threading import Thread
from traits.api import Instance, Bool, Button, Property, Str
from traitsui.api import View, Item, ButtonEditor, UItem
from pychron.core.helpers.filetools import pathtolist
from pychron.lasers.laser_managers.ilaser_manager import ILaserManager
from pychron.loggable import Loggable
from pychron.paths import paths
from six.moves import map
from six.moves import range
class LaserScriptExecutor(Loggable):
    """Base executor: runs a laser script on a worker thread and exposes an
    execute/stop toggle button through a small Traits UI."""

    laser_manager = Instance(ILaserManager)
    execute_button = Button
    execute_label = Property(depends_on="_executing")
    message = Str
    _executing = Bool(False)
    _cancel = False

    def execute(self):
        """Toggle execution: flag a running script for cancellation,
        otherwise launch ``_execute`` on a new worker thread."""
        if self._executing:
            # a run is in progress -- ask it to stop
            self._cancel = True
            self._executing = False
            return

        self._cancel = False
        self._executing = True
        Thread(target=self._execute).start()

    def _execute(self):
        """Subclass hook; the base implementation does nothing."""
        pass

    # handlers
    def _execute_button_fired(self):
        self.execute()

    def _get_execute_label(self):
        # label tracks the toggle state
        if self._executing:
            return "Stop"
        return "Execute"

    def traits_view(self):
        """The execute/stop button above a read-only status line."""
        return View(
            Item(
                "execute_button",
                show_label=False,
                editor=ButtonEditor(label_value="execute_label"),
            ),
            UItem("message", style="readonly"),
            width=400,
            title="Laser Script Executor",
        )
class UVLaserScriptExecutor(LaserScriptExecutor):
    """Runs a line-oriented UV-laser script (``uvlaser.txt``), where each
    line has the form ``<command>: value[,value2...]``."""

    def _get_script_lines(self):
        # NOTE(review): ``self._script_path`` is never assigned in this class;
        # confirm this helper is still used before relying on it.
        with open(self._script_path, "r") as rfile:
            return rfile.readlines()

    def _execute(self):
        """Execute every script line in order, stopping early on cancel."""
        path = os.path.join(paths.scripts_dir, "uvlaser.txt")
        self.info("starting LaserScript")
        for i, line in enumerate(pathtolist(path)):
            if self._cancel:
                self.debug("Script Canceled")
                break
            line = line.strip()
            self.debug("execute {:02n}:".format(i))
            self._execute_line(line)
        else:
            self.info("Script completed")

    def _execute_line(self, line):
        """
        <command>: value[,value2...]
        :param line: one stripped script line
        :return:
        """
        # split only on the first colon so values may contain ':'
        cmd, args = line.split(":", 1)
        try:
            func = getattr(self, "_cmd_{}".format(cmd.strip()))
        except AttributeError:
            self.warning('Invalid command: "{}". line={}'.format(cmd, line))
            return

        try:
            self.message = line
            # BUG FIX: the original called ``func(*args)`` on the raw string,
            # which unpacked it character by character.  Split the comma
            # separated value list instead.
            func(*[a.strip() for a in args.split(",")])
        except BaseException as e:
            self.warning("Failed to execute err:{}, line={}".format(e, line))
        self.message = ""

    # commands
    def _cmd_line_y(self, x, y, step, n, nburst):
        self._line(x, y, step, n, "vertical", nburst)

    def _cmd_line_x(self, x, y, step, n, nburst):
        self._line(x, y, step, n, "horizontal", nburst)

    def _cmd_move_z(self, z):
        z = float(z)
        self.laser_manager.stage_manager.set_z(z, block=True)

    def _cmd_move_xy(self, x, y):
        x, y = float(x), float(y)
        self.laser_manager.linear_move(x, y, block=True)

    def _cmd_fire(self, nburst):
        atl = self.laser_manager.atl_controller
        atl.set_burst_mode(True)
        atl.set_nburst(nburst)
        self.laser_manager.single_burst()

    def _line(self, x, y, step, n, orientation, nburst):
        """Fire ``nburst`` at each of ``n`` points spaced ``step`` apart."""
        # BUG FIX: arguments arrive as strings from the script file; the
        # original forwarded them unconverted, so ``range(n)`` raised
        # TypeError and ``step * i`` performed string repetition.
        x, y, step = float(x), float(y), float(step)
        n = int(n)
        for xi, yi in self._gen_line(x, y, step, n, orientation):
            if self._cancel:
                break
            self._cmd_move_xy(xi, yi)
            self._cmd_fire(nburst)

    def _gen_line(self, x, y, step, n, orientation):
        """Yield ``n`` (x, y) points along one axis starting at (x, y)."""
        xi, yi = x, y
        for i in range(n):
            if orientation == "vertical":
                yi = y + step * i
            else:
                xi = x + step * i
            yield xi, yi
# ============= EOF =============================================
# with open(name, 'r') as rfile:
# def shot(delay=3):
# if not self._cancel:
# lm.single_burst(delay=delay)
#
# d = yaml.load(rfile.read())
#
# device = d['device']
# if device == 'z':
# def iteration(p):
# sm.set_z(p, block=True)
# shot()
# elif device == 'laser':
# if self.names == 'trench':
# atl.set_burst_mode(False)
# else:
# atl.set_burst_mode(True)
#
# def iteration(p):
# if self.names == 'trench':
# if p == 0:
# atl.laser_run()
# else:
# atl.laser_stop()
#
# else:
# if self.names == 'burst':
# atl.set_nburst(p, save=False)
#
# shot()
# else:
# motor = lm.get_motor(device)
#
# def iteration(p):
# motor.trait_set(data_position=p)
# motor.block(4)
# shot()
#
# sx, sy = d['xy']
# xstep = d['xstep']
# ystep = d['ystep']
# # ny=d['y_nsteps']
# # nx=d['x_nsteps']
# ncols = d['ncols']
# ms = d['start']
# me = d['stop']
# mstep = d['step']
# sign = 1
# if me < ms:
# sign = -1
#
# n = (abs(ms - me) + 1) / mstep
#
# nx = ncols
# ny = int(n / int(ncols) + 1)
#
# v = d['velocity']
# # atl.set_nburst(nb)
# dp = ms
# for r in range(ny):
# if self._cancel:
# break
#
# for c in range(nx):
# if self._cancel:
# break
#
# dp = ms + (r * nx + c) * mstep
#
# if sign == 1:
# if dp > me:
# break
# else:
# if dp < me:
# break
#
# x, y = sx + c * xstep, sy + r * ystep
#
# # move at normal speed to first pos
# if r == 0 and c == 0:
# sm.linear_move(x, y, block=True)
# else:
# sm.linear_move(x, y, velocity=v, block=True)
#
# if self._cancel:
# break
#
# iteration(dp)
# if self._cancel:
# break
#
# if sign == 1:
# if dp > me:
# break
# else:
# if dp < me:
# break
# else:
# self.info('LaserScript truncated at row={}, col={}'.format(r, c))
#
# self._executing = False
# self.info('LaserScript finished'.format(name))
# import os
# import time
# from threading import Thread
#
# import yaml
# from numpy import linspace, array
# from traits.api import Any, Property, Bool, Enum, Button
# from traitsui.api import View, Item, ButtonEditor
#
# from pychron.core.helpers.filetools import unique_path
# from pychron.envisage.view_util import open_view
# from pychron.graph.stream_graph import StreamStackedGraph
# from pychron.loggable import Loggable
# from pychron.paths import paths
#
#
# class LaserScriptExecutor(Loggable):
# laser_manager = Any
# _executing = Bool(False)
# _cancel = False
# execute_button = Button
# execute_label = Property(depends_on='_executing')
# kind = Enum('scan', 'calibration')
#
# def _kind_default(self):
# return 'scan'
#
# def _get_execute_label(self):
# return 'Stop' if self._executing else 'Execute'
#
# def _execute_button_fired(self):
# self.execute()
#
# def execute(self):
# if self._executing:
# self._cancel = True
# self._executing = False
# else:
# self._cancel = False
# self._executing = True
# if self.kind == 'scan':
# func = self._execute_scan
# else:
# func = self._execute_calibration
# t = Thread(target=func)
# t.start()
#
# def _execute_calibration(self):
# name = os.path.join(paths.scripts_dir, '{}_calibration_scan.yaml'.format(self.name))
#
# import csv
#
# d = os.path.join(paths.data_dir, 'diode_scans')
# p, _cnt = unique_path(d, 'calibration', extension='csv')
# # st = None
# #
# # py = self.laser_manager.pyrometer
# # tc = self.laser_manager.get_device('temperature_monitor')
#
# g = StreamStackedGraph()
# g.clear()
#
# g.new_plot(scan_delay=1)
# g.new_series(x=[], y=[])
# g.new_plot(scan_delay=1)
# g.new_series(x=[], y=[], plotid=1)
#
# open_view(g)
# record = False
# if record:
# self.laser_manager.stage_manager.start_recording()
# time.sleep(1)
# # def gfunc(t, v1, v2):
# # g.add_datum((t, v1))
# # g.add_datum((t, v2), plotid=1)
#
# def gfunc(v1, v2):
# g.record(v1)
# g.record(v2, plotid=1)
#
# yd = yaml.load(open(name).read())
#
# start = yd['start']
# end = yd['end']
# step = yd['step']
# mean_tol = yd['mean_tol']
# std = yd['std']
# n = (end - start) / step + 1
# # nn = 30
# #
# # py = self.laser_manager.pyrometer
# # tc = self.laser_manager.get_device('temperature_monitor')
#
# with open(p, 'w') as wfile:
# writer = csv.writer(wfile)
# st = time.time()
# for ti in linspace(start, end, n):
# if self._cancel:
# break
# args = self._equilibrate_temp(ti, gfunc, st, mean_tol, std)
# if args:
# self.info('{} equilibrated'.format(ti))
# py_t, tc_t = args
# writer.writerow((ti, py_t, tc_t))
# else:
# break
#
# self.laser_manager.set_laser_temperature(0)
# if record:
# self.laser_manager.stage_manager.stop_recording()
# self._executing = False
#
# def _equilibrate_temp(self, temp, func, st, tol, std):
# """ wait until pyrometer temp equilibrated
# """
#
# temps = []
# # ttemps = []
# py = self.laser_manager.pyrometer
# tc = self.laser_manager.get_device('temperature_monitor')
#
# n = 15
#
# self.laser_manager.set_laser_temperature(temp)
# ctemp = self.laser_manager.map_temperature(temp)
# # ctemp = self.laser_manager.temperature_controller.map_temperature(temp)
# while 1:
# if self._cancel:
# break
# sti = time.time()
# py_t = py.read_temperature(verbose=False)
# tc_t = tc.read_temperature(verbose=False)
# # t = time.time() - st
# func(py_t, tc_t)
#
# temps.append(py_t)
# # ttemps.append(tc_t)
# ns = array(temps[-n:])
# # ts = array(ttemps[-n:])
# if abs(ns.mean() - ctemp) < tol and ns.std() < std:
# break
#
# elapsed = time.time() - sti
# time.sleep(max(0.0001, min(1, 1 - elapsed)))
#
# nn = 30
# ptemps = []
# ctemps = []
# for _ in range(nn):
# if self._cancel:
# break
#
# sti = time.time()
#
# # t = sti - st
# py_t = py.read_temperature(verbose=False)
# tc_t = tc.read_temperature(verbose=False)
# func(py_t, tc_t)
#
# ptemps.append(py_t)
# ctemps.append(tc_t)
# elapsed = time.time() - sti
# time.sleep(max(0.0001, min(1, 1 - elapsed)))
#
# return array(ptemps).mean(), array(ctemps).mean()
#
# # return ns.mean(), ts.mean()
#
# def _execute_scan(self):
# name = os.path.join(paths.scripts_dir, '{}_scan.yaml'.format(self.name))
#
# import csv
#
# d = os.path.join(paths.data_dir, 'diode_scans')
# p, _cnt = unique_path(d, 'scan', extension='csv')
# st = None
#
# py = self.laser_manager.pyrometer
# tc = self.laser_manager.get_device('temperature_monitor')
# yd = yaml.load(open(name).read())
#
# power = yd['power']
# duration = yd['duration']
# power_on = yd['power_on']
# power_off = yd['power_off']
# period = yd['period']
# if 'temp' in yd:
# temp = yd['temp']
# else:
# temp = None
#
# g = StreamStackedGraph()
# g.new_plot(scan_delay=1, )
# g.new_series(x=[], y=[])
# g.new_plot(scan_delay=1, )
# g.new_series(x=[], y=[], plotid=1)
#
# open_view(g)
# self.laser_manager.stage_manager.start_recording()
# time.sleep(1)
#
# def gfunc(v1, v2):
# g.record(v1)
# g.record(v2, plotid=1)
#
# pi = 0
# with open(p, 'w') as wfile:
# writer = csv.writer(wfile)
# t = 0
# ti = 0
# while ti <= duration:
# if self._cancel:
# break
# # print ti, power_off, pi, ti >= power_off, (ti >= power_off and pi)
# if ti == power_on:
# # turn on set laser to power
# if temp:
# self.laser_manager.set_laser_temperature(temp)
# pi = temp
# else:
# pi = power
# self.laser_manager.set_laser_power(power)
# elif ti >= power_off and pi:
# print 'setting power off'
# if temp:
# self.laser_manager.set_laser_temperature(0)
# else:
# self.laser_manager.set_laser_power(0)
# pi = 0
#
# if st is None:
# st = time.time()
#
# t = time.time() - st
#
# py_t = py.read_temperature(verbose=False)
# tc_t = tc.read_temperature(verbose=False)
# gfunc(py_t, tc_t)
#
# writer.writerow((ti, pi, t, py_t, tc_t))
# ti += 1
#
# time.sleep(period)
#
# if temp:
# self.laser_manager.set_laser_temperature(0)
# else:
# self.laser_manager.set_laser_power(0)
# self.laser_manager.stage_manager.stop_recording()
# self._executing = False
#
# def traits_view(self):
# v = View(Item('execute_button', show_label=False,
# editor=ButtonEditor(label_value='execute_label'),
#
# ),
# Item('kind', show_label=False)
# )
# return v
#
#
# class UVLaserScriptExecutor(LaserScriptExecutor):
# names = Enum('mask', 'z', 'attenuator', 'burst')
#
# def _execute_button_fired(self):
# n = self.names
# if n is None:
# n = 'mask'
#
# name = os.path.join(paths.scripts_dir, 'uv_matrix_{}.yaml'.format(n))
# self.execute(name)
#
# def _execute(self, name):
# self.info('starting LaserScript {}'.format(name))
#
# lm = self.laser_manager
# sm = lm.stage_manager
# atl = lm.atl_controller
#
# with open(name, 'r') as rfile:
# def shot(delay=3):
# if not self._cancel:
# lm.single_burst(delay=delay)
#
# d = yaml.load(rfile.read())
#
# device = d['device']
# if device == 'z':
# def iteration(p):
# sm.set_z(p, block=True)
# shot()
# elif device == 'laser':
# if self.names == 'trench':
# atl.set_burst_mode(False)
# else:
# atl.set_burst_mode(True)
#
# def iteration(p):
# if self.names == 'trench':
# if p == 0:
# atl.laser_run()
# else:
# atl.laser_stop()
#
# else:
# if self.names == 'burst':
# atl.set_nburst(p, save=False)
#
# shot()
# else:
# motor = lm.get_motor(device)
#
# def iteration(p):
# motor.trait_set(data_position=p)
# motor.block(4)
# shot()
#
# sx, sy = d['xy']
# xstep = d['xstep']
# ystep = d['ystep']
# # ny=d['y_nsteps']
# # nx=d['x_nsteps']
# ncols = d['ncols']
# ms = d['start']
# me = d['stop']
# mstep = d['step']
# sign = 1
# if me < ms:
# sign = -1
#
# n = (abs(ms - me) + 1) / mstep
#
# nx = ncols
# ny = int(n / int(ncols) + 1)
#
# v = d['velocity']
# # atl.set_nburst(nb)
# dp = ms
# for r in range(ny):
# if self._cancel:
# break
#
# for c in range(nx):
# if self._cancel:
# break
#
# dp = ms + (r * nx + c) * mstep
#
# if sign == 1:
# if dp > me:
# break
# else:
# if dp < me:
# break
#
# x, y = sx + c * xstep, sy + r * ystep
#
# # move at normal speed to first pos
# if r == 0 and c == 0:
# sm.linear_move(x, y, block=True)
# else:
# sm.linear_move(x, y, velocity=v, block=True)
#
# if self._cancel:
# break
#
# iteration(dp)
# if self._cancel:
# break
#
# if sign == 1:
# if dp > me:
# break
# else:
# if dp < me:
# break
# else:
# self.info('LaserScript truncated at row={}, col={}'.format(r, c))
#
# self._executing = False
# self.info('LaserScript finished'.format(name))
#
#
# if __name__ == '__main__':
# lm = LaserScriptExecutor()
# name = '/Users/uv/Pychrondata_uv/scripts/uv_laser.yaml'
# lm.execute(name)
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
938991e16d2eccaad9073b085a155e4da1bbce96 | 563274d0bfb720b2d8c4dfe55ce0352928e0fa66 | /TestProject/src/sqlalchemy-default/test/orm/test_attributes.py | 1fc70fd77b63fed7d925d9191a6c2b095e4c4994 | [
"MIT"
] | permissive | wangzhengbo1204/Python | 30488455637ad139abc2f173a0a595ecaf28bcdc | 63f7488d9df9caf1abec2cab7c59cf5d6358b4d0 | refs/heads/master | 2020-05-19T19:48:27.092764 | 2013-05-11T06:49:41 | 2013-05-11T06:49:41 | 6,544,357 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85,766 | py | import pickle
from sqlalchemy.orm import attributes, instrumentation, exc as orm_exc
from sqlalchemy.orm.collections import collection
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy import exc as sa_exc
from sqlalchemy.testing import eq_, ne_, assert_raises, \
assert_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.util import gc_collect, all_partial_orderings
from sqlalchemy.util import jython
from sqlalchemy import event
from sqlalchemy import testing
# global for pickling tests
# Rebound per-test by AttributesTest.setup()/teardown(); module scope lets
# pickle resolve the classes by qualified name during round-trips.
MyTest = None
MyTest2 = None
class AttributeImplAPITest(fixtures.MappedTest):
    """Exercise the low-level AttributeImpl append()/remove()/pop() API
    directly for scalar-object and collection-object attributes, including
    error behavior when the given value is not associated with the
    attribute."""
    def _scalar_obj_fixture(self):
        """A.b is a scalar (uselist=False) object-valued attribute."""
        class A(object):
            pass
        class B(object):
            pass
        instrumentation.register_class(A)
        instrumentation.register_class(B)
        attributes.register_attribute(A, "b", uselist=False, useobject=True)
        return A, B
    def _collection_obj_fixture(self):
        """A.b is a collection (uselist=True) object-valued attribute."""
        class A(object):
            pass
        class B(object):
            pass
        instrumentation.register_class(A)
        instrumentation.register_class(B)
        attributes.register_attribute(A, "b", uselist=True, useobject=True)
        return A, B
    def test_scalar_obj_remove_invalid(self):
        """remove() of a non-associated object raises ValueError."""
        A, B = self._scalar_obj_fixture()
        a1 = A()
        b1 = B()
        b2 = B()
        A.b.impl.append(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b is b1
        assert_raises_message(
            ValueError,
            "Object <B at .*?> not "
            "associated with <A at .*?> on attribute 'b'",
            A.b.impl.remove,
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b2, None
        )
    def test_scalar_obj_pop_invalid(self):
        """pop() of a non-associated object is a silent no-op."""
        A, B = self._scalar_obj_fixture()
        a1 = A()
        b1 = B()
        b2 = B()
        A.b.impl.append(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b is b1
        A.b.impl.pop(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b2, None
        )
        assert a1.b is b1
    def test_scalar_obj_pop_valid(self):
        """pop() of the currently-associated object clears the attribute."""
        A, B = self._scalar_obj_fixture()
        a1 = A()
        b1 = B()
        A.b.impl.append(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b is b1
        A.b.impl.pop(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b is None
    def test_collection_obj_remove_invalid(self):
        """collection remove() of a non-member raises ValueError."""
        A, B = self._collection_obj_fixture()
        a1 = A()
        b1 = B()
        b2 = B()
        A.b.impl.append(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b == [b1]
        assert_raises_message(
            ValueError,
            r"list.remove\(x\): x not in list",
            A.b.impl.remove,
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b2, None
        )
    def test_collection_obj_pop_invalid(self):
        """collection pop() of a non-member is a silent no-op."""
        A, B = self._collection_obj_fixture()
        a1 = A()
        b1 = B()
        b2 = B()
        A.b.impl.append(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b == [b1]
        A.b.impl.pop(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b2, None
        )
        assert a1.b == [b1]
    def test_collection_obj_pop_valid(self):
        """collection pop() of a member removes it from the collection."""
        A, B = self._collection_obj_fixture()
        a1 = A()
        b1 = B()
        A.b.impl.append(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b == [b1]
        A.b.impl.pop(
            attributes.instance_state(a1),
            attributes.instance_dict(a1), b1, None
        )
        assert a1.b == []
class AttributesTest(fixtures.ORMTest):
    """General tests of class instrumentation and InstrumentedAttribute
    behavior: scalar/collection attributes, lazy callables, deferred
    loaders, history tracking, AttributeExtension events, inheritance,
    parent tracking and collection classes."""
    def setup(self):
        # MyTest / MyTest2 are module-level (see top of file) so that
        # pickle can resolve them by qualified name in the pickling tests.
        global MyTest, MyTest2
        class MyTest(object): pass
        class MyTest2(object): pass
    def teardown(self):
        global MyTest, MyTest2
        MyTest, MyTest2 = None, None
    def test_basic(self):
        """scalar attribute get/set survives a commit of history."""
        class User(object):
            pass
        instrumentation.register_class(User)
        attributes.register_attribute(User, 'user_id', uselist=False,
                useobject=False)
        attributes.register_attribute(User, 'user_name', uselist=False,
                useobject=False)
        attributes.register_attribute(User, 'email_address',
                uselist=False, useobject=False)
        u = User()
        u.user_id = 7
        u.user_name = 'john'
        u.email_address = 'lala@123.com'
        self.assert_(u.user_id == 7 and u.user_name == 'john'
                and u.email_address == 'lala@123.com')
        attributes.instance_state(u)._commit_all(attributes.instance_dict(u))
        self.assert_(u.user_id == 7 and u.user_name == 'john'
                and u.email_address == 'lala@123.com')
        u.user_name = 'heythere'
        u.email_address = 'foo@bar.com'
        self.assert_(u.user_id == 7 and u.user_name == 'heythere'
                and u.email_address == 'foo@bar.com')
    def test_pickleness(self):
        """instrumented instances pickle/unpickle with their state intact."""
        instrumentation.register_class(MyTest)
        instrumentation.register_class(MyTest2)
        attributes.register_attribute(MyTest, 'user_id', uselist=False,
                useobject=False)
        attributes.register_attribute(MyTest, 'user_name',
                uselist=False, useobject=False)
        attributes.register_attribute(MyTest, 'email_address',
                uselist=False, useobject=False)
        attributes.register_attribute(MyTest2, 'a', uselist=False,
                useobject=False)
        attributes.register_attribute(MyTest2, 'b', uselist=False,
                useobject=False)
        # shouldn't be pickling callables at the class level
        def somecallable(state, passive):
            return None
        attributes.register_attribute(MyTest, 'mt2', uselist=True,
                trackparent=True, callable_=somecallable,
                useobject=True)
        o = MyTest()
        o.mt2.append(MyTest2())
        o.user_id=7
        o.mt2[0].a = 'abcde'
        pk_o = pickle.dumps(o)
        o2 = pickle.loads(pk_o)
        pk_o2 = pickle.dumps(o2)
        # so... pickle is creating a new 'mt2' string after a roundtrip here,
        # so we'll brute-force set it to be id-equal to the original string
        if False:
            o_mt2_str = [ k for k in o.__dict__ if k == 'mt2'][0]
            o2_mt2_str = [ k for k in o2.__dict__ if k == 'mt2'][0]
            self.assert_(o_mt2_str == o2_mt2_str)
            self.assert_(o_mt2_str is not o2_mt2_str)
            # change the id of o2.__dict__['mt2']
            former = o2.__dict__['mt2']
            del o2.__dict__['mt2']
            o2.__dict__[o_mt2_str] = former
            # Relies on dict ordering
            if not jython:
                self.assert_(pk_o == pk_o2)
        # the above is kind of disturbing, so let's do it again a little
        # differently.  the string-id in serialization thing is just an
        # artifact of pickling that comes up in the first round-trip.
        # a -> b differs in pickle memoization of 'mt2', but b -> c will
        # serialize identically.
        o3 = pickle.loads(pk_o2)
        pk_o3 = pickle.dumps(o3)
        o4 = pickle.loads(pk_o3)
        pk_o4 = pickle.dumps(o4)
        # Relies on dict ordering
        if not jython:
            self.assert_(pk_o3 == pk_o4)
        # and lastly make sure we still have our data after all that.
        # identical serialization is great, *if* it's complete :)
        self.assert_(o4.user_id == 7)
        self.assert_(o4.user_name is None)
        self.assert_(o4.email_address is None)
        self.assert_(len(o4.mt2) == 1)
        self.assert_(o4.mt2[0].a == 'abcde')
        self.assert_(o4.mt2[0].b is None)
    @testing.requires.predictable_gc
    def test_state_gc(self):
        """test that InstanceState always has a dict, even after host
        object gc'ed."""
        class Foo(object):
            pass
        instrumentation.register_class(Foo)
        f = Foo()
        state = attributes.instance_state(f)
        f.bar = "foo"
        eq_(state.dict, {'bar': 'foo', state.manager.STATE_ATTR: state})
        del f
        gc_collect()
        assert state.obj() is None
        assert state.dict == {}
    def test_object_dereferenced_error(self):
        """mutating a collection whose parent was garbage-collected raises
        ObjectDereferencedError (Bar.__init__ forces a gc during append)."""
        class Foo(object):
            pass
        class Bar(object):
            def __init__(self):
                gc_collect()
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo,
                'bars',
                uselist=True,
                useobject=True)
        assert_raises_message(
            orm_exc.ObjectDereferencedError,
            "Can't emit change event for attribute "
            "'Foo.bars' - parent object of type <Foo> "
            "has been garbage collected.",
            lambda: Foo().bars.append(Bar())
        )
    def test_deferred(self):
        """deferred scalar loader repopulates expired attributes on access."""
        class Foo(object):pass
        data = {'a':'this is a', 'b':12}
        def loader(state, keys):
            for k in keys:
                state.dict[k] = data[k]
            return attributes.ATTR_WAS_SET
        instrumentation.register_class(Foo)
        manager = attributes.manager_of_class(Foo)
        manager.deferred_scalar_loader = loader
        attributes.register_attribute(Foo, 'a', uselist=False, useobject=False)
        attributes.register_attribute(Foo, 'b', uselist=False, useobject=False)
        f = Foo()
        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                set())
        eq_(f.a, 'this is a')
        eq_(f.b, 12)
        f.a = 'this is some new a'
        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                set())
        eq_(f.a, 'this is a')
        eq_(f.b, 12)
        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                set())
        f.a = 'this is another new a'
        eq_(f.a, 'this is another new a')
        eq_(f.b, 12)
        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                set())
        eq_(f.a, 'this is a')
        eq_(f.b, 12)
        del f.a
        eq_(f.a, None)
        eq_(f.b, 12)
        attributes.instance_state(f)._commit_all(attributes.instance_dict(f),
                set())
        eq_(f.a, None)
        eq_(f.b, 12)
    def test_deferred_pickleable(self):
        """deferred-loader state survives pickling of an expired instance."""
        data = {'a':'this is a', 'b':12}
        def loader(state, keys):
            for k in keys:
                state.dict[k] = data[k]
            return attributes.ATTR_WAS_SET
        instrumentation.register_class(MyTest)
        manager = attributes.manager_of_class(MyTest)
        manager.deferred_scalar_loader=loader
        attributes.register_attribute(MyTest, 'a', uselist=False, useobject=False)
        attributes.register_attribute(MyTest, 'b', uselist=False, useobject=False)
        m = MyTest()
        attributes.instance_state(m)._expire(attributes.instance_dict(m), set())
        assert 'a' not in m.__dict__
        m2 = pickle.loads(pickle.dumps(m))
        assert 'a' not in m2.__dict__
        eq_(m2.a, "this is a")
        eq_(m2.b, 12)
    def test_list(self):
        """basic collection attribute population and history commit."""
        class User(object):pass
        class Address(object):pass
        instrumentation.register_class(User)
        instrumentation.register_class(Address)
        attributes.register_attribute(User, 'user_id', uselist=False,
                useobject=False)
        attributes.register_attribute(User, 'user_name', uselist=False,
                useobject=False)
        attributes.register_attribute(User, 'addresses', uselist=True,
                useobject=True)
        attributes.register_attribute(Address, 'address_id',
                uselist=False, useobject=False)
        attributes.register_attribute(Address, 'email_address',
                uselist=False, useobject=False)
        u = User()
        u.user_id = 7
        u.user_name = 'john'
        u.addresses = []
        a = Address()
        a.address_id = 10
        a.email_address = 'lala@123.com'
        u.addresses.append(a)
        self.assert_(u.user_id == 7 and u.user_name == 'john'
                and u.addresses[0].email_address == 'lala@123.com')
        # NOTE(review): this expression builds a throwaway tuple; only the
        # _commit_all() side effect matters here.
        (u,
         attributes.instance_state(a)._commit_all(attributes.instance_dict(a)))
        self.assert_(u.user_id == 7 and u.user_name == 'john'
                and u.addresses[0].email_address == 'lala@123.com')
        u.user_name = 'heythere'
        a = Address()
        a.address_id = 11
        a.email_address = 'foo@bar.com'
        u.addresses.append(a)
        eq_(u.user_id, 7)
        eq_(u.user_name, 'heythere')
        eq_(u.addresses[0].email_address,'lala@123.com')
        eq_(u.addresses[1].email_address,'foo@bar.com')
    def test_extension_commit_attr(self):
        """test that an extension which commits attribute history
        maintains the end-result history.
        This won't work in conjunction with some unitofwork extensions.
        """
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        # ReceiveEvents commits all history from within each event hook
        # whenever the enclosing-scope 'commit' flag is True.
        class ReceiveEvents(AttributeExtension):
            def __init__(self, key):
                self.key = key
            def append(self, state, child, initiator):
                if commit:
                    state._commit_all(state.dict)
                return child
            def remove(self, state, child, initiator):
                if commit:
                    state._commit_all(state.dict)
                return child
            def set(self, state, child, oldchild, initiator):
                if commit:
                    state._commit_all(state.dict)
                return child
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        b1, b2, b3, b4 = Bar(id='b1'), Bar(id='b2'), Bar(id='b3'), Bar(id='b4')
        def loadcollection(state, passive):
            if passive is attributes.PASSIVE_NO_FETCH:
                return attributes.PASSIVE_NO_RESULT
            return [b1, b2]
        def loadscalar(state, passive):
            if passive is attributes.PASSIVE_NO_FETCH:
                return attributes.PASSIVE_NO_RESULT
            return b2
        attributes.register_attribute(Foo, 'bars',
                uselist=True,
                useobject=True,
                callable_=loadcollection,
                extension=[ReceiveEvents('bars')])
        attributes.register_attribute(Foo, 'bar',
                uselist=False,
                useobject=True,
                callable_=loadscalar,
                extension=[ReceiveEvents('bar')])
        attributes.register_attribute(Foo, 'scalar',
                uselist=False,
                useobject=False, extension=[ReceiveEvents('scalar')])
        def create_hist():
            # runs the same mutation sequence; 'shouldmatch' marks which
            # steps are expected to produce the same history with and
            # without the commit-from-event behavior.
            def hist(key, shouldmatch, fn, *arg):
                attributes.instance_state(f1)._commit_all(attributes.instance_dict(f1))
                fn(*arg)
                histories.append((shouldmatch,
                        attributes.get_history(f1, key)))
            f1 = Foo()
            hist('bars', True, f1.bars.append, b3)
            hist('bars', True, f1.bars.append, b4)
            hist('bars', False, f1.bars.remove, b2)
            hist('bar', True, setattr, f1, 'bar', b3)
            hist('bar', True, setattr, f1, 'bar', None)
            hist('bar', True, setattr, f1, 'bar', b4)
            hist('scalar', True, setattr, f1, 'scalar', 5)
            hist('scalar', True, setattr, f1, 'scalar', None)
            hist('scalar', True, setattr, f1, 'scalar', 4)
        histories = []
        commit = False
        create_hist()
        without_commit = list(histories)
        histories[:] = []
        commit = True
        create_hist()
        with_commit = histories
        for without, with_ in zip(without_commit, with_commit):
            shouldmatch, woc = without
            shouldmatch, wic = with_
            if shouldmatch:
                eq_(woc, wic)
            else:
                ne_(woc, wic)
    def test_extension_lazyload_assertion(self):
        """backref events against an un-loaded lazy collection raise
        AssertionError (the event hooks touch state.obj().bars)."""
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        class ReceiveEvents(AttributeExtension):
            def append(self, state, child, initiator):
                state.obj().bars
                return child
            def remove(self, state, child, initiator):
                state.obj().bars
                return child
            def set(self, state, child, oldchild, initiator):
                return child
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        bar1, bar2, bar3 = [Bar(id=1), Bar(id=2), Bar(id=3)]
        def func1(state, passive):
            if passive is attributes.PASSIVE_NO_FETCH:
                return attributes.PASSIVE_NO_RESULT
            return [bar1, bar2, bar3]
        attributes.register_attribute(Foo, 'bars', uselist=True,
                callable_=func1, useobject=True,
                extension=[ReceiveEvents()])
        attributes.register_attribute(Bar, 'foos', uselist=True,
                useobject=True, backref='bars')
        x = Foo()
        assert_raises(AssertionError, Bar(id=4).foos.append, x)
        x.bars
        b = Bar(id=4)
        b.foos.append(x)
        attributes.instance_state(x)._expire_attributes(attributes.instance_dict(x),
                ['bars'])
        assert_raises(AssertionError, b.foos.remove, x)
    def test_scalar_listener(self):
        # listeners on ScalarAttributeImpl aren't used normally. test that
        # they work for the benefit of user extensions
        class Foo(object):
            pass
        results = []
        class ReceiveEvents(AttributeExtension):
            def append(self, state, child, initiator):
                assert False
            def remove(self, state, child, initiator):
                results.append(("remove", state.obj(), child))
            def set(self, state, child, oldchild, initiator):
                results.append(("set", state.obj(), child, oldchild))
                return child
        instrumentation.register_class(Foo)
        attributes.register_attribute(Foo, 'x', uselist=False,
                useobject=False,
                extension=ReceiveEvents())
        f = Foo()
        f.x = 5
        f.x = 17
        del f.x
        eq_(results, [
            ('set', f, 5, attributes.NEVER_SET),
            ('set', f, 17, 5),
            ('remove', f, 17),
        ])
    def test_lazytrackparent(self):
        """test that the "hasparent" flag works properly
        when lazy loaders and backrefs are used
        """
        class Post(object):pass
        class Blog(object):pass
        instrumentation.register_class(Post)
        instrumentation.register_class(Blog)
        # set up instrumented attributes with backrefs
        attributes.register_attribute(Post, 'blog', uselist=False,
                backref='posts',
                trackparent=True, useobject=True)
        attributes.register_attribute(Blog, 'posts', uselist=True,
                backref='blog',
                trackparent=True, useobject=True)
        # create objects as if they'd been freshly loaded from the database (without history)
        b = Blog()
        p1 = Post()
        attributes.instance_state(b)._set_callable(attributes.instance_dict(b),
                'posts', lambda passive:[p1])
        attributes.instance_state(p1)._set_callable(attributes.instance_dict(p1),
                'blog', lambda passive:b)
        # NOTE(review): only the _commit_all() call matters here; the
        # surrounding tuple is built and discarded.
        p1, attributes.instance_state(b)._commit_all(attributes.instance_dict(b))
        # no orphans (called before the lazy loaders fire off)
        assert attributes.has_parent(Blog, p1, 'posts', optimistic=True)
        assert attributes.has_parent(Post, b, 'blog', optimistic=True)
        # assert connections
        assert p1.blog is b
        assert p1 in b.posts
        # manual connections
        b2 = Blog()
        p2 = Post()
        b2.posts.append(p2)
        assert attributes.has_parent(Blog, p2, 'posts')
        assert attributes.has_parent(Post, b2, 'blog')
    def test_illegal_trackparent(self):
        """has_parent()/sethasparent() on a non-trackparent attribute
        raise AssertionError."""
        class Post(object):pass
        class Blog(object):pass
        instrumentation.register_class(Post)
        instrumentation.register_class(Blog)
        attributes.register_attribute(Post, 'blog', useobject=True)
        assert_raises_message(
            AssertionError,
            "This AttributeImpl is not configured to track parents.",
            attributes.has_parent, Post, Blog(), 'blog'
        )
        assert_raises_message(
            AssertionError,
            "This AttributeImpl is not configured to track parents.",
            Post.blog.impl.sethasparent, "x", "x", True
        )
    def test_inheritance(self):
        """tests that attributes are polymorphic"""
        class Foo(object):pass
        class Bar(Foo):pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        def func1(state, passive):
            return "this is the foo attr"
        def func2(state, passive):
            return "this is the bar attr"
        def func3(state, passive):
            return "this is the shared attr"
        attributes.register_attribute(Foo, 'element', uselist=False,
                callable_=func1, useobject=True)
        attributes.register_attribute(Foo, 'element2', uselist=False,
                callable_=func3, useobject=True)
        attributes.register_attribute(Bar, 'element', uselist=False,
                callable_=func2, useobject=True)
        x = Foo()
        y = Bar()
        assert x.element == 'this is the foo attr'
        assert y.element == 'this is the bar attr'
        assert x.element2 == 'this is the shared attr'
        assert y.element2 == 'this is the shared attr'
    def test_no_double_state(self):
        """only one InstanceState is created even when both a subclass
        and its superclass __init__ run."""
        states = set()
        class Foo(object):
            def __init__(self):
                states.add(attributes.instance_state(self))
        class Bar(Foo):
            def __init__(self):
                states.add(attributes.instance_state(self))
                Foo.__init__(self)
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        b = Bar()
        eq_(len(states), 1)
        eq_(list(states)[0].obj(), b)
    def test_inheritance2(self):
        """test that the attribute manager can properly traverse the
        managed attributes of an object, if the object is of a
        descendant class with managed attributes in the parent class"""
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        class Element(object):
            _state = True
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'element', uselist=False,
                useobject=True)
        el = Element()
        x = Bar()
        x.element = el
        eq_(attributes.get_state_history(attributes.instance_state(x),
                'element'), ([el], (), ()))
        attributes.instance_state(x)._commit_all(attributes.instance_dict(x))
        added, unchanged, deleted = \
            attributes.get_state_history(attributes.instance_state(x),
                'element')
        assert added == ()
        assert unchanged == [el]
    def test_lazyhistory(self):
        """tests that history functions work with lazy-loading attributes"""
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        bar1, bar2, bar3, bar4 = [Bar(id=1), Bar(id=2), Bar(id=3),
                Bar(id=4)]
        def func1(state, passive):
            return 'this is func 1'
        def func2(state, passive):
            return [bar1, bar2, bar3]
        attributes.register_attribute(Foo, 'col1', uselist=False,
                callable_=func1, useobject=True)
        attributes.register_attribute(Foo, 'col2', uselist=True,
                callable_=func2, useobject=True)
        attributes.register_attribute(Bar, 'id', uselist=False,
                useobject=True)
        x = Foo()
        attributes.instance_state(x)._commit_all(attributes.instance_dict(x))
        x.col2.append(bar4)
        eq_(attributes.get_state_history(attributes.instance_state(x),
                'col2'), ([bar4], [bar1, bar2, bar3], []))
    def test_parenttrack(self):
        """trackparent flag follows assignment and de-assignment."""
        class Foo(object):
            pass
        class Bar(object):
            pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'element', uselist=False,
                trackparent=True, useobject=True)
        attributes.register_attribute(Bar, 'element', uselist=False,
                trackparent=True, useobject=True)
        f1 = Foo()
        f2 = Foo()
        b1 = Bar()
        b2 = Bar()
        f1.element = b1
        b2.element = f2
        assert attributes.has_parent(Foo, b1, 'element')
        assert not attributes.has_parent(Foo, b2, 'element')
        assert not attributes.has_parent(Foo, f2, 'element')
        assert attributes.has_parent(Bar, f2, 'element')
        b2.element = None
        assert not attributes.has_parent(Bar, f2, 'element')
        # test that double assignment doesn't accidentally reset the
        # 'parent' flag.
        b3 = Bar()
        f4 = Foo()
        b3.element = f4
        assert attributes.has_parent(Bar, f4, 'element')
        b3.element = f4
        assert attributes.has_parent(Bar, f4, 'element')
    def test_descriptorattributes(self):
        """changeset: 1633 broke ability to use ORM to map classes with
        unusual descriptor attributes (for example, classes that inherit
        from ones implementing zope.interface.Interface). This is a
        simple regression test to prevent that defect. """
        class des(object):
            def __get__(self, instance, owner):
                raise AttributeError('fake attribute')
        class Foo(object):
            A = des()
        instrumentation.register_class(Foo)
        instrumentation.unregister_class(Foo)
    def test_collectionclasses(self):
        """custom collection classes must elect appender/remover methods;
        register/unregister cycles work with set, dict and plain classes."""
        class Foo(object):
            pass
        instrumentation.register_class(Foo)
        attributes.register_attribute(Foo, 'collection', uselist=True,
                typecallable=set, useobject=True)
        assert attributes.manager_of_class(Foo).is_instrumented('collection'
                )
        assert isinstance(Foo().collection, set)
        attributes.unregister_attribute(Foo, 'collection')
        assert not attributes.manager_of_class(Foo).is_instrumented('collection'
                )
        # a bare dict has no appender -> rejected
        try:
            attributes.register_attribute(Foo, 'collection',
                    uselist=True, typecallable=dict, useobject=True)
            assert False
        except sa_exc.ArgumentError, e:
            assert str(e) \
                == 'Type InstrumentedDict must elect an appender '\
                'method to be a collection class'
        class MyDict(dict):
            @collection.appender
            def append(self, item):
                self[item.foo] = item
            @collection.remover
            def remove(self, item):
                del self[item.foo]
        attributes.register_attribute(Foo, 'collection', uselist=True,
                typecallable=MyDict, useobject=True)
        assert isinstance(Foo().collection, MyDict)
        attributes.unregister_attribute(Foo, 'collection')
        # a plain class with no collection decorators -> rejected
        class MyColl(object):
            pass
        try:
            attributes.register_attribute(Foo, 'collection',
                    uselist=True, typecallable=MyColl, useobject=True)
            assert False
        except sa_exc.ArgumentError, e:
            assert str(e) \
                == 'Type MyColl must elect an appender method to be a '\
                'collection class'
        class MyColl(object):
            @collection.iterator
            def __iter__(self):
                return iter([])
            @collection.appender
            def append(self, item):
                pass
            @collection.remover
            def remove(self, item):
                pass
        attributes.register_attribute(Foo, 'collection', uselist=True,
                typecallable=MyColl, useobject=True)
        try:
            Foo().collection
            assert True
        except sa_exc.ArgumentError, e:
            assert False
class GetNoValueTest(fixtures.ORMTest):
    """Behavior of AttributeImpl.get() under the various PASSIVE_* flags
    when no value is present in the instance dict."""
    def _fixture(self, expected):
        # Build a Foo whose 'attr' either carries a lazy callable returning
        # ``expected`` (when expected is not None) or no callable at all;
        # return the impl plus state/dict of a fresh instance.
        class Foo(object):
            pass
        class Bar(object):
            pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        if expected is None:
            attributes.register_attribute(
                Foo, "attr", useobject=True, uselist=False)
        else:
            def lazy_callable(state, passive):
                return expected
            attributes.register_attribute(
                Foo, "attr", useobject=True, uselist=False,
                callable_=lazy_callable)
        target = Foo()
        state = attributes.instance_state(target)
        dict_ = attributes.instance_dict(target)
        return Foo.attr.impl, state, dict_
    def test_passive_no_result(self):
        # a lazy callable returning PASSIVE_NO_RESULT propagates it
        impl, state, dict_ = self._fixture(attributes.PASSIVE_NO_RESULT)
        got = impl.get(
            state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE)
        eq_(got, attributes.PASSIVE_NO_RESULT)
    def test_passive_no_result_never_set(self):
        # NEVER_SET from the callable still reports PASSIVE_NO_RESULT
        # and leaves the dict untouched
        impl, state, dict_ = self._fixture(attributes.NEVER_SET)
        got = impl.get(
            state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE)
        eq_(got, attributes.PASSIVE_NO_RESULT)
        assert 'attr' not in dict_
    def test_passive_ret_never_set_never_set(self):
        impl, state, dict_ = self._fixture(attributes.NEVER_SET)
        got = impl.get(
            state, dict_, passive=attributes.PASSIVE_RETURN_NEVER_SET)
        eq_(got, attributes.NEVER_SET)
        assert 'attr' not in dict_
    def test_passive_ret_never_set_empty(self):
        impl, state, dict_ = self._fixture(None)
        got = impl.get(
            state, dict_, passive=attributes.PASSIVE_RETURN_NEVER_SET)
        eq_(got, attributes.NEVER_SET)
        assert 'attr' not in dict_
    def test_off_empty(self):
        # PASSIVE_OFF initializes the attribute to None in the dict
        impl, state, dict_ = self._fixture(None)
        got = impl.get(state, dict_, passive=attributes.PASSIVE_OFF)
        eq_(got, None)
        assert 'attr' in dict_
class UtilTest(fixtures.ORMTest):
    """Smoke test for the public helper functions of the ``attributes``
    module: init_collection, get/set/del_attribute, get_history and
    set_committed_value."""
    def test_helpers(self):
        class Foo(object):
            pass
        class Bar(object):
            pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(
            Foo, "coll", uselist=True, useobject=True)
        foo = Foo()
        bar_one = Bar()
        bar_two = Bar()
        # init_collection returns an adapter over the same list object
        adapter = attributes.init_collection(foo, "coll")
        assert adapter.data is foo.coll
        assert attributes.get_attribute(foo, "coll") is foo.coll
        # a plain set records history
        attributes.set_attribute(foo, "coll", [bar_one])
        assert foo.coll == [bar_one]
        eq_(attributes.get_history(foo, "coll"), ([bar_one], [], []))
        # set_committed_value replaces the value without history
        attributes.set_committed_value(foo, "coll", [bar_two])
        eq_(attributes.get_history(foo, "coll"), ((), [bar_two], ()))
        # del_attribute removes the key from the instance dict
        attributes.del_attribute(foo, "coll")
        assert "coll" not in foo.__dict__
class BackrefTest(fixtures.ORMTest):
    """Backref event propagation between paired attributes for
    many-to-many, one-to-many and one-to-one configurations, including
    token-based initiator matching across inheritance."""
    def test_m2m(self):
        """m2m append/remove/bulk-replace keeps both sides in sync."""
        class Student(object):pass
        class Course(object):pass
        instrumentation.register_class(Student)
        instrumentation.register_class(Course)
        attributes.register_attribute(Student, 'courses', uselist=True,
                backref="students", useobject=True)
        attributes.register_attribute(Course, 'students', uselist=True,
                backref="courses", useobject=True)
        s = Student()
        c = Course()
        s.courses.append(c)
        self.assert_(c.students == [s])
        s.courses.remove(c)
        self.assert_(c.students == [])
        (s1, s2, s3) = (Student(), Student(), Student())
        c.students = [s1, s2, s3]
        self.assert_(s2.courses == [c])
        self.assert_(s1.courses == [c])
        s1.courses.remove(c)
        self.assert_(c.students == [s2,s3])
    def test_o2m(self):
        """o2m collection and its scalar backref mirror each other."""
        class Post(object):pass
        class Blog(object):pass
        instrumentation.register_class(Post)
        instrumentation.register_class(Blog)
        attributes.register_attribute(Post, 'blog', uselist=False,
                backref='posts',
                trackparent=True, useobject=True)
        attributes.register_attribute(Blog, 'posts', uselist=True,
                backref='blog',
                trackparent=True, useobject=True)
        b = Blog()
        (p1, p2, p3) = (Post(), Post(), Post())
        b.posts.append(p1)
        b.posts.append(p2)
        b.posts.append(p3)
        self.assert_(b.posts == [p1, p2, p3])
        self.assert_(p2.blog is b)
        p3.blog = None
        self.assert_(b.posts == [p1, p2])
        p4 = Post()
        p4.blog = b
        self.assert_(b.posts == [p1, p2, p4])
        # repeated assignment of the same value is a no-op
        p4.blog = b
        p4.blog = b
        self.assert_(b.posts == [p1, p2, p4])
        # assert no failure removing None
        p5 = Post()
        p5.blog = None
        del p5.blog
    def test_o2o(self):
        """o2o assignment and clearing propagate across the backref."""
        class Port(object):pass
        class Jack(object):pass
        instrumentation.register_class(Port)
        instrumentation.register_class(Jack)
        attributes.register_attribute(Port, 'jack', uselist=False,
                useobject=True, backref="port")
        attributes.register_attribute(Jack, 'port', uselist=False,
                useobject=True, backref="jack")
        p = Port()
        j = Jack()
        p.jack = j
        self.assert_(j.port is p)
        self.assert_(p.jack is not None)
        j.port = None
        self.assert_(p.jack is None)
    def test_symmetric_o2o_inheritance(self):
        """Test that backref 'initiator' catching goes against
        a token that is global to all InstrumentedAttribute objects
        within a particular class, not just the indvidual IA object
        since we use distinct objects in an inheritance scenario.
        """
        class Parent(object):
            pass
        class Child(object):
            pass
        class SubChild(Child):
            pass
        p_token = object()
        c_token = object()
        instrumentation.register_class(Parent)
        instrumentation.register_class(Child)
        instrumentation.register_class(SubChild)
        attributes.register_attribute(Parent, 'child', uselist=False,
                backref="parent",
                parent_token = p_token,
                useobject=True)
        attributes.register_attribute(Child, 'parent', uselist=False,
                backref="child",
                parent_token = c_token,
                useobject=True)
        attributes.register_attribute(SubChild, 'parent',
                uselist=False,
                backref="child",
                parent_token = c_token,
                useobject=True)
        p1 = Parent()
        c1 = Child()
        p1.child = c1
        c2 = SubChild()
        c2.parent = p1
    def test_symmetric_o2m_inheritance(self):
        """same token-based initiator matching, o2m across subclasses."""
        class Parent(object):
            pass
        class SubParent(Parent):
            pass
        class Child(object):
            pass
        p_token = object()
        c_token = object()
        instrumentation.register_class(Parent)
        instrumentation.register_class(SubParent)
        instrumentation.register_class(Child)
        attributes.register_attribute(Parent, 'children', uselist=True,
                backref='parent',
                parent_token = p_token,
                useobject=True)
        attributes.register_attribute(SubParent, 'children', uselist=True,
                backref='parent',
                parent_token = p_token,
                useobject=True)
        attributes.register_attribute(Child, 'parent', uselist=False,
                backref='children',
                parent_token = c_token,
                useobject=True)
        p1 = Parent()
        p2 = SubParent()
        c1 = Child()
        p1.children.append(c1)
        assert c1.parent is p1
        assert c1 in p1.children
        p2.children.append(c1)
        assert c1.parent is p2
        # note its still in p1.children -
        # the event model currently allows only
        # one level deep.  without the parent_token,
        # it keeps going until a ValueError is raised
        # and this condition changes.
        assert c1 in p1.children
class CyclicBackrefAssertionTest(fixtures.TestBase):
    """test that infinite recursion due to incorrect backref assignments
    is blocked.
    """
    def test_scalar_set_type_assertion(self):
        """assigning through the mismatched backref pair raises ValueError."""
        A, B, C = self._scalar_fixture()
        c1 = C()
        b1 = B()
        assert_raises_message(
            ValueError,
            "Object <B at .*> not associated with attribute of type C.a",
            setattr, c1, 'a', b1
        )
    def test_collection_append_type_assertion(self):
        """appending through the mismatched backref pair raises ValueError."""
        A, B, C = self._collection_fixture()
        c1 = C()
        b1 = B()
        assert_raises_message(
            ValueError,
            "Object <B at .*> not associated with attribute of type C.a",
            c1.a.append, b1
        )
    def _scalar_fixture(self):
        # C.a and C.b both declare backref 'c'; A.c / B.c are collections.
        class A(object):
            pass
        class B(object):
            pass
        class C(object):
            pass
        instrumentation.register_class(A)
        instrumentation.register_class(B)
        instrumentation.register_class(C)
        attributes.register_attribute(C, 'a', backref='c', useobject=True)
        attributes.register_attribute(C, 'b', backref='c', useobject=True)
        attributes.register_attribute(A, 'c', backref='a', useobject=True,
                uselist=True)
        attributes.register_attribute(B, 'c', backref='b', useobject=True,
                uselist=True)
        return A, B, C
    def _collection_fixture(self):
        # same shape as _scalar_fixture, but C.a / C.b are the collections.
        class A(object):
            pass
        class B(object):
            pass
        class C(object):
            pass
        instrumentation.register_class(A)
        instrumentation.register_class(B)
        instrumentation.register_class(C)
        attributes.register_attribute(C, 'a', backref='c', useobject=True,
                uselist=True)
        attributes.register_attribute(C, 'b', backref='c', useobject=True,
                uselist=True)
        attributes.register_attribute(A, 'c', backref='a', useobject=True)
        attributes.register_attribute(B, 'c', backref='b', useobject=True)
        return A, B, C
class PendingBackrefTest(fixtures.ORMTest):
def setup(self):
global Post, Blog, called, lazy_load
class Post(object):
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
return other is not None and other.name == self.name
class Blog(object):
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
return other is not None and other.name == self.name
called = [0]
lazy_load = []
def lazy_posts(state, passive):
if passive is not attributes.PASSIVE_NO_FETCH:
called[0] += 1
return lazy_load
else:
return attributes.PASSIVE_NO_RESULT
instrumentation.register_class(Post)
instrumentation.register_class(Blog)
attributes.register_attribute(Post, 'blog', uselist=False,
backref='posts', trackparent=True, useobject=True)
attributes.register_attribute(Blog, 'posts', uselist=True,
backref='blog', callable_=lazy_posts, trackparent=True,
useobject=True)
def test_lazy_add(self):
global lazy_load
p1, p2, p3 = Post("post 1"), Post("post 2"), Post("post 3")
lazy_load = [p1, p2, p3]
b = Blog("blog 1")
p = Post("post 4")
p.blog = b
p = Post("post 5")
p.blog = b
# setting blog doesnt call 'posts' callable
assert called[0] == 0
# calling backref calls the callable, populates extra posts
assert b.posts == [p1, p2, p3, Post("post 4"), Post("post 5")]
assert called[0] == 1
def test_lazy_history(self):
global lazy_load
p1, p2, p3 = Post("post 1"), Post("post 2"), Post("post 3")
lazy_load = [p1, p2, p3]
b = Blog("blog 1")
p = Post("post 4")
p.blog = b
p4 = Post("post 5")
p4.blog = b
assert called[0] == 0
eq_(attributes.instance_state(b).
get_history('posts', attributes.PASSIVE_OFF),
([p, p4], [p1, p2, p3], []))
assert called[0] == 1
def test_state_on_add_remove(self):
b = Blog("blog 1")
p = Post("post 1")
p.blog = b
p.blog = None
eq_(called[0], 0)
eq_(b.posts, [])
eq_(called[0], 1)
def test_pending_combines_with_lazy(self):
global lazy_load
b = Blog("blog 1")
p = Post("post 1")
p2 = Post("post 2")
p.blog = b
lazy_load = [p, p2]
# lazy loaded + pending get added together.
# This isn't seen often with the ORM due
# to usual practices surrounding the
# load/flush/load cycle.
eq_(b.posts, [p, p2, p])
eq_(called[0], 1)
def test_normal_load(self):
    """Direct ``__dict__`` assignment (simulating a load) bypasses the
    backref machinery; subsequent backref mutations adjust the already
    loaded collection without re-firing the lazy callable.
    """
    global lazy_load
    lazy_load = (p1, p2, p3) = [Post("post 1"), Post("post 2"), Post("post 3")]
    called[0] = 0
    b = Blog("blog 1")
    # assign without using backref system
    p2.__dict__['blog'] = b
    assert b.posts == [Post("post 1"), Post("post 2"), Post("post 3")]
    assert called[0] == 1
    # backref removal/addition mutates the loaded collection in place
    p2.blog = None
    p4 = Post("post 4")
    p4.blog = b
    assert b.posts == [Post("post 1"), Post("post 3"), Post("post 4")]
    assert called[0] == 1
    # restore the shared fixture state for subsequent tests
    called[0] = 0
    lazy_load = (p1, p2, p3) = [Post("post 1"), Post("post 2"), Post("post 3")]
def test_commit_removes_pending(self):
    """After _commit_all() on both sides, a formerly pending backref
    append is no longer tracked as pending; reading the collection then
    yields only the lazily loaded rows.
    """
    global lazy_load
    lazy_load = (p1, ) = [Post("post 1"), ]
    called[0] = 0
    b = Blog("blog 1")
    p1.blog = b
    # commit flushes pending state into "committed" on both objects
    attributes.instance_state(b)._commit_all(attributes.instance_dict(b))
    attributes.instance_state(p1)._commit_all(attributes.instance_dict(p1))
    assert b.posts == [Post("post 1")]
class HistoryTest(fixtures.TestBase):
    """Exercises attribute history tracking for scalar, scalar-object
    and collection attributes, with and without ``active_history``.

    History is always asserted as the three-tuple
    ``(added, unchanged, deleted)`` returned by
    ``attributes.get_state_history()``.
    """

    def _fixture(self, uselist, useobject, active_history, **kw):
        """Build an instrumented class ``Foo`` with a single attribute
        ``someattr`` configured per the given flags.
        """
        class Foo(fixtures.BasicEntity):
            pass
        instrumentation.register_class(Foo)
        attributes.register_attribute(
            Foo, 'someattr',
            uselist=uselist,
            useobject=useobject,
            active_history=active_history,
            **kw)
        return Foo

    def _two_obj_fixture(self, uselist):
        """Build ``Foo`` with a ``someattr`` relationship to ``Bar``.

        ``Bar.__nonzero__`` raises so that any accidental truth-testing
        of a Bar instance inside the attribute machinery fails loudly.
        """
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            def __nonzero__(self):
                assert False
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'someattr', uselist=uselist,
                useobject=True)
        return Foo, Bar

    def _someattr_history(self, f, **kw):
        # convenience: (added, unchanged, deleted) for f.someattr
        return attributes.get_state_history(
                    attributes.instance_state(f),
                    'someattr', **kw)

    def _commit_someattr(self, f):
        # flush pending changes on 'someattr' into committed state
        attributes.instance_state(f)._commit(attributes.instance_dict(f),
                ['someattr'])

    def _someattr_committed_state(self, f):
        # the value recorded as "committed" for f.someattr, or None
        Foo = f.__class__
        return Foo.someattr.impl.get_committed_value(
            attributes.instance_state(f),
            attributes.instance_dict(f))

    # --- committed-value accessors -----------------------------------

    def test_committed_value_init(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        eq_(self._someattr_committed_state(f), None)

    def test_committed_value_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 3
        # a plain set is pending, not committed
        eq_(self._someattr_committed_state(f), None)

    def test_committed_value_set_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 3
        self._commit_someattr(f)
        eq_(self._someattr_committed_state(f), 3)

    # --- scalar history, active_history=False ------------------------

    def test_scalar_init(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        eq_(self._someattr_history(f), ((), (), ()))

    def test_object_init(self):
        Foo = self._fixture(uselist=False, useobject=True,
                                active_history=False)
        f = Foo()
        eq_(self._someattr_history(f), ((), (), ()))

    def test_object_init_active_history(self):
        Foo = self._fixture(uselist=False, useobject=True,
                                active_history=True)
        f = Foo()
        eq_(self._someattr_history(f), ((), (), ()))

    def test_scalar_no_init_side_effect(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        self._someattr_history(f)
        # no side effects
        assert 'someattr' not in f.__dict__
        assert 'someattr' not in attributes.instance_state(f).committed_state

    def test_scalar_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 'hi'
        eq_(self._someattr_history(f), (['hi'], (), ()))

    def test_scalar_set_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 'hi'
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), ['hi'], ()))

    def test_scalar_set_commit_reset(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 'hi'
        self._commit_someattr(f)
        f.someattr = 'there'
        # committed value moves to 'deleted' once replaced
        eq_(self._someattr_history(f), (['there'], (), ['hi']))

    def test_scalar_set_commit_reset_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 'hi'
        self._commit_someattr(f)
        f.someattr = 'there'
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), ['there'], ()))

    def test_scalar_set_commit_reset_commit_del(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 'there'
        self._commit_someattr(f)
        del f.someattr
        eq_(self._someattr_history(f), ((), (), ['there']))

    def test_scalar_set_dict(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        # direct __dict__ population looks like a loaded value
        f.__dict__['someattr'] = 'new'
        eq_(self._someattr_history(f), ((), ['new'], ()))

    def test_scalar_set_dict_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        self._someattr_history(f)
        f.someattr = 'old'
        eq_(self._someattr_history(f), (['old'], (), ['new']))

    def test_scalar_set_dict_set_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        self._someattr_history(f)
        f.someattr = 'old'
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), ['old'], ()))

    def test_scalar_set_None(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = None
        # None counts as an explicit 'added' value for scalars
        eq_(self._someattr_history(f), ([None], (), ()))

    def test_scalar_set_None_from_dict_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        f.someattr = None
        eq_(self._someattr_history(f), ([None], (), ['new']))

    def test_scalar_set_twice_no_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = 'one'
        eq_(self._someattr_history(f), (['one'], (), ()))
        # a second uncommitted set replaces the first in 'added'
        f.someattr = 'two'
        eq_(self._someattr_history(f), (['two'], (), ()))

    # --- scalar history, active_history=True -------------------------

    def test_scalar_active_init(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        eq_(self._someattr_history(f), ((), (), ()))

    def test_scalar_active_no_init_side_effect(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        self._someattr_history(f)
        # no side effects
        assert 'someattr' not in f.__dict__
        assert 'someattr' not in attributes.instance_state(f).committed_state

    def test_collection_never_set(self):
        Foo = self._fixture(uselist=True, useobject=True,
                                active_history=True)
        f = Foo()
        eq_(self._someattr_history(f, passive=True), ((), (), ()))

    def test_scalar_obj_never_set(self):
        Foo = self._fixture(uselist=False, useobject=True,
                                active_history=True)
        f = Foo()
        eq_(self._someattr_history(f, passive=True), ((), (), ()))

    def test_scalar_never_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        eq_(self._someattr_history(f, passive=True), ((), (), ()))

    def test_scalar_active_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = 'hi'
        eq_(self._someattr_history(f), (['hi'], (), ()))

    def test_scalar_active_set_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = 'hi'
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), ['hi'], ()))

    def test_scalar_active_set_commit_reset(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = 'hi'
        self._commit_someattr(f)
        f.someattr = 'there'
        eq_(self._someattr_history(f), (['there'], (), ['hi']))

    def test_scalar_active_set_commit_reset_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = 'hi'
        self._commit_someattr(f)
        f.someattr = 'there'
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), ['there'], ()))

    def test_scalar_active_set_commit_reset_commit_del(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = 'there'
        self._commit_someattr(f)
        del f.someattr
        eq_(self._someattr_history(f), ((), (), ['there']))

    def test_scalar_active_set_dict(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        eq_(self._someattr_history(f), ((), ['new'], ()))

    def test_scalar_active_set_dict_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        self._someattr_history(f)
        f.someattr = 'old'
        eq_(self._someattr_history(f), (['old'], (), ['new']))

    def test_scalar_active_set_dict_set_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        self._someattr_history(f)
        f.someattr = 'old'
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), ['old'], ()))

    def test_scalar_active_set_None(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = None
        eq_(self._someattr_history(f), ([None], (), ()))

    def test_scalar_active_set_None_from_dict_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.__dict__['someattr'] = 'new'
        f.someattr = None
        eq_(self._someattr_history(f), ([None], (), ['new']))

    def test_scalar_active_set_twice_no_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=True)
        f = Foo()
        f.someattr = 'one'
        eq_(self._someattr_history(f), (['one'], (), ()))
        f.someattr = 'two'
        eq_(self._someattr_history(f), (['two'], (), ()))

    # --- in-place mutation + flag_modified ---------------------------

    def test_scalar_inplace_mutation_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = {'a': 'b'}
        eq_(self._someattr_history(f), ([{'a': 'b'}], (), ()))

    def test_scalar_inplace_mutation_set_commit(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = {'a': 'b'}
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), [{'a': 'b'}], ()))

    def test_scalar_inplace_mutation_set_commit_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = {'a': 'b'}
        self._commit_someattr(f)
        # in-place dict mutation is invisible to history tracking
        f.someattr['a'] = 'c'
        eq_(self._someattr_history(f), ((), [{'a': 'c'}], ()))

    def test_scalar_inplace_mutation_set_commit_flag_modified(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = {'a': 'b'}
        self._commit_someattr(f)
        # flag_modified forces the current value into 'added'
        attributes.flag_modified(f, 'someattr')
        eq_(self._someattr_history(f), ([{'a': 'b'}], (), ()))

    def test_scalar_inplace_mutation_set_commit_set_flag_modified(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = {'a': 'b'}
        self._commit_someattr(f)
        f.someattr['a'] = 'c'
        attributes.flag_modified(f, 'someattr')
        eq_(self._someattr_history(f), ([{'a': 'c'}], (), ()))

    def test_scalar_inplace_mutation_set_commit_flag_modified_set(self):
        Foo = self._fixture(uselist=False, useobject=False,
                                active_history=False)
        f = Foo()
        f.someattr = {'a': 'b'}
        self._commit_someattr(f)
        attributes.flag_modified(f, 'someattr')
        eq_(self._someattr_history(f), ([{'a': 'b'}], (), ()))
        f.someattr = ['a']
        eq_(self._someattr_history(f), ([['a']], (), ()))

    # --- scalar object attribute -------------------------------------

    def test_use_object_init(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        eq_(self._someattr_history(f), ((), (), ()))

    def test_use_object_no_init_side_effect(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        self._someattr_history(f)
        assert 'someattr' not in f.__dict__
        assert 'someattr' not in attributes.instance_state(f).committed_state

    def test_use_object_set(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.someattr = hi
        eq_(self._someattr_history(f), ([hi], (), ()))

    def test_use_object_set_commit(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.someattr = hi
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), [hi], ()))

    def test_use_object_set_commit_set(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.someattr = hi
        self._commit_someattr(f)
        there = Bar(name='there')
        f.someattr = there
        eq_(self._someattr_history(f), ([there], (), [hi]))

    def test_use_object_set_commit_set_commit(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.someattr = hi
        self._commit_someattr(f)
        there = Bar(name='there')
        f.someattr = there
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), [there], ()))

    def test_use_object_set_commit_del(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.someattr = hi
        self._commit_someattr(f)
        del f.someattr
        eq_(self._someattr_history(f), ((), (), [hi]))

    def test_use_object_set_dict(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.__dict__['someattr'] = hi
        eq_(self._someattr_history(f), ((), [hi], ()))

    def test_use_object_set_dict_set(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.__dict__['someattr'] = hi
        there = Bar(name='there')
        f.someattr = there
        eq_(self._someattr_history(f), ([there], (), [hi]))

    def test_use_object_set_dict_set_commit(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        f.__dict__['someattr'] = hi
        there = Bar(name='there')
        f.someattr = there
        self._commit_someattr(f)
        eq_(self._someattr_history(f), ((), [there], ()))

    def test_use_object_set_None(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        f.someattr = None
        # for object attributes, setting None with no prior value lands
        # in 'unchanged' rather than 'added'
        eq_(self._someattr_history(f), ((), [None], ()))

    def test_use_object_set_dict_set_None(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi =Bar(name='hi')
        f.__dict__['someattr'] = hi
        f.someattr = None
        eq_(self._someattr_history(f), ([None], (), [hi]))

    def test_use_object_set_value_twice(self):
        Foo, Bar = self._two_obj_fixture(uselist=False)
        f = Foo()
        hi = Bar(name='hi')
        there = Bar(name='there')
        f.someattr = hi
        f.someattr = there
        eq_(self._someattr_history(f), ([there], (), ()))

    # --- collection attributes ---------------------------------------

    def test_object_collections_set(self):
        # TODO: break into individual tests
        Foo, Bar = self._two_obj_fixture(uselist=True)
        hi = Bar(name='hi')
        there = Bar(name='there')
        old = Bar(name='old')
        new = Bar(name='new')

        # case 1. new object
        f = Foo()
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [], ()))
        f.someattr = [hi]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi], [], []))
        self._commit_someattr(f)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [hi], ()))
        f.someattr = [there]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([there], [], [hi]))
        self._commit_someattr(f)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [there], ()))
        f.someattr = [hi]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi], [], [there]))
        f.someattr = [old, new]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([old, new], [], [there]))

        # case 2. object with direct settings (similar to a load
        # operation)
        f = Foo()
        collection = attributes.init_collection(f, 'someattr')
        collection.append_without_event(new)
        attributes.instance_state(f)._commit_all(attributes.instance_dict(f))
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [new], ()))
        f.someattr = [old]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([old], [], [new]))
        self._commit_someattr(f)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [old], ()))

    def test_dict_collections(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        from sqlalchemy.orm.collections import attribute_mapped_collection
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'someattr', uselist=True,
                useobject=True,
                typecallable=attribute_mapped_collection('name'))
        hi = Bar(name='hi')
        there = Bar(name='there')
        old = Bar(name='old')
        new = Bar(name='new')
        f = Foo()
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [], ()))
        f.someattr['hi'] = hi
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi], [], []))
        f.someattr['there'] = there
        # dict-backed collections have no defined ordering; compare as sets
        eq_(tuple([set(x) for x in
            attributes.get_state_history(attributes.instance_state(f),
            'someattr')]), (set([hi, there]), set(), set()))
        self._commit_someattr(f)
        eq_(tuple([set(x) for x in
            attributes.get_state_history(attributes.instance_state(f),
            'someattr')]), (set(), set([hi, there]), set()))

    def test_object_collections_mutate(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        instrumentation.register_class(Foo)
        attributes.register_attribute(Foo, 'someattr', uselist=True,
                useobject=True)
        attributes.register_attribute(Foo, 'id', uselist=False,
                useobject=False)
        instrumentation.register_class(Bar)
        hi = Bar(name='hi')
        there = Bar(name='there')
        old = Bar(name='old')
        new = Bar(name='new')

        # case 1. new object
        f = Foo(id=1)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [], ()))
        f.someattr.append(hi)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi], [], []))
        self._commit_someattr(f)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [hi], ()))
        f.someattr.append(there)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([there], [hi], []))
        self._commit_someattr(f)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [hi, there], ()))
        f.someattr.remove(there)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([], [hi], [there]))
        f.someattr.append(old)
        f.someattr.append(new)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([old, new], [hi], [there]))
        attributes.instance_state(f)._commit(attributes.instance_dict(f),
                ['someattr'])
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [hi, old, new], ()))
        f.someattr.pop(0)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([], [old, new], [hi]))

        # case 2. object with direct settings (similar to a load
        # operation)
        f = Foo()
        f.__dict__['id'] = 1
        collection = attributes.init_collection(f, 'someattr')
        collection.append_without_event(new)
        attributes.instance_state(f)._commit_all(attributes.instance_dict(f))
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [new], ()))
        f.someattr.append(old)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([old], [new], []))
        attributes.instance_state(f)._commit(attributes.instance_dict(f),
                ['someattr'])
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [new, old], ()))
        f = Foo()
        collection = attributes.init_collection(f, 'someattr')
        collection.append_without_event(new)
        attributes.instance_state(f)._commit_all(attributes.instance_dict(f))
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [new], ()))
        f.id = 1
        f.someattr.remove(new)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([], [], [new]))

        # case 3. mixing appends with sets
        f = Foo()
        f.someattr.append(hi)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi], [], []))
        f.someattr.append(there)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi, there], [], []))
        f.someattr = [there]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([there], [], []))

        # case 4. ensure duplicates show up, order is maintained
        f = Foo()
        f.someattr.append(hi)
        f.someattr.append(there)
        f.someattr.append(hi)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([hi, there, hi], [], []))
        attributes.instance_state(f)._commit_all(attributes.instance_dict(f))
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ((), [hi, there, hi], ()))
        f.someattr = []
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'someattr'), ([], [], [hi, there, hi]))

    def test_collections_via_backref(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'bars', uselist=True,
                backref='foo', trackparent=True, useobject=True)
        attributes.register_attribute(Bar, 'foo', uselist=False,
                backref='bars', trackparent=True, useobject=True)
        f1 = Foo()
        b1 = Bar()
        eq_(attributes.get_state_history(attributes.instance_state(f1),
            'bars'), ((), [], ()))
        eq_(attributes.get_state_history(attributes.instance_state(b1),
            'foo'), ((), (), ()))

        # b1.foo = f1
        f1.bars.append(b1)
        # the append is mirrored onto b1.foo via the backref
        eq_(attributes.get_state_history(attributes.instance_state(f1),
            'bars'), ([b1], [], []))
        eq_(attributes.get_state_history(attributes.instance_state(b1),
            'foo'), ([f1], (), ()))
        b2 = Bar()
        f1.bars.append(b2)
        eq_(attributes.get_state_history(attributes.instance_state(f1),
            'bars'), ([b1, b2], [], []))
        eq_(attributes.get_state_history(attributes.instance_state(b1),
            'foo'), ([f1], (), ()))
        eq_(attributes.get_state_history(attributes.instance_state(b2),
            'foo'), ([f1], (), ()))

    def test_deprecated_flags(self):
        # boolean 'passive' values are deprecated in favor of the
        # PASSIVE_* symbols
        assert_raises_message(
            sa_exc.SADeprecationWarning,
            "Passing True for 'passive' is deprecated. "
            "Use attributes.PASSIVE_NO_INITIALIZE",
            attributes.get_history, object(), 'foo', True
        )
        assert_raises_message(
            sa_exc.SADeprecationWarning,
            "Passing False for 'passive' is deprecated. "
            "Use attributes.PASSIVE_OFF",
            attributes.get_history, object(), 'foo', False
        )
class LazyloadHistoryTest(fixtures.TestBase):
    """History tracking for attributes backed by a lazy ``callable_``:
    how lazily loaded values interact with pending mutations,
    backrefs, and the ``active_history`` flag.
    """

    def test_lazy_backref_collections(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        lazy_load = []
        def lazyload(state, passive):
            # returns whatever the enclosing test has staged
            return lazy_load
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'bars', uselist=True,
                backref='foo', trackparent=True, callable_=lazyload,
                useobject=True)
        attributes.register_attribute(Bar, 'foo', uselist=False,
                backref='bars', trackparent=True, useobject=True)
        bar1, bar2, bar3, bar4 = [Bar(id=1), Bar(id=2), Bar(id=3),
                                  Bar(id=4)]
        lazy_load = [bar1, bar2, bar3]
        f = Foo()
        bar4 = Bar()
        bar4.foo = f
        # pending backref append is 'added'; lazy rows are 'unchanged'
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([bar4], [bar1, bar2, bar3], []))
        lazy_load = None
        f = Foo()
        bar4 = Bar()
        bar4.foo = f
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([bar4], [], []))
        lazy_load = [bar1, bar2, bar3]
        # expiring resets to the purely-loaded state
        attributes.instance_state(f)._expire_attributes(attributes.instance_dict(f),
                ['bars'])
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ((), [bar1, bar2, bar3], ()))

    def test_collections_via_lazyload(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        lazy_load = []
        def lazyload(state, passive):
            return lazy_load
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'bars', uselist=True,
                callable_=lazyload, trackparent=True, useobject=True)
        bar1, bar2, bar3, bar4 = [Bar(id=1), Bar(id=2), Bar(id=3),
                                  Bar(id=4)]
        lazy_load = [bar1, bar2, bar3]
        f = Foo()
        # full replacement deletes every lazily loaded member
        f.bars = []
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([], [], [bar1, bar2, bar3]))
        f = Foo()
        f.bars.append(bar4)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([bar4], [bar1, bar2, bar3], []))
        f = Foo()
        # removal from the lazily loaded collection marks it deleted
        f.bars.remove(bar2)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([], [bar1, bar3], [bar2]))
        f.bars.append(bar4)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([bar4], [bar1, bar3], [bar2]))
        f = Foo()
        del f.bars[1]
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([], [bar1, bar3], [bar2]))
        lazy_load = None
        f = Foo()
        f.bars.append(bar2)
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bars'), ([bar2], [], []))

    def test_scalar_via_lazyload(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        lazy_load = None
        def lazyload(state, passive):
            return lazy_load
        instrumentation.register_class(Foo)
        attributes.register_attribute(Foo, 'bar', uselist=False,
                callable_=lazyload, useobject=False)
        lazy_load = 'hi'

        # with scalar non-object and active_history=False, the lazy
        # callable is only executed on gets, not history operations
        f = Foo()
        eq_(f.bar, 'hi')
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), ['hi'], ()))
        f = Foo()
        f.bar = None
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([None], (), ()))
        f = Foo()
        f.bar = 'there'
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), (['there'], (), ()))
        f.bar = 'hi'
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), (['hi'], (), ()))
        f = Foo()
        eq_(f.bar, 'hi')
        del f.bar
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), (), ['hi']))
        assert f.bar is None
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([None], (), ['hi']))

    def test_scalar_via_lazyload_with_active(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        lazy_load = None
        def lazyload(state, passive):
            return lazy_load
        instrumentation.register_class(Foo)
        attributes.register_attribute(Foo, 'bar', uselist=False,
                callable_=lazyload, useobject=False,
                active_history=True)
        lazy_load = 'hi'

        # active_history=True means the lazy callable is executed on set
        # as well as get, causing the old value to appear in the history
        f = Foo()
        eq_(f.bar, 'hi')
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), ['hi'], ()))
        f = Foo()
        f.bar = None
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([None], (), ['hi']))
        f = Foo()
        f.bar = 'there'
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), (['there'], (), ['hi']))
        f.bar = 'hi'
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), ['hi'], ()))
        f = Foo()
        eq_(f.bar, 'hi')
        del f.bar
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), (), ['hi']))
        assert f.bar is None
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([None], (), ['hi']))

    def test_scalar_object_via_lazyload(self):
        # TODO: break into individual tests
        class Foo(fixtures.BasicEntity):
            pass
        class Bar(fixtures.BasicEntity):
            pass
        lazy_load = None
        def lazyload(state, passive):
            return lazy_load
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'bar', uselist=False,
                callable_=lazyload, trackparent=True, useobject=True)
        bar1, bar2 = [Bar(id=1), Bar(id=2)]
        lazy_load = bar1

        # with scalar object, the lazy callable is only executed on gets
        # and history operations
        f = Foo()
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), [bar1], ()))
        f = Foo()
        f.bar = None
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([None], (), [bar1]))
        f = Foo()
        f.bar = bar2
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([bar2], (), [bar1]))
        f.bar = bar1
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), [bar1], ()))
        f = Foo()
        eq_(f.bar, bar1)
        del f.bar
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ((), (), [bar1]))
        assert f.bar is None
        eq_(attributes.get_state_history(attributes.instance_state(f),
            'bar'), ([None], (), [bar1]))
class ListenerTest(fixtures.ORMTest):
    """Attribute event listeners: value-mutating listeners
    (``retval=True``), None handling in collection events, and
    listener propagation across subclasses.
    """

    def test_receive_changes(self):
        """test that Listeners can mutate the given value."""
        class Foo(object):
            pass
        class Bar(object):
            pass
        def append(state, child, initiator):
            # replace the appended child with a derived Bar
            b2 = Bar()
            b2.data = b1.data + " appended"
            return b2
        def on_set(state, value, oldvalue, initiator):
            return value + " modified"
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'data', uselist=False,
                useobject=False)
        attributes.register_attribute(Foo, 'barlist', uselist=True,
                useobject=True)
        attributes.register_attribute(Foo, 'barset', typecallable=set,
                uselist=True, useobject=True)
        attributes.register_attribute(Bar, 'data', uselist=False,
                useobject=False)
        # retval=True means the listener's return value replaces the
        # value actually stored
        event.listen(Foo.data, 'set', on_set, retval=True)
        event.listen(Foo.barlist, 'append', append, retval=True)
        event.listen(Foo.barset, 'append', append, retval=True)
        f1 = Foo()
        f1.data = 'some data'
        eq_(f1.data, 'some data modified')
        b1 = Bar()
        b1.data = 'some bar'
        f1.barlist.append(b1)
        # the original object is untouched; the stored one is the copy
        assert b1.data == 'some bar'
        assert f1.barlist[0].data == 'some bar appended'
        f1.barset.add(b1)
        assert f1.barset.pop().data == 'some bar appended'

    def test_none_on_collection_event(self):
        """test that append/remove of None in collections emits events.

        This is new behavior as of 0.8.

        """
        class Foo(object):
            pass
        class Bar(object):
            pass
        instrumentation.register_class(Foo)
        instrumentation.register_class(Bar)
        attributes.register_attribute(Foo, 'barlist', uselist=True,
                useobject=True)
        canary = []
        def append(state, child, initiator):
            canary.append((state, child))
        def remove(state, child, initiator):
            canary.append((state, child))
        event.listen(Foo.barlist, 'append', append)
        event.listen(Foo.barlist, 'remove', remove)

        b1, b2 = Bar(), Bar()
        f1 = Foo()
        f1.barlist.append(None)
        eq_(canary, [(f1, None)])

        canary[:] = []
        f1 = Foo()
        f1.barlist = [None, b2]
        eq_(canary, [(f1, None), (f1, b2)])

        canary[:] = []
        f1 = Foo()
        f1.barlist = [b1, None, b2]
        eq_(canary, [(f1, b1), (f1, None), (f1, b2)])

        f1.barlist.remove(None)
        eq_(canary, [(f1, b1), (f1, None), (f1, b2), (f1, None)])

    def test_propagate(self):
        """A 'set' listener attached with propagate=True on a base
        class must fire for subclasses, regardless of the relative
        order in which classes are defined, instrumented, given
        attributes and given listeners.

        ``all_partial_orderings`` enumerates every sequence of the
        setup steps consistent with the dependency pairs in
        ``ordering``; each permutation is run and then torn down.
        """
        classes = [None, None, None]
        canary = []
        def make_a():
            class A(object):
                pass
            classes[0] = A

        def make_b():
            class B(classes[0]):
                pass
            classes[1] = B

        def make_c():
            class C(classes[1]):
                pass
            classes[2] = C

        def instrument_a():
            instrumentation.register_class(classes[0])

        def instrument_b():
            instrumentation.register_class(classes[1])

        def instrument_c():
            instrumentation.register_class(classes[2])

        def attr_a():
            attributes.register_attribute(classes[0], 'attrib',
                            uselist=False, useobject=False)

        def attr_b():
            attributes.register_attribute(classes[1], 'attrib',
                            uselist=False, useobject=False)

        def attr_c():
            attributes.register_attribute(classes[2], 'attrib',
                            uselist=False, useobject=False)

        def set(state, value, oldvalue, initiator):
            canary.append(value)

        def events_a():
            event.listen(classes[0].attrib, 'set', set, propagate=True)

        def teardown():
            classes[:] = [None, None, None]
            canary[:] = []

        # (x, y) means step x must occur before step y
        ordering = [
            (instrument_a, instrument_b),
            (instrument_b, instrument_c),
            (attr_a, attr_b),
            (attr_b, attr_c),
            (make_a, instrument_a),
            (instrument_a, attr_a),
            (attr_a, events_a),
            (make_b, instrument_b),
            (instrument_b, attr_b),
            (make_c, instrument_c),
            (instrument_c, attr_c),
            (make_a, make_b),
            (make_b, make_c)
        ]
        elements = [make_a, make_b, make_c,
                    instrument_a, instrument_b, instrument_c,
                    attr_a, attr_b, attr_c, events_a]

        for i, series in enumerate(all_partial_orderings(ordering, elements)):
            for fn in series:
                fn()

            b = classes[1]()
            b.attrib = "foo"
            eq_(b.attrib, "foo")
            eq_(canary, ["foo"])

            c = classes[2]()
            c.attrib = "bar"
            eq_(c.attrib, "bar")
            eq_(canary, ["foo", "bar"])
            teardown()
class TestUnlink(fixtures.TestBase):
    """A collection adapter that has been "unlinked" from its parent
    state (by expire, reset, bulk replace, or installing a new lazy
    callable) warns when mutated afterwards — except for bulk replace,
    where the old collection remains usable.
    """

    def setUp(self):
        class A(object):
            pass
        class B(object):
            pass
        self.A = A
        self.B = B
        instrumentation.register_class(A)
        instrumentation.register_class(B)
        attributes.register_attribute(A, 'bs', uselist=True,
                useobject=True)

    def test_expired(self):
        A, B = self.A, self.B
        a1 = A()
        coll = a1.bs
        a1.bs.append(B())
        state = attributes.instance_state(a1)
        # expiration detaches the collection from the state
        state._expire(state.dict, set())
        assert_raises(
            Warning,
            coll.append, B()
        )

    def test_replaced(self):
        A, B = self.A, self.B
        a1 = A()
        coll = a1.bs
        a1.bs.append(B())
        a1.bs = []
        # a bulk replace empties the old collection
        assert len(coll) == 0
        coll.append(B())
        assert len(coll) == 1

    def test_pop_existing(self):
        A, B = self.A, self.B
        a1 = A()
        coll = a1.bs
        a1.bs.append(B())
        state = attributes.instance_state(a1)
        # _reset() pops the attribute; the detached collection warns
        state._reset(state.dict, "bs")
        assert_raises(
            Warning,
            coll.append, B()
        )

    def test_ad_hoc_lazy(self):
        A, B = self.A, self.B
        a1 = A()
        coll = a1.bs
        a1.bs.append(B())
        state = attributes.instance_state(a1)
        # installing a loader callable also detaches the collection
        state._set_callable(state.dict, "bs", lambda: B())
        assert_raises(
            Warning,
            coll.append, B()
        )
| [
"wangzhengbo1204@gmail.com"
] | wangzhengbo1204@gmail.com |
3d817ef9508b53859f184549c58777d00d0ecbf7 | 4b7db29ef0eede67efbb55baf0c300a7003b8310 | /Section 3/Designer_code/Video2_First_Design.py | 3133f9c7a8794e9b105a7227870409ddb1e4467b | [
"MIT"
] | permissive | PacktPublishing/-Hands-on-Python-3.x-GUI-Programming | 40ffc8b37180eb9d16e5516668efa9309f9e67b2 | 2506987b026bf30c7f9d53672755b0a22fce3379 | refs/heads/master | 2021-06-20T02:07:36.057694 | 2021-01-18T09:48:49 | 2021-01-18T09:48:49 | 174,128,053 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Video2_First_Design.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI layout class generated by Qt Designer / pyuic5.

    NOTE: this class was auto-generated from Video2_First_Design.ui;
    hand edits will be lost if the .ui file is regenerated. The Qt calls
    below are order-dependent (widgets must exist before they are
    attached to the window).
    """

    def setupUi(self, MainWindow):
        """Build the widget tree on *MainWindow*: central widget, a File
        menu with a New action, and a status bar."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(400, 300)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 400, 21))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionNew = QtWidgets.QAction(MainWindow)
        self.actionNew.setObjectName("actionNew")
        self.menuFile.addAction(self.actionNew)
        self.menubar.addAction(self.menuFile.menuAction())
        self.retranslateUi(MainWindow)
        # Auto-wire slots named on_<object>_<signal> (none defined here).
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for i18n)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        MainWindow.setStatusTip(_translate("MainWindow", "This is the status bar"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.actionNew.setText(_translate("MainWindow", "New"))
        self.actionNew.setStatusTip(_translate("MainWindow", "New File"))
if __name__ == "__main__":
    # Manual test harness: show the generated window standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"meghb@packtpub.com"
] | meghb@packtpub.com |
d8b2259a4784af00b7eb6df6d3392ff14912b084 | a34f36f2f08791d353b63e786fa99fe7e7c65d9f | /1271A.py | 257db5f672839f50487f90a6efe3b20aaeba7768 | [] | no_license | vijay9908/code_forces | 5f758c4417d448fb2637dd4b896dfc59409f8b97 | 7d58e52aabea612dfed52dd3534e38563bf78633 | refs/heads/master | 2021-06-25T11:54:55.179108 | 2020-11-19T15:24:08 | 2020-11-19T15:24:08 | 173,603,181 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | a = int(input())
b = int(input())
c = int(input())
d = int(input())
e = int(input())
f = int(input())
x=min(a,d)
awd=a-x
dwd=d-x
y=min(dwd,b,c)
z=min(d,b,c)
d2=d-z
g=min(d2,a)
first=x*e+y*f
second=z*f+g*e
print(max(first,second))
| [
"vijaytanmay055@gmail.com"
] | vijaytanmay055@gmail.com |
bff1c589f0daa9c4c1748c6ff163db8a770639fe | f7ec01cc0419fa38639a8f4514aeb288bf70e8d5 | /project/object_detection/yolo_v3/utils/IOU.py | 4b0028f66b364588e3126cbaebb70bbe9e3e6dab | [] | no_license | lovejing0306/TensorFlow | dd10e58734603cb0f22c4adf32d849a4cfb08dcd | d4d4aec4d086ab916ffb1db7f992edd1b1a31eb4 | refs/heads/master | 2021-06-18T23:48:43.995706 | 2019-05-08T06:19:48 | 2019-05-08T06:19:48 | 92,510,824 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | import numpy as np
import tensorflow as tf
def calculate_min(point, data):
    """Return the lower edge coordinate of a box: ``point - data``."""
    return point - data
def calculate_max(point, data):
    """Return the upper edge coordinate of a box: ``point + data``."""
    return point + data
def IOU_calculator(x, y, width, height, l_x, l_y, l_width, l_height):
    '''
    Calculate IOU between a predicted box and a label box (center format).
    :param x: net predicted center x
    :param y: net predicted center y
    :param width: net predicted width
    :param height: net predicted height
    :param l_x: label center x
    :param l_y: label center y
    :param l_width: label width
    :param l_height: label height
    :return: IOU as a scalar float32 tensor; 1e-8 when boxes do not overlap
    '''
    x_max = calculate_max(x, width / 2)
    y_max = calculate_max(y, height / 2)
    x_min = calculate_min(x, width / 2)
    y_min = calculate_min(y, height / 2)
    # Bug fix: the label corners must be derived from the *label's* own
    # width/height; the original used the predicted width/height here.
    l_x_max = calculate_max(l_x, l_width / 2)
    l_y_max = calculate_max(l_y, l_height / 2)
    l_x_min = calculate_min(l_x, l_width / 2)
    l_y_min = calculate_min(l_y, l_height / 2)
    '''--------Calculate the intersection rectangle--------'''
    xend = tf.minimum(x_max, l_x_max)
    xstart = tf.maximum(x_min, l_x_min)
    yend = tf.minimum(y_max, l_y_max)
    ystart = tf.maximum(y_min, l_y_min)
    area_width = xend - xstart
    area_height = yend - ystart
    '''--------Calculate the IOU--------'''
    area = area_width * area_height
    # Guard against a non-positive union area to avoid division by zero.
    all_area = tf.cond((width * height + l_width * l_height - area) <= 0, lambda: tf.cast(1e-8, tf.float32),
                       lambda: (width * height + l_width * l_height - area))
    IOU = area / all_area
    # A negative extent on either axis means the boxes do not overlap.
    IOU = tf.cond(area_width < 0, lambda: tf.cast(1e-8, tf.float32), lambda: IOU)
    IOU = tf.cond(area_height < 0, lambda: tf.cast(1e-8, tf.float32), lambda: IOU)
    return IOU
'''--------Test the IOU function--------'''
if __name__ == '__main__':
    # Smoke test: two overlapping 2x2 boxes, and a degenerate all-zero case
    # (which exercises the division-by-zero guard).
    IOU1 = IOU_calculator(tf.cast(1, tf.float32), tf.cast(1, tf.float32), tf.cast(2, tf.float32),
                          tf.cast(2, tf.float32),
                          tf.cast(2, tf.float32), tf.cast(2, tf.float32), tf.cast(2, tf.float32),
                          tf.cast(2, tf.float32))
    IOU = IOU_calculator(tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32),
                         tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32), tf.cast(0, tf.float32))
    sess = tf.Session()
    print(sess.run(IOU))
| [
"lovejing0306@gmail.com"
] | lovejing0306@gmail.com |
8316dd070bfe50ffe819d3cb3362e71859ec6e89 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/PIL-1.1.6-py2.7-linux-i686.egg/EGG-INFO/scripts/pilprint.py | 434bed3e44cd2b2c91d8c55fefa724a1bc9c6417 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | #!/home/ignacio/plone-virtualenv/bin/python
#
# The Python Imaging Library.
# $Id: pilprint.py,v 1.1.1.1 2007/09/26 00:00:36 chrism Exp $
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
VERSION = "pilprint 0.3/2003-05-05"
import Image
import PSDraw
letter = ( 1.0*72, 1.0*72, 7.5*72, 10.0*72 )
def description(file, image):
    """Build a one-line caption for *file*: the bare title followed by
    the image format (when known), pixel size and mode, e.g.
    ``pic (JPEG 640x480 RGB)``."""
    import os
    base = os.path.split(file)[1]
    title = os.path.splitext(base)[0]
    if image.format:
        middle = " (" + image.format + " %dx%d "
    else:
        middle = " (%dx%d "
    return title + middle % image.size + image.mode + ")"
import getopt, os, sys
# No file arguments: print usage and exit (Python 2 print statements).
if len(sys.argv) == 1:
    # NOTE(review): this banner says 0.2a1 while VERSION above says 0.3
    # -- the strings are out of sync.
    print "PIL Print 0.2a1/96-10-04 -- print image files"
    print "Usage: pilprint files..."
    print "Options:"
    print " -c colour printer (default is monochrome)"
    print " -p print via lpr (default is stdout)"
    print " -P <printer> same as -p but use given printer"
    sys.exit(1)
# Parse command-line flags; -d is an undocumented debug flag.
try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error, v:
    print v
    sys.exit(1)
printer = None # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print Image.ID
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printer = "lpr"
    elif o == "-P":
        # printer channel
        printer = "lpr -P%s" % a
# Render each file as a one-page PostScript document.
for file in argv:
    try:
        im = Image.open(file)
        title = description(file, im)
        if monochrome and im.mode not in ["1", "L"]:
            # Convert to greyscale; draft mode speeds up the decode.
            im.draft("L", im.size)
            im = im.convert("L")
        if printer:
            # Pipe the PostScript straight into lpr.
            fp = os.popen(printer, "w")
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        # Title above the image, version string below it.
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
    except:
        # NOTE(review): bare except hides all failures (including
        # KeyboardInterrupt); kept for behavioural compatibility.
        print "cannot print image",
        print "(%s:%s)" % (sys.exc_type, sys.exc_value)
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
4df1dadf680c16c281cfbbe879483918b31fd7b7 | 7851a61b7c5b690cbf581e6be9555cdf34d56dea | /py/minesweeper.py | 109abea5beb0fc111f3c0ae497089f3bb247734b | [] | no_license | kawain/algo | 85ed8da1cac6bd59f1b419c77d6ea3c9bebdda54 | bb2f2b3138694a2116e153a732f525c8b3c4b5c8 | refs/heads/master | 2023-09-03T23:52:30.056680 | 2021-10-05T12:11:17 | 2021-10-05T12:11:17 | 368,516,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,125 | py | import pygame
import sys
import random
class MyRect(pygame.Rect):
    """A single minesweeper cell: a pygame.Rect plus game state.

    ``kind`` holds the cell content (9 = bomb, otherwise the count of
    neighbouring bombs), ``visited`` marks an opened cell, and ``flag``
    marks a player-placed flag.
    """

    def __init__(self, left, top, width, height, kind):
        super().__init__(left, top, width, height)
        self.kind = kind      # 9 = bomb, 0-8 = neighbouring bomb count
        self.flag = False     # player flag toggled with right click
        self.visited = False  # True once the cell has been opened

    def open(self):
        """Translate this cell's pixel position back to (row, col)."""
        # Cells are square, so width doubles as the cell size.
        row = int(self.top / self.width)
        col = int(self.left / self.width)
        return row, col

    def put_flag(self):
        """Toggle the flag marker on this cell."""
        self.flag = not self.flag
class Game:
    """Minesweeper board: grid generation, rendering and game rules."""

    def __init__(self, level=1):
        # Beginner: 9x9 grid with 10 mines
        l_1 = (9, 9, 10)
        # Intermediate: 16x16 grid with 40 mines
        l_2 = (16, 16, 40)
        # Advanced: 30x16 grid with 99 mines
        l_3 = (30, 16, 99)
        # Level selection (anything other than 2/3 falls back to beginner)
        if level == 2:
            self.col = l_2[0]
            self.row = l_2[1]
            self.bomb = l_2[2]
        elif level == 3:
            self.col = l_3[0]
            self.row = l_3[1]
            self.bomb = l_3[2]
        else:
            self.col = l_1[0]
            self.row = l_1[1]
            self.bomb = l_1[2]
        # Cell size in pixels
        self.cell = 30
        # Window width
        self.width = self.cell * self.col
        # Window height
        self.height = self.cell * self.row
        # Number of non-bomb cells
        self.other = self.row * self.col - self.bomb
        # Colors (RGB)
        self.white = (255, 255, 255)
        self.black = (0, 0, 0)
        self.blue = (0, 0, 255)
        self.red = (255, 0, 0)
        self.orangered = (255, 69, 0)
        self.dimgray = (105, 105, 105)
        self.navy = (0, 0, 128)
        self.gray = (128, 128, 128)
        # Game-over flag
        self.game_over = False
        # Game-clear flag
        self.game_clear = False
        # 2-D array of MyRect cell objects
        self.obj_arr = self.make_arr()

    def make_arr(self):
        """Build the board: shuffle bombs in, count neighbours, wrap cells."""
        # One 9 per bomb
        bomb = [9] * self.bomb
        # Remaining cells start as 0
        tmp = [0] * self.other
        # Combine both arrays
        tmp = tmp + bomb
        # Shuffle the flat board
        random.shuffle(tmp)
        # Reshape the flat array into a 2-D board
        board = []
        i = 0
        for _ in range(self.row):
            cols = []
            for _ in range(self.col):
                cols.append(tmp[i])
                i += 1
            board.append(cols)
        # Replace each non-bomb cell with its neighbouring-bomb count
        for y in range(self.row):
            for x in range(self.col):
                # Skip bomb cells
                if board[y][x] == 9:
                    continue
                board[y][x] = self.bomb_around_count(board, y, x)
        return self.make_obj_arr(board)

    def bomb_around_count(self, arr, row, col):
        """Count bombs in the up-to-8 cells surrounding (row, col)."""
        count = 0
        # max/min clamp the 3x3 window so indices never go out of range
        for r in range(max(0, row - 1), min(self.row - 1, row + 1) + 1):
            for c in range(max(0, col - 1), min(self.col - 1, col + 1) + 1):
                # Skip the origin cell itself
                if row == r and col == c:
                    continue
                # Count surrounding bombs
                if arr[r][c] == 9:
                    count += 1
        return count

    def make_obj_arr(self, arr):
        """Wrap the numeric board into MyRect cells with pixel coords."""
        # Build the MyRect grid
        obj_arr = []
        for y in range(self.row):
            tmp = []
            for x in range(self.col):
                left = x * self.cell
                top = y * self.cell
                tmp.append(MyRect(left, top, self.cell, self.cell, arr[y][x]))
            obj_arr.append(tmp)
        return obj_arr

    def draw(self, pygame, screen, font):
        """Render every cell: grid, numbers, bombs, covers and flags."""
        for rows in self.obj_arr:
            for v in rows:
                # Cell border
                pygame.draw.rect(
                    screen, self.black,
                    (v.left, v.top, self.cell, self.cell), 1
                )
                # Neighbour count (blank for 0)
                if v.kind == 0:
                    kind = ""
                else:
                    kind = str(v.kind)
                text = font.render(kind, True, self.blue)
                screen.blit(text, [v.left + 5, v.top])
                # Bombs are drawn as red circles
                if v.kind == 9:
                    pygame.draw.ellipse(
                        screen, self.red, (v.left, v.top, self.cell, self.cell))
                # Unopened cells are covered in gray
                if not v.visited:
                    pygame.draw.rect(
                        screen, self.dimgray, (v.left,
                                               v.top, self.cell, self.cell)
                    )
                # Flagged cells are covered in navy
                if v.flag:
                    pygame.draw.rect(
                        screen, self.navy, (v.left, v.top,
                                            self.cell, self.cell)
                    )
                pygame.draw.rect(
                    screen, self.black, (v.left, v.top,
                                         self.cell, self.cell), 1
                )

    def game_over_check(self, y, x):
        """If (y, x) is a bomb, reveal the board, set game_over, return True."""
        # Game over
        if self.obj_arr[y][x].kind == 9:
            for rows in self.obj_arr:
                for v in rows:
                    v.flag = False
                    v.visited = True
            self.game_over = True
            return True
        return False

    def game_clear_check(self):
        """Set game_clear once every non-bomb cell has been opened."""
        n = 0
        for rows in self.obj_arr:
            for v in rows:
                if v.kind != 9 and v.visited:
                    n += 1
        if n == self.other:
            for rows in self.obj_arr:
                for v in rows:
                    v.flag = False
                    v.visited = True
            self.game_clear = True

    def search(self, y, x):
        """Flood-fill open from (y, x), recursing through empty regions."""
        if self.obj_arr[y][x].kind == 9:
            return
        # Already visited
        if self.obj_arr[y][x].visited:
            return
        # Numbered cells (>= 1) open but stop the flood fill
        if self.obj_arr[y][x].kind > 0:
            self.obj_arr[y][x].visited = True
            return
        # max/min clamp the window so indices never go out of range
        for r in range(max(0, y - 1), min(self.row - 1, y + 1) + 1):
            for c in range(max(0, x - 1), min(self.col - 1, x + 1) + 1):
                # Open this cell
                self.obj_arr[y][x].visited = True
                # Recurse into the neighbour
                self.search(r, c)
def text(screen, obj, font, color, txt):
    """Render *txt* with *font* in *color* and blit it centered on the
    game window described by *obj* (which supplies width/height)."""
    surface = font.render(txt, True, color)
    pos_x = obj.width // 2 - surface.get_width() // 2
    pos_y = obj.height // 2 - surface.get_height() // 2
    screen.blit(surface, [pos_x, pos_y])
def main():
    """Entry point: read the level from argv and run the pygame loop."""
    args = sys.argv
    # Optional single argument selects the level (1/2/3); default 1.
    if len(args) == 2:
        level = int(args[1])
    else:
        level = 1
    pygame.init()
    pygame.display.set_caption("Minesweeper")
    obj = Game(level)
    screen = pygame.display.set_mode((obj.width, obj.height))
    clock = pygame.time.Clock()
    font1 = pygame.font.Font(None, 48)
    font2 = pygame.font.SysFont("Consolas", 48)
    font3 = pygame.font.SysFont("Consolas", 48, bold=True)
    while True:
        clock.tick(10)  # cap at 10 FPS
        # Gray background once the game has ended
        if obj.game_over or obj.game_clear:
            screen.fill(obj.gray)
        else:
            screen.fill(obj.white)
        obj.draw(pygame, screen, font1)
        if obj.game_over:
            # Black + white pass gives a drop-shadow banner
            text(screen, obj, font3, obj.black, "Game Over")
            text(screen, obj, font2, obj.white, "Game Over")
        elif obj.game_clear:
            text(screen, obj, font3, obj.black, "Game Clear")
            text(screen, obj, font2, obj.white, "Game Clear")
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                # Left click: open the clicked cell (unless flagged/open)
                for rows in obj.obj_arr:
                    for v in rows:
                        if v.collidepoint(event.pos):
                            if v.flag:
                                break
                            if v.visited:
                                break
                            y, x = v.open()
                            if not obj.game_over_check(y, x):
                                obj.search(y, x)
                            if not obj.game_over or not obj.game_clear:
                                obj.game_clear_check()
                            break
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
                # Right click: toggle a flag on an unopened cell
                for rows in obj.obj_arr:
                    for v in rows:
                        if v.collidepoint(event.pos):
                            if v.visited:
                                break
                            v.put_flag()
                            break
if __name__ == "__main__":
    main()
| [
"unknown@example.com"
] | unknown@example.com |
e4d003c6dcaac8fbe6dd7037f13d01fe0ecd2493 | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/TensorFlow/Official/cv/image_segmentation/DeeplabV3_for_TensorFlow/00-access/host_model.py | 38f9d1040de257b31a3ef7f11a56cf5724c2043c | [
"Apache-2.0"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 8,971 | py | import tensorflow as tf
import model
import six
import common
from utils import train_utils
from tensorflow.contrib import metrics as contrib_metrics
from tensorflow.contrib.framework import get_variables_to_restore, assign_from_checkpoint_fn
import numpy as np
from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer
_NUM_EXAMPLES_NAME="num_examples"
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
class RestoreHook(tf.train.SessionRunHook):
    """Session hook that applies a checkpoint-restore function once.

    The init function only runs when the global step is 0 (a fresh
    training run), so resumed trainings keep their own weights.
    """

    def __init__(self, init_fn):
        self.init_fn = init_fn

    def after_create_session(self, session, coord=None):
        step = session.run(tf.train.get_or_create_global_step())
        if step == 0:
            self.init_fn(session)
class Model(object):
    """DeepLabV3 estimator model for Ascend/NPU training and evaluation.

    Builds either a training spec (multi-scale logits + softmax loss +
    momentum optimizer wrapped in NPUDistributedOptimizer) or an eval
    spec (per-class IoU metrics), depending on the estimator mode.
    """

    def __init__(self):
        # Single 'semantic' head with 21 classes (PASCAL VOC).
        self.outputs_to_num_classes = {'semantic':21}
        # Label value excluded from loss and metrics.
        self.ignore_label = 255
        self.model_options = common.ModelOptions(
            outputs_to_num_classes=self.outputs_to_num_classes,
            crop_size=[int(sz) for sz in FLAGS.train_crop_size],
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)
    def get_estimator_model_func(self, features, labels, mode, params=None):
        """Estimator model_fn: dispatch to train or eval graph builders."""
        if mode == tf.estimator.ModeKeys.TRAIN:
            return self.train_fn(features,labels,mode)
        else:
            return self.evaluate_fn(features,labels,mode)
    def train_fn(self,features,labels,mode):
        """Build the training graph and return an EstimatorSpec."""
        samples = {}
        outputs_to_num_classes = self.outputs_to_num_classes
        ignore_label = self.ignore_label
        model_options = self.model_options
        samples[common.IMAGE] = features
        samples[common.LABEL] = labels
        # Forward pass: logits at every scale of the image pyramid.
        outputs_to_scales_to_logits = model.multi_scale_logits(
            samples[common.IMAGE],
            model_options=model_options,
            image_pyramid=FLAGS.image_pyramid,
            weight_decay=FLAGS.weight_decay,
            is_training=True,
            fine_tune_batch_norm=FLAGS.fine_tune_batch_norm,
            nas_training_hyper_parameters={
                'drop_path_keep_prob': FLAGS.drop_path_keep_prob,
                'total_training_steps': FLAGS.training_number_of_steps,
            })
        # Add name to graph node so we can add to summary.
        output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
        output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
            output_type_dict[model.MERGED_LOGITS_SCOPE], name=common.OUTPUT_TYPE)
        # Per-scale softmax cross-entropy loss for each output head.
        for output, num_classes in six.iteritems(outputs_to_num_classes):
            train_utils.add_softmax_cross_entropy_loss_for_each_scale(
                outputs_to_scales_to_logits[output],
                samples[common.LABEL],
                num_classes,
                ignore_label,
                loss_weight=model_options.label_weights,
                upsample_logits=FLAGS.upsample_logits,
                hard_example_mining_step=FLAGS.hard_example_mining_step,
                top_k_percent_pixels=FLAGS.top_k_percent_pixels,
                scope=output)
        # Total loss = cross-entropy losses + L2 regularization.
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        reg = tf.add_n(regularization_losses)
        reg_loss = tf.identity(reg, "reg_loss")
        loss = tf.get_collection(tf.GraphKeys.LOSSES)
        loss = tf.add_n(loss)
        loss = tf.identity(loss, name="loss")
        all_losses = []
        all_losses.append(loss)
        all_losses.append(reg_loss)
        total_loss = tf.add_n(all_losses)
        total_loss = tf.identity(total_loss, name='total_loss')
        global_step = tf.train.get_global_step()
        # Learning-rate schedule (poly/step etc. chosen by FLAGS).
        learning_rate = train_utils.get_model_learning_rate(
            FLAGS.learning_policy,
            FLAGS.base_learning_rate,
            FLAGS.learning_rate_decay_step,
            FLAGS.learning_rate_decay_factor,
            FLAGS.training_number_of_steps,
            FLAGS.learning_power,
            FLAGS.slow_start_step,
            FLAGS.slow_start_learning_rate,
            decay_steps=FLAGS.decay_steps,
            end_learning_rate=FLAGS.end_learning_rate)
        learning_rate = tf.identity(learning_rate, name='learning_rate')
        optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
        # Wrap for distributed training on NPU devices.
        opt = NPUDistributedOptimizer(optimizer)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) or []
        gate_gradients = tf.train.Optimizer.GATE_NONE
        grads_and_vars = opt.compute_gradients(total_loss, gate_gradients=gate_gradients)
        # Optionally boost gradients on the final (logits) layers.
        last_layers = model.get_extra_layer_scopes(
            FLAGS.last_layers_contain_logits_only)
        grad_mult = train_utils.get_model_gradient_multipliers(
            last_layers, FLAGS.last_layer_gradient_multiplier, FLAGS.bias_multiplier)
        if grad_mult:
            grads_and_vars = slim.learning.multiply_gradients(
                grads_and_vars, grad_mult)
        # Create gradient update op.
        grad_updates = opt.apply_gradients(
            grads_and_vars, global_step=global_step)
        update_ops.append(grad_updates)
        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
            train_op = tf.identity(total_loss, name='train_op')
        # Restore pretrained weights (everything except global_step).
        var_list = get_variables_to_restore(exclude=['global_step'])
        init_fn = assign_from_checkpoint_fn(FLAGS.tf_initial_checkpoint, var_list, ignore_missing_vars=True)
        return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op,
                                          training_hooks=[RestoreHook(init_fn)])
    def evaluate_fn(self,features,labels,mode):
        """Build the evaluation graph (per-class IoU) and return a spec."""
        samples = {}
        ignore_label = self.ignore_label
        model_options = self.model_options
        samples[common.IMAGE] = features
        samples[common.LABEL] = labels
        samples[common.IMAGE].set_shape(
            [FLAGS.eval_batch_size,
             int(FLAGS.eval_crop_size[0]),
             int(FLAGS.eval_crop_size[1]),
             3])
        if tuple(FLAGS.eval_scales) == (1.0,):
            tf.logging.info('Performing single-scale test.')
            predictions = model.predict_labels(samples[common.IMAGE], model_options,
                                               image_pyramid=FLAGS.image_pyramid)
        else:
            tf.logging.info('Performing multi-scale test.')
            if FLAGS.quantize_delay_step >= 0:
                raise ValueError(
                    'Quantize mode is not supported with multi-scale test.')
            predictions = model.predict_labels_multi_scale(
                samples[common.IMAGE],
                model_options=model_options,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images)
        predictions = predictions[common.OUTPUT_TYPE]
        predictions = tf.reshape(predictions, shape=[-1])
        labels = tf.reshape(samples[common.LABEL], shape=[-1])
        weights = tf.to_float(tf.not_equal(labels, ignore_label))
        # Set ignore_label regions to label 0, because metrics.mean_iou requires
        # range of labels = [0, dataset.num_classes). Note the ignore_label regions
        # are not evaluated since the corresponding regions contain weights = 0.
        labels = tf.where(
            tf.equal(labels, ignore_label), tf.zeros_like(labels), labels)
        predictions_tag = 'miou'
        for eval_scale in FLAGS.eval_scales:
            predictions_tag += '_' + str(eval_scale)
        if FLAGS.add_flipped_images:
            predictions_tag += '_flipped'
        # Define the evaluation metric.
        metric_map = {}
        num_classes = 21
        # IoU for each class.
        one_hot_predictions = tf.one_hot(predictions, num_classes)
        one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes])
        one_hot_labels = tf.one_hot(labels, num_classes)
        one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes])
        for c in range(num_classes):
            predictions_tag_c = '%s_class_%d' % (predictions_tag, c)
            tp, tp_op = tf.metrics.true_positives(
                labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
                weights=weights)
            fp, fp_op = tf.metrics.false_positives(
                labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
                weights=weights)
            fn, fn_op = tf.metrics.false_negatives(
                labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
                weights=weights)
            tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op)
            # IoU = TP / (TP + FN + FP); NaN when the class never appears.
            iou = tf.where(tf.greater(tp + fn, 0.0),
                           tp / (tp + fn + fp),
                           tf.constant(np.NaN))
            metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op)
        total_loss = tf.losses.get_total_loss()
        return tf.estimator.EstimatorSpec(mode, loss=total_loss, eval_metric_ops=metric_map)
| [
"1571856591@qq.com"
] | 1571856591@qq.com |
37395963f8379853974f5f8696e8b8931e11ce62 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/profile_create_tags_rsp_2.py | ad47f748c488af1111b4d2fa7afdfb17c4763642 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 766 | py | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.base_rsp_5 import BaseRsp5
from travelport.models.tag_2 import Tag2
__NAMESPACE__ = "http://www.travelport.com/schema/uprofile_v37_0"
@dataclass
class ProfileCreateTagsRsp2(BaseRsp5):
    """
    Response with all the tags for the agency.

    Generated xsdata binding class; the inner ``Meta`` maps it onto the
    ``ProfileCreateTagsRsp`` element of the uProfile v37.0 schema.

    Parameters
    ----------
    tag
        A tag that belongs to the agency.
    """
    class Meta:
        # XML element name and namespace used by the xsdata (de)serializer.
        name = "ProfileCreateTagsRsp"
        namespace = "http://www.travelport.com/schema/uprofile_v37_0"
    # Schema requires between 1 and 15 Tag elements.
    tag: list[Tag2] = field(
        default_factory=list,
        metadata={
            "name": "Tag",
            "type": "Element",
            "min_occurs": 1,
            "max_occurs": 15,
        }
    )
| [
"chris@komposta.net"
] | chris@komposta.net |
634891dd887f82412cd1b7ec26823a973afb2113 | d4a30b61cf9b4602fe09a056f80f210d7426830f | /clinvoc/loinc.py | aea903aa670a8dd14fd2cf7761d1e074243e1184 | [
"MIT"
] | permissive | dustinrbunch/clinvoc | c3bbe444ad1e39f63a1ed9c8268cd8ee64e02113 | 98b0be94f4c3c5a9ea58a343c5ce5e27b688d1a0 | refs/heads/master | 2021-08-22T18:24:12.819112 | 2017-11-30T23:09:24 | 2017-11-30T23:09:24 | 116,181,004 | 0 | 1 | null | 2018-01-03T20:54:04 | 2018-01-03T20:54:03 | null | UTF-8 | Python | false | false | 1,043 | py | import csv
from .base import RegexVocabulary, LexicographicPatternMatchVocabulary, LexicographicRangeFillVocabulary, \
LexicographicVocabulary, left_pad, ObservationVocabulary
import os
from .resources import resources
from six import next
import io
def _read_text_file(filename):
    """Return the first column of every data row in a CSV file.

    The first row is assumed to be a header and is skipped.
    """
    with io.open(filename, mode='rt', encoding='utf-8') as infile:
        reader = csv.reader(infile, delimiter=',', quoting=csv.QUOTE_ALL)
        next(reader)  # drop the header row
        return [row[0] for row in reader]
_all_loinc_codes = _read_text_file(os.path.join(resources, 'LOINC_2.59_Text', 'loinc.csv'))
class LOINC(RegexVocabulary, LexicographicPatternMatchVocabulary, LexicographicRangeFillVocabulary, ObservationVocabulary):
    """LOINC code vocabulary: regex validation plus lexicographic lookup
    over the full code list loaded from the bundled LOINC 2.59 table."""
    vocab_name = 'LOINC'
    def __init__(self):
        # Codes look like NNNNN-C: 1-5 digits (or '*' wildcards), a dash,
        # then a single check digit (or '*').
        RegexVocabulary.__init__(self, '[\d\*]{1,5}\-[\d\*]')
        # NOTE(review): LexicographicVocabulary is presumably a shared base
        # of the lexicographic mixins above -- confirm it is in the MRO.
        LexicographicVocabulary.__init__(self, map(self.standardize, _all_loinc_codes))
    def _standardize(self, code):
        # Left-pad to 7 characters so codes compare lexicographically.
        return left_pad(code, 7)
| [
"jcrudy@gmail.com"
] | jcrudy@gmail.com |
f7b39286d2b091f1ad630633a4d2b7ec3098387d | 94e7c790d17ba08e8a2a74077dd8b75e7ac120b0 | /chapter02/Exercise26_02.py | 9d8e9d3a031246e8e7c67e7f58065308f71f28bc | [] | no_license | lutfar9427/Exercises_Solution_of_INTRODUCTION_TO_PROGRAMMING_USING_Python | 9632e515428685dcaa7d057cf52f0e191e9f7ae0 | d037475316e6c6b7c6a7a7023318ef4ab4ed3f8d | refs/heads/master | 2020-09-02T09:04:44.990668 | 2018-10-20T00:50:12 | 2018-10-20T00:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | ''' **2.26 (Turtle: draw a circle) Write a program that prompts the user to enter the
center and radius of a circle, and then displays the circle and its area.
/**
* @author BASSAM FARAMAWI
* @email tiodaronzi3@yahoo.com
* @since 2018
*/
'''
import turtle  # Turtle graphics for drawing the circle
import math  # math.pi for the area computation
# Prompt the user to enter the center point as "x, y".
# NOTE(review): eval() on raw user input executes arbitrary code; the
# textbook relies on it to parse "x, y", but it is unsafe in general.
centerX, centerY = eval(input("Enter the circle center in X and Y: "))
# Prompt the user to enter the circle radius
radius = eval(input("Enter the circle radius: "))
turtle.showturtle()  # Show the turtle graphics window
# Move (pen up) to the bottom of the circle, then draw it:
# turtle.circle() draws from the current position with the center
# ``radius`` units to the turtle's left.
turtle.penup()
turtle.goto(centerX, centerY - radius)
turtle.pendown()
turtle.circle(radius)
turtle.penup()
# Write the area, truncated to 2 decimal places, at the center
turtle.goto(centerX, centerY)
turtle.pendown()
area = math.pi * radius ** 2
turtle.write(int(area * 100) / 100)
turtle.done()  # Don't close the turtle graphics window
| [
"tiodaronzi3@yahoo.com"
] | tiodaronzi3@yahoo.com |
b78e01d3e839e6f6c9a74c11e5ff4bc3a6cd0f88 | 70e970ce9ec131449b0888388f65f0bb55f098cd | /SignalMC/python/pythia8/AMSB_gluinoToChargino_M-1600GeV_M-700GeV_CTau-10cm_TuneCP5_13TeV_pythia8_cff.py | 035618a10cec7e4d889596252199520fcdd5d624 | [] | no_license | OSU-CMS/DisappTrks | 53b790cc05cc8fe3a9f7fbd097284c5663e1421d | 1d1c076863a9f8dbd3f0c077d5821a8333fc5196 | refs/heads/master | 2023-09-03T15:10:16.269126 | 2023-05-25T18:37:40 | 2023-05-25T18:37:40 | 13,272,469 | 5 | 12 | null | 2023-09-13T12:15:49 | 2013-10-02T13:58:51 | Python | UTF-8 | Python | false | false | 8,097 | py | COM_ENERGY = 13000.
MGLU = 1600 # GeV
MCHI = 700 # GeV
CTAU = 100 # mm
CROSS_SECTION = 0.00887 # pb
SLHA_TABLE="""
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: C. Balazs 21 Apr 2009
Block SPINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.80 29-OCT-2009 12:50:36 # Version number
Block MODSEL # Model selection
1 3 # Minimal anomaly mediated (AMSB) model
Block SMINPUTS # Standard Model inputs
1 1.27842453E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.17200002E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73070007E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 1.50000000E+03 # m_0
2 2.46440000E+05 # m_{3/2}
3 5.00000000E+00 # tan(beta)
4 1.00000000E+00 # sign(mu)
Block EXTPAR # Non-universal SUSY breaking parameters
0 1.04228903E+16 # Input scale
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
24 8.04229965E+01 # W^+
25 1.16918777E+02 # h^0
35 4.13995459E+03 # H^0
36 4.11271240E+03 # A^0
37 4.12772119E+03 # H^+
1000001 4.68634814E+03 # dnl
1000002 4.68567432E+03 # upl
1000003 4.68634814E+03 # stl
1000004 4.68567480E+03 # chl
1000005 4.09400562E+03 # b1
1000006 3.40991528E+03 # t1
1000011 1.14678894E+03 # el-
1000012 1.12562231E+03 # nuel
1000013 1.14678894E+03 # mul-
1000014 1.12562231E+03 # numl
1000015 1.02227649E+03 # tau1
1000016 1.11225781E+03 # nutl
1000021 %.9g # glss
1000022 6.99874146E+02 # z1ss
1000023 2.26904956E+03 # z2ss
1000024 7.00047607E+02 # w1ss
1000025 -3.87153369E+03 # z3ss
1000035 3.87282349E+03 # z4ss
1000037 3.87772314E+03 # w2ss
2000001 4.76078076E+03 # dnr
2000002 4.71648975E+03 # upr
2000003 4.76078076E+03 # str
2000004 4.71649023E+03 # chr
2000005 4.72474414E+03 # b2
2000006 4.13260303E+03 # t2
2000011 1.02800623E+03 # er-
2000013 1.02800623E+03 # mur-
2000015 1.12574829E+03 # tau2
Block ALPHA # Effective Higgs mixing parameter
-1.97664991E-01 # alpha
Block STOPMIX # stop mixing matrix
1 1 8.36024433E-02 # O_{11}
1 2 -9.96499181E-01 # O_{12}
2 1 9.96499181E-01 # O_{21}
2 2 8.36024433E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 9.99983907E-01 # O_{11}
1 2 5.66892792E-03 # O_{12}
2 1 -5.66892792E-03 # O_{21}
2 2 9.99983907E-01 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 1.32659495E-01 # O_{11}
1 2 9.91161644E-01 # O_{12}
2 1 -9.91161644E-01 # O_{21}
2 2 1.32659495E-01 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 -8.25339637E-04 #
1 2 9.99776781E-01 #
1 3 -2.02405099E-02 #
1 4 6.01018919E-03 #
2 1 9.99794424E-01 #
2 2 1.23403966E-03 #
2 3 1.68632567E-02 #
2 4 -1.11932158E-02 #
3 1 -4.01982665E-03 #
3 2 1.00584431E-02 #
3 3 7.06979156E-01 #
3 4 7.07151294E-01 #
4 1 1.98580157E-02 #
4 2 -1.85414888E-02 #
4 3 -7.06743419E-01 #
4 4 7.06947982E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.99564528E-01 # U_{11}
1 2 2.95085218E-02 # U_{12}
2 1 -2.95085218E-02 # U_{21}
2 2 -9.99564528E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.99936998E-01 # V_{11}
1 2 1.12252701E-02 # V_{12}
2 1 -1.12252701E-02 # V_{21}
2 2 -9.99936998E-01 # V_{22}
Block GAUGE Q= 3.58269727E+03 #
1 3.57497722E-01 # g`
2 6.52475953E-01 # g_2
3 1.22070026E+00 # g_3
Block YU Q= 3.58269727E+03 #
3 3 8.38887691E-01 # y_t
Block YD Q= 3.58269727E+03 #
3 3 6.52210116E-02 # y_b
Block YE Q= 3.58269727E+03 #
3 3 5.15824445E-02 # y_tau
Block HMIX Q= 3.58269727E+03 # Higgs mixing parameters
1 3.87514209E+03 # mu(Q)
2 5.00000000E+00 # tan(beta)(M_GUT)
3 2.51709106E+02 # Higgs vev at Q
4 1.69144040E+07 # m_A^2(Q)
Block MSOFT Q= 3.58269727E+03 # DRbar SUSY breaking parameters
1 2.30335156E+03 # M_1(Q)
2 6.64254944E+02 # M_2(Q)
3 -4.50376855E+03 # M_3(Q)
31 1.12926123E+03 # MeL(Q)
32 1.12926123E+03 # MmuL(Q)
33 1.11625525E+03 # MtauL(Q)
34 1.03541077E+03 # MeR(Q)
35 1.03541077E+03 # MmuR(Q)
36 9.99967957E+02 # MtauR(Q)
41 4.45722266E+03 # MqL1(Q)
42 4.45722266E+03 # MqL2(Q)
43 3.91252832E+03 # MqL3(Q)
44 4.48730469E+03 # MuR(Q)
45 4.48730469E+03 # McR(Q)
46 3.28067163E+03 # MtR(Q)
47 4.53066406E+03 # MdR(Q)
48 4.53066406E+03 # MsR(Q)
49 4.55108252E+03 # MbR(Q)
Block AU Q= 3.58269727E+03 #
1 1 3.86256177E+03 # A_u
2 2 3.86256177E+03 # A_c
3 3 3.86256177E+03 # A_t
Block AD Q= 3.58269727E+03 #
1 1 9.22079785E+03 # A_d
2 2 9.22079785E+03 # A_s
3 3 9.22079785E+03 # A_b
Block AE Q= 3.58269727E+03 #
1 1 2.57661255E+03 # A_e
2 2 2.57661255E+03 # A_mu
3 3 2.57661255E+03 # A_tau
#
#
#
# =================
# |The decay table|
# =================
#
# PDG Width
DECAY 1000021 5.50675438E+00 # gluino decay
# BR NDA ID1 ID2 ID3
2.50000000E-01 3 1 -1 1000022
2.50000000E-01 3 2 -2 1000022
2.50000000E-01 3 1 -2 1000024
2.50000000E-01 3 -1 2 -1000024
#
# PDG Width
DECAY 1000024 %.9g # chargino decay
#
""" % (MGLU, (1.97326979e-13 / CTAU))
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(-1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
processParameters = cms.vstring(
'SUSY:all = off',
'SUSY:gg2gluinogluino = on',
'SUSY:qqbar2gluinogluino = on',
'1000024:isResonance = false',
'1000024:oneChannel = 1 1.0 100 1000022 211',
'1000024:tau0 = %.1f' % CTAU,
'ParticleDecays:tau0Max = %.1f' % (CTAU * 10),
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CP5Settings',
'processParameters')
),
# The following parameters are required by Exotica_HSCP_SIM_cfi:
slhaFile = cms.untracked.string(''), # value not used
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
hscpFlavor = cms.untracked.string('stau'),
massPoint = cms.untracked.int32(MCHI), # value not used
particleFile = cms.untracked.string('Configuration/GenProduction/python/ThirteenTeV/DisappTrksAMSBCascade/test/geant4_AMSB_chargino_%sGeV_ctau%scm.slha' % (MCHI, CTAU/10))
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"ahart@cern.ch"
] | ahart@cern.ch |
5456d98fda23ccd01542b2a032e887e72ebec876 | 57780a29b7dd0a67a29e3c01b55a83b77ef16134 | /tests/file_io/encrypted_stream_io.py | 2b260531227ae8cda85ff3e8eb28be3f65c3fcdd | [
"Apache-2.0"
] | permissive | atilaromero/dfvfs | 989c7095a29e4ad8928a1b75bf3f7c63628d8b16 | 981796fd79376e54302a494f886ca310a0663360 | refs/heads/master | 2021-05-14T17:35:12.185865 | 2018-01-02T21:49:24 | 2018-01-02T21:49:24 | 116,050,662 | 0 | 0 | null | 2018-01-02T19:42:56 | 2018-01-02T19:42:56 | null | UTF-8 | Python | false | false | 15,045 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the encrypted stream file-like object."""
from __future__ import unicode_literals
import os
import unittest
from dfvfs.file_io import encrypted_stream_io
from dfvfs.file_io import os_file_io
from dfvfs.lib import definitions
from dfvfs.path import encrypted_stream_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from dfvfs.resolver import resolver
from tests import test_lib as shared_test_lib
from tests.file_io import test_lib
@shared_test_lib.skipUnlessHasTestFile(['syslog.aes'])
class AESEncryptedStreamWithKeyChainTest(test_lib.PaddedSyslogTestCase):
"""Tests the RC4 encrypted stream file-like object.
The credentials are passed via the key chain.
"""
_AES_KEY = b'This is a key123'
_AES_MODE = definitions.ENCRYPTION_MODE_CBC
_AES_IV = b'This is an IV456'
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.aes'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._encrypted_stream_path_spec = (
encrypted_stream_path_spec.EncryptedStreamPathSpec(
encryption_method=definitions.ENCRYPTION_METHOD_AES,
parent=self._os_path_spec))
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'key', self._AES_KEY)
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'initialization_vector',
self._AES_IV)
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'cipher_mode', self._AES_MODE)
self.padding_size = 1
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
os_file_object = os_file_io.OSFile(self._resolver_context)
os_file_object.open(path_spec=self._os_path_spec)
file_object = encrypted_stream_io.EncryptedStream(
self._resolver_context,
encryption_method=definitions.ENCRYPTION_METHOD_AES,
file_object=os_file_object)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
os_file_object.close()
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
# TODO: Test SEEK_CUR after open.
# Test SEEK_END after open.
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
file_object.seek(-10 - self.padding_size, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
@shared_test_lib.skipUnlessHasTestFile(['syslog.aes'])
class AESEncryptedStreamTest(test_lib.PaddedSyslogTestCase):
"""The unit test for a AES encrypted stream file-like object.
The credentials are passed via the path specification.
"""
_AES_CIPHER_MODE = definitions.ENCRYPTION_MODE_CBC
_AES_INITIALIZATION_VECTOR = b'This is an IV456'
_AES_KEY = b'This is a key123'
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.aes'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._encrypted_stream_path_spec = (
encrypted_stream_path_spec.EncryptedStreamPathSpec(
cipher_mode=self._AES_CIPHER_MODE,
encryption_method=definitions.ENCRYPTION_METHOD_AES,
initialization_vector=self._AES_INITIALIZATION_VECTOR,
key=self._AES_KEY, parent=self._os_path_spec))
self.padding_size = 1
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
os_file_object = os_file_io.OSFile(self._resolver_context)
os_file_object.open(path_spec=self._os_path_spec)
file_object = encrypted_stream_io.EncryptedStream(
self._resolver_context,
encryption_method=definitions.ENCRYPTION_METHOD_AES,
file_object=os_file_object)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
os_file_object.close()
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
# TODO: Test SEEK_CUR after open.
# Test SEEK_END after open.
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
file_object.seek(-10 - self.padding_size, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
@shared_test_lib.skipUnlessHasTestFile(['syslog.blowfish'])
class BlowfishEncryptedStreamWithKeyChainTest(test_lib.PaddedSyslogTestCase):
"""Tests the Blowfish encrypted stream file-like object.
The credentials are passed via the key chain.
"""
_BLOWFISH_KEY = b'This is a key123'
_BLOWFISH_MODE = definitions.ENCRYPTION_MODE_CBC
_BLOWFISH_IV = b'This IV!'
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.blowfish'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._encrypted_stream_path_spec = (
encrypted_stream_path_spec.EncryptedStreamPathSpec(
encryption_method=definitions.ENCRYPTION_METHOD_BLOWFISH,
parent=self._os_path_spec))
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'key', self._BLOWFISH_KEY)
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'initialization_vector',
self._BLOWFISH_IV)
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'cipher_mode', self._BLOWFISH_MODE)
self.padding_size = 1
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
os_file_object = os_file_io.OSFile(self._resolver_context)
os_file_object.open(path_spec=self._os_path_spec)
file_object = encrypted_stream_io.EncryptedStream(
self._resolver_context,
encryption_method=definitions.ENCRYPTION_METHOD_BLOWFISH,
file_object=os_file_object)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
os_file_object.close()
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
# TODO: Test SEEK_CUR after open.
# Test SEEK_END after open.
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
file_object.seek(-10 - self.padding_size, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
@shared_test_lib.skipUnlessHasTestFile(['syslog.des3'])
class DES3EncryptedStreamWithKeyChainTest(test_lib.PaddedSyslogTestCase):
"""Tests the Triple DES encrypted stream file-like object.
The credentials are passed via the key chain.
"""
_DES3_KEY = b'This is a key123'
_DES3_MODE = definitions.ENCRYPTION_MODE_CBC
_DES3_IV = b'This IV!'
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.des3'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._encrypted_stream_path_spec = (
encrypted_stream_path_spec.EncryptedStreamPathSpec(
encryption_method=definitions.ENCRYPTION_METHOD_DES3,
parent=self._os_path_spec))
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'key', self._DES3_KEY)
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'initialization_vector',
self._DES3_IV)
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'cipher_mode', self._DES3_MODE)
self.padding_size = 1
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
os_file_object = os_file_io.OSFile(self._resolver_context)
os_file_object.open(path_spec=self._os_path_spec)
file_object = encrypted_stream_io.EncryptedStream(
self._resolver_context,
encryption_method=definitions.ENCRYPTION_METHOD_DES3,
file_object=os_file_object)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
os_file_object.close()
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
# TODO: Test SEEK_CUR after open.
# Test SEEK_END after open.
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
file_object.seek(-10 - self.padding_size, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
@shared_test_lib.skipUnlessHasTestFile(['syslog.rc4'])
class RC4EncryptedStreamWithKeyChainTest(test_lib.SylogTestCase):
"""Tests the RC4 encrypted stream file-like object.
The credentials are passed via the key chain.
"""
_RC4_KEY = b'rc4test'
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.rc4'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._encrypted_stream_path_spec = (
encrypted_stream_path_spec.EncryptedStreamPathSpec(
encryption_method=definitions.ENCRYPTION_METHOD_RC4,
parent=self._os_path_spec))
resolver.Resolver.key_chain.SetCredential(
self._encrypted_stream_path_spec, 'key', self._RC4_KEY)
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
os_file_object = os_file_io.OSFile(self._resolver_context)
os_file_object.open(path_spec=self._os_path_spec)
file_object = encrypted_stream_io.EncryptedStream(
self._resolver_context,
encryption_method=definitions.ENCRYPTION_METHOD_RC4,
file_object=os_file_object)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
os_file_object.close()
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close()
def testSeek(self):
"""Test the seek functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestSeekFileObject(file_object)
file_object.close()
# TODO: Test SEEK_CUR after open.
# Test SEEK_END after open.
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'times')
file_object.close()
def testRead(self):
"""Test the read functionality."""
file_object = encrypted_stream_io.EncryptedStream(self._resolver_context)
file_object.open(path_spec=self._encrypted_stream_path_spec)
self._TestReadFileObject(file_object)
file_object.close()
if __name__ == '__main__':
unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
60a7145e797fe9b9da04053539801eccab969c45 | 5623771414b26c021be54facaaaefbd9314b389d | /pynativesite/ex7-stringcounter.py | 975d996c8e378511a3c7587ff2534e80a53bbd48 | [] | no_license | saxenasamarth/BootCamp_PythonLearning | 36b705b83c7f0e297931bb8d75cb541088690248 | d5b8fe2d6fcfe54c5a7393f218414b1122f3e49e | refs/heads/master | 2023-04-17T15:29:05.402863 | 2019-08-29T08:46:34 | 2019-08-29T08:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | a="Emma is a good developer. Emma is also a writer"
l=a.split()
myDict ={}
for i in l:
if i not in myDict:
myDict[i]=1
else:
myDict[i]+=1
print(myDict)
| [
"saxenasamarth@gmail.com"
] | saxenasamarth@gmail.com |
d6e9267928a3c0f506fc52dcf65dd8766979f701 | 63e6dbbca3fd45438300080e6def65896fe0f7ea | /tests/test_runner.py | 4ef52015c20d486a8ac3abf7e373495717d62d34 | [
"MIT"
] | permissive | rubyvirus/ApiTestEngine | ab7084b26ec8d046cb592df87d8a74cfa3cbe830 | be73317f593ecc2d42425f8e51109d45d3752d46 | refs/heads/master | 2021-01-01T19:33:27.153275 | 2017-07-27T13:12:10 | 2017-07-27T13:12:10 | 98,614,939 | 2 | 0 | null | 2017-07-28T06:11:58 | 2017-07-28T06:11:58 | null | UTF-8 | Python | false | false | 4,756 | py | import os
import requests
from ate import runner, exception, utils
from tests.base import ApiServerUnittest
class TestRunner(ApiServerUnittest):
def setUp(self):
self.test_runner = runner.Runner()
self.reset_all()
self.testcase_file_path_list = [
os.path.join(
os.getcwd(), 'tests/data/demo_testset_hardcode.yml'),
os.path.join(
os.getcwd(), 'tests/data/demo_testset_hardcode.json')
]
def reset_all(self):
url = "%s/api/reset-all" % self.host
headers = self.get_authenticated_headers()
return self.api_client.get(url, headers=headers)
def test_run_single_testcase(self):
for testcase_file_path in self.testcase_file_path_list:
testcases = utils.load_testcases(testcase_file_path)
testcase = testcases[0]["test"]
success, _ = self.test_runner.run_test(testcase)
self.assertTrue(success)
testcase = testcases[1]["test"]
success, _ = self.test_runner.run_test(testcase)
self.assertTrue(success)
testcase = testcases[2]["test"]
success, _ = self.test_runner.run_test(testcase)
self.assertTrue(success)
def test_run_single_testcase_fail(self):
testcase = {
"name": "get token",
"request": {
"url": "http://127.0.0.1:5000/api/get-token",
"method": "POST",
"headers": {
"content-type": "application/json",
"user_agent": "iOS/10.3",
"device_sn": "HZfFBh6tU59EdXJ",
"os_platform": "ios",
"app_version": "2.8.6"
},
"json": {
"sign": "f1219719911caae89ccc301679857ebfda115ca2"
}
},
"extract_binds": [
{"token": "content.token"}
],
"validators": [
{"check": "status_code", "comparator": "eq", "expected": 205},
{"check": "content.token", "comparator": "len_eq", "expected": 19}
]
}
success, diff_content_list = self.test_runner.run_test(testcase)
self.assertFalse(success)
self.assertEqual(
diff_content_list[0],
{"check": "status_code", "comparator": "eq", "expected": 205, 'value': 200}
)
def test_run_testset_hardcode(self):
for testcase_file_path in self.testcase_file_path_list:
testsets = utils.load_testcases_by_path(testcase_file_path)
results = self.test_runner.run_testset(testsets[0])
self.assertEqual(len(results), 3)
self.assertEqual(results, [(True, [])] * 3)
def test_run_testsets_hardcode(self):
for testcase_file_path in self.testcase_file_path_list:
testsets = utils.load_testcases_by_path(testcase_file_path)
results = self.test_runner.run_testsets(testsets)
self.assertEqual(len(results), 1)
self.assertEqual(results, [[(True, [])] * 3])
def test_run_testset_template_variables(self):
testcase_file_path = os.path.join(
os.getcwd(), 'tests/data/demo_testset_variables.yml')
testsets = utils.load_testcases_by_path(testcase_file_path)
results = self.test_runner.run_testset(testsets[0])
self.assertEqual(len(results), 3)
self.assertEqual(results, [(True, [])] * 3)
def test_run_testset_template_import_functions(self):
testcase_file_path = os.path.join(
os.getcwd(), 'tests/data/demo_testset_template_import_functions.yml')
testsets = utils.load_testcases_by_path(testcase_file_path)
results = self.test_runner.run_testset(testsets[0])
self.assertEqual(len(results), 3)
self.assertEqual(results, [(True, [])] * 3)
def test_run_testsets_template_import_functions(self):
testcase_file_path = os.path.join(
os.getcwd(), 'tests/data/demo_testset_template_import_functions.yml')
testsets = utils.load_testcases_by_path(testcase_file_path)
results = self.test_runner.run_testsets(testsets)
self.assertEqual(len(results), 1)
self.assertEqual(results, [[(True, [])] * 3])
def test_run_testsets_template_lambda_functions(self):
testcase_file_path = os.path.join(
os.getcwd(), 'tests/data/demo_testset_template_lambda_functions.yml')
testsets = utils.load_testcases_by_path(testcase_file_path)
results = self.test_runner.run_testsets(testsets)
self.assertEqual(len(results), 1)
self.assertEqual(results, [[(True, [])] * 3])
| [
"mail@debugtalk.com"
] | mail@debugtalk.com |
680a6c84c39e1de7ff9a01a0299a6d53e240bf45 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/octoprint/test_button.py | 644c1e39437a6d69a666b58933b51f6fe644fd34 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 6,445 | py | """Test the OctoPrint buttons."""
from unittest.mock import patch
from pyoctoprintapi import OctoprintPrinterInfo
import pytest
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.octoprint import OctoprintDataUpdateCoordinator
from homeassistant.components.octoprint.button import InvalidPrinterState
from homeassistant.components.octoprint.const import DOMAIN
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from . import init_integration
async def test_pause_job(hass: HomeAssistant):
"""Test the pause job button."""
await init_integration(hass, BUTTON_DOMAIN)
corrdinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN]["uuid"][
"coordinator"
]
# Test pausing the printer when it is printing
with patch("pyoctoprintapi.OctoprintClient.pause_job") as pause_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{"state": {"flags": {"printing": True}}, "temperature": []}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_pause_job",
},
blocking=True,
)
assert len(pause_command.mock_calls) == 1
# Test pausing the printer when it is paused
with patch("pyoctoprintapi.OctoprintClient.pause_job") as pause_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{"state": {"flags": {"printing": False, "paused": True}}, "temperature": []}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_pause_job",
},
blocking=True,
)
assert len(pause_command.mock_calls) == 0
# Test pausing the printer when it is stopped
with patch(
"pyoctoprintapi.OctoprintClient.pause_job"
) as pause_command, pytest.raises(InvalidPrinterState):
corrdinator.data["printer"] = OctoprintPrinterInfo(
{
"state": {"flags": {"printing": False, "paused": False}},
"temperature": [],
}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_pause_job",
},
blocking=True,
)
async def test_resume_job(hass: HomeAssistant):
"""Test the resume job button."""
await init_integration(hass, BUTTON_DOMAIN)
corrdinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN]["uuid"][
"coordinator"
]
# Test resuming the printer when it is paused
with patch("pyoctoprintapi.OctoprintClient.resume_job") as resume_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{"state": {"flags": {"printing": False, "paused": True}}, "temperature": []}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_resume_job",
},
blocking=True,
)
assert len(resume_command.mock_calls) == 1
# Test resuming the printer when it is printing
with patch("pyoctoprintapi.OctoprintClient.resume_job") as resume_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{"state": {"flags": {"printing": True, "paused": False}}, "temperature": []}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_resume_job",
},
blocking=True,
)
assert len(resume_command.mock_calls) == 0
# Test resuming the printer when it is stopped
with patch(
"pyoctoprintapi.OctoprintClient.resume_job"
) as resume_command, pytest.raises(InvalidPrinterState):
corrdinator.data["printer"] = OctoprintPrinterInfo(
{
"state": {"flags": {"printing": False, "paused": False}},
"temperature": [],
}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_resume_job",
},
blocking=True,
)
async def test_stop_job(hass: HomeAssistant):
"""Test the stop job button."""
await init_integration(hass, BUTTON_DOMAIN)
corrdinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN]["uuid"][
"coordinator"
]
# Test stopping the printer when it is paused
with patch("pyoctoprintapi.OctoprintClient.cancel_job") as stop_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{"state": {"flags": {"printing": False, "paused": True}}, "temperature": []}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_stop_job",
},
blocking=True,
)
assert len(stop_command.mock_calls) == 1
# Test stopping the printer when it is printing
with patch("pyoctoprintapi.OctoprintClient.cancel_job") as stop_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{"state": {"flags": {"printing": True, "paused": False}}, "temperature": []}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_stop_job",
},
blocking=True,
)
assert len(stop_command.mock_calls) == 1
# Test stopping the printer when it is stopped
with patch("pyoctoprintapi.OctoprintClient.cancel_job") as stop_command:
corrdinator.data["printer"] = OctoprintPrinterInfo(
{
"state": {"flags": {"printing": False, "paused": False}},
"temperature": [],
}
)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{
ATTR_ENTITY_ID: "button.octoprint_stop_job",
},
blocking=True,
)
assert len(stop_command.mock_calls) == 0
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
a9a82af96b485411b2ec5a3f59de2862037ee495 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_18996.py | af685c7f5079cba4ddd37ae43905fd65376a76de | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # Tracking down where builtin function is being called from in Python
stats.print_callers('zip')
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
5249e7fc960985d57a355934617c01ef033bc8b0 | bc5e3ec2665f795b84671317ce736719ab79dc0f | /unit_tests/source_information/test_delete_source_information.py | e3fc02bbbeac4e5baefc5f7d83a8dc72323e5342 | [
"MIT"
] | permissive | uk-gov-mirror/LandRegistry.maintain-frontend | 9de44a9f42c4c29682276420dcf297d0afb48e5f | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | refs/heads/master | 2021-09-26T16:14:55.686790 | 2018-10-29T15:37:03 | 2018-10-31T14:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,681 | py | from flask_testing import TestCase
from flask import url_for
from maintain_frontend import main
from unit_tests.utilities import Utilities
from unittest.mock import patch
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.constants.permissions import Permissions
class TestAddSourceInformation(TestCase):
def create_app(self):
main.app.testing = True
Utilities.mock_session_cookie_flask_test(self)
return main.app
def setUp(self):
main.app.config['Testing'] = True
main.app.config['WTF_CSRF_ENABLED'] = False
self.client.set_cookie('localhost', Session.session_cookie_name,
'cookie_value')
def test_get_delete_source_information(self):
self.mock_session.return_value.user.permissions = [Permissions.manage_source_information]
response = self.client.get(url_for('source_info.get_delete_source_information'))
self.assert200(response)
def test_get_delete_source_information_no_permissions(self):
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('source_info.get_delete_source_information'))
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
@patch('maintain_frontend.source_information.delete_source_information.LocalAuthorityService')
@patch('maintain_frontend.source_information.delete_source_information.request')
def test_post_delete_source_information(self, mock_request, mock_local_authority_service):
self.mock_session.return_value.user.permissions = [Permissions.manage_source_information]
self.mock_session.return_value.user.organisation = "Test Organisation"
self.mock_session.return_value.source_information_id = 1
self.mock_session.return_value.submit_token = "previous-token"
mock_request.form = {"csrf_token": "new-token"}
response = self.client.post(url_for('source_info.post_delete_source_information'))
self.assert_status(response, 302)
self.assertRedirects(response, url_for('source_info.get_delete_source_information_success'))
self.assertTrue(self.mock_session.return_value.commit.called)
self.assertEqual(self.mock_session.return_value.submit_token, "new-token")
mock_local_authority_service.return_value.delete_source_information_for_organisation\
.assert_called_with("Test Organisation", 1)
def test_post_delete_source_information_no_permissions(self):
self.mock_session.return_value.user.permissions = []
response = self.client.post(url_for('source_info.post_delete_source_information'))
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
def test_get_delete_source_information_success(self):
self.mock_session.return_value.user.permissions = [Permissions.manage_source_information]
self.mock_session.return_value.source_information = "Source information"
response = self.client.get(url_for('source_info.get_delete_source_information_success'))
self.assert_status(response, 200)
self.assertTrue(self.mock_session.return_value.commit.called)
self.assertIsNone(self.mock_session.return_value.source_information)
def test_get_delete_source_information_success_no_permission(self):
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('source_info.get_delete_source_information_success'))
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
| [
"james.lademann@landregistry.gov.uk"
] | james.lademann@landregistry.gov.uk |
392aa046fe0104e22e235838f4d8355d8a3939fa | 7f44a279773732b183963349d146a8dd9a195b88 | /home/migrations/0029_room_exhibition_page.py | a976be38f8ae51532c2347c6b5fa4bca07ade57c | [] | no_license | pseudobabble/cms-boilerplate | f138060e2f25721191289eb261185136ae9cf6bd | 3923a8ebe1541118c5551b0996557f241943831f | refs/heads/master | 2022-12-28T01:30:49.554898 | 2020-10-15T15:23:10 | 2020-10-15T15:23:10 | 283,308,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # Generated by Django 3.1.1 on 2020-09-30 21:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0052_pagelogentry'),
('home', '0028_auto_20200930_2134'),
]
operations = [
migrations.AddField(
model_name='room',
name='exhibition_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page'),
),
]
| [
"harryjohnson500@gmail.com"
] | harryjohnson500@gmail.com |
d26eeb5552b0369f084f7a5128394a09e9dc4b0c | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030113954.py | 8695d3c226c94c245723c3f81cade7c00b47a490 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.db import models
from django_extensions.db.fields
from modelcluster.models import ClusterableModel
# Create your models here.
class Menu(ClusterableModel):
title = models.CharField(max_length=100)
slug = AutoSlugField(
populate_from='title',
editable=True,
)
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
5c874cba631eba15e0314dd533f3b1f495c59b31 | ec1059f4ccea10deb2cb8fd7f9458700a5e6ca4c | /venv/Lib/site-packages/qiskit/circuit/library/standard_gates/iswap.py | b20ce83c988b144870152947a13771d29c91abc7 | [
"Apache-2.0",
"MIT"
] | permissive | shivam675/Quantum-CERN | b60c697a3a7ad836b3653ee9ce3875a6eafae3ba | ce02d9198d9f5a1aa828482fea9b213a725b56bb | refs/heads/main | 2023-01-06T20:07:15.994294 | 2020-11-13T10:01:38 | 2020-11-13T10:01:38 | 330,435,191 | 1 | 0 | MIT | 2021-01-17T16:29:26 | 2021-01-17T16:29:25 | null | UTF-8 | Python | false | false | 3,414 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""iSWAP gate."""
import numpy as np
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class iSwapGate(Gate):
    r"""iSWAP gate: a 2-qubit XX+YY interaction.

    A Clifford, symmetric gate that swaps the two qubit states while
    multiplying the :math:`|01\rangle` and :math:`|10\rangle` amplitudes
    by :math:`i`.

    **Matrix Representation:**

    .. math::

        iSWAP = R_{XX+YY}(-\frac{\pi}{2})
          = exp(i \frac{\pi}{4} (X{\otimes}X+Y{\otimes}Y)) =
            \begin{pmatrix}
                1 & 0 & 0 & 0 \\
                0 & 0 & i & 0 \\
                0 & i & 0 & 0 \\
                0 & 0 & 0 & 1
            \end{pmatrix}

    This gate is equivalent to a SWAP up to a diagonal
    :math:`\mathrm{diag}(1, i, i, 1)` phase.
    """

    def __init__(self):
        """Create new iSwap gate."""
        super().__init__('iswap', 2, [])

    def _define(self):
        """Define the gate via its reference decomposition:

        .. parsed-literal::

                 ┌───┐┌───┐     ┌───┐
            q_0: ┤ S ├┤ H ├──■──┤ X ├─────
                 ├───┤└───┘┌─┴─┐└─┬─┘┌───┐
            q_1: ┤ S ├─────┤ X ├──■──┤ H ├
                 └───┘     └───┘     └───┘
        """
        # pylint: disable=cyclic-import
        from qiskit.circuit.quantumcircuit import QuantumCircuit
        from .h import HGate
        from .s import SGate
        from .x import CXGate

        q = QuantumRegister(2, 'q')
        circuit = QuantumCircuit(q, name=self.name)
        # Same gate sequence as the reference implementation above.
        circuit._append(SGate(), [q[0]], [])
        circuit._append(SGate(), [q[1]], [])
        circuit._append(HGate(), [q[0]], [])
        circuit._append(CXGate(), [q[0], q[1]], [])
        circuit._append(CXGate(), [q[1], q[0]], [])
        circuit._append(HGate(), [q[1]], [])
        self.definition = circuit

    def to_matrix(self):
        """Return a numpy.array for the iSWAP gate."""
        matrix = np.zeros((4, 4), dtype=complex)
        matrix[0, 0] = 1
        matrix[1, 2] = 1j
        matrix[2, 1] = 1j
        matrix[3, 3] = 1
        return matrix
| [
"vinfinitysailor@gmail.com"
] | vinfinitysailor@gmail.com |
78a63cec4223431150d0dde5618a6e42f56ad4dc | 868ac4e558cf5fe945e8b557564f34f79b3ad01e | /purity_fb/purity_fb_1dot9/models/support.py | 70107a9cc30e0ba79af4174a90200ad49ebb8729 | [
"Apache-2.0"
] | permissive | mabdelhafez/purity_fb_python_client | f4253ce8497fb3cff648e0a0cd1e567f48129fa7 | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | refs/heads/master | 2022-04-20T09:24:22.031408 | 2020-04-20T22:11:32 | 2020-04-20T22:15:44 | 257,372,596 | 0 | 0 | NOASSERTION | 2020-04-20T18:40:24 | 2020-04-20T18:40:23 | null | UTF-8 | Python | false | false | 10,381 | py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Support(object):
    """Array support settings (phone-home and remote-assist).

    Originally produced by the swagger code generator; this is a
    behaviorally identical, hand-written equivalent.
    """

    # Attribute name -> swagger type string; to_dict() walks these keys.
    swagger_types = {
        'name': 'str',
        'created': 'int',
        'phonehome_enabled': 'bool',
        'proxy': 'str',
        'remote_assist_active': 'bool',
        'remote_assist_opened': 'str',
        'remote_assist_expires': 'str',
        'remote_assist_status': 'str',
        'remote_assist_paths': 'list[SupportRemoteAssistPaths]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'name',
        'created': 'created',
        'phonehome_enabled': 'phonehome_enabled',
        'proxy': 'proxy',
        'remote_assist_active': 'remote_assist_active',
        'remote_assist_opened': 'remote_assist_opened',
        'remote_assist_expires': 'remote_assist_expires',
        'remote_assist_status': 'remote_assist_status',
        'remote_assist_paths': 'remote_assist_paths'
    }

    def __init__(self, name=None, created=None, phonehome_enabled=None, proxy=None, remote_assist_active=None, remote_assist_opened=None, remote_assist_expires=None, remote_assist_status=None, remote_assist_paths=None):
        """
        Support - a model defined in Swagger
        """
        provided = {
            'name': name,
            'created': created,
            'phonehome_enabled': phonehome_enabled,
            'proxy': proxy,
            'remote_assist_active': remote_assist_active,
            'remote_assist_opened': remote_assist_opened,
            'remote_assist_expires': remote_assist_expires,
            'remote_assist_status': remote_assist_status,
            'remote_assist_paths': remote_assist_paths,
        }
        # Every backing slot starts as None; only explicitly supplied values
        # are routed through the property setters.
        for attr, value in provided.items():
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)

    def _simple_property(attr, doc):
        """Build a plain get/set property backed by '_<attr>'.

        (Class-body helper only; deleted below so it is not part of the API.)
        """
        def _get(self):
            return getattr(self, '_' + attr)

        def _set(self, value):
            setattr(self, '_' + attr, value)

        return property(_get, _set, doc=doc)

    name = _simple_property(
        'name', "name of the object (e.g., a file system or snapshot)")
    created = _simple_property(
        'created', "creation timestamp of the object")
    phonehome_enabled = _simple_property(
        'phonehome_enabled', "Is phonehome of logs enabled?")
    proxy = _simple_property(
        'proxy',
        "Server to use as the HTTP or HTTPS proxy. Specify the server name, "
        "including the scheme and proxy port number.")
    remote_assist_active = _simple_property(
        'remote_assist_active',
        "The switch to open all remote-assist sessions. Modifiable.")
    remote_assist_opened = _simple_property(
        'remote_assist_opened', "The time when the session opened.")
    remote_assist_expires = _simple_property(
        'remote_assist_expires', "The time when the session expires.")
    remote_assist_status = _simple_property(
        'remote_assist_status',
        "The status of the remote-assist sessions. Possible values are "
        "connected, partially_connected, reconnecting, and disconnected.")
    remote_assist_paths = _simple_property(
        'remote_assist_paths', "The remote-assist paths.")

    del _simple_property

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return isinstance(other, Support) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
11fe2fa01c814fae5d0430170f17bba2e1579500 | 9d07335de5a17453bf8ae290d70993d7b20dddcd | /.history/rw_visual_20210223182409.py | 873abd9382e1b2417301033de09af55b5ee79f83 | [] | no_license | wh-debug/Matplotlib | 8d12291cd4135b3b42c185e6700f22c627ddb046 | b4f5bf63d977620f799d953c67b262c75344a1cb | refs/heads/master | 2023-03-14T10:09:33.602492 | 2021-02-23T13:51:21 | 2021-02-23T13:51:21 | 340,374,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | '''
Author: your name
Date: 2021-02-23 18:21:32
LastEditTime: 2021-02-23 18:24:09
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Matplotlib\rw_visual.py
'''
import matplotlib.pyplot as plt
from randowwalk import Randomwalk
rw = Randomwalk()
re.fill_walk()
plt.style.use('classic')
fig, ax = plt.subplots()
ax.scatter(rw.x_values, rw.y_values, s=15)
plt.show() | [
"1813763848@qq.com"
] | 1813763848@qq.com |
7e50e95d03e70a3bfb183699c250db14e12f123e | e9539de5b8832e2a09365917fe201a945bf5d99b | /leetcode415.py | e00d00059a0b342b775f908c101966fc440b5988 | [] | no_license | JoshuaW1990/leetcode-session1 | 56d57df30b21ccade3fe54e3fd56a2b3383bd793 | 6fc170c04fadec6966fb7938a07474d4ee107b61 | refs/heads/master | 2021-09-20T16:18:15.640839 | 2018-08-12T09:40:51 | 2018-08-12T09:40:51 | 76,912,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
reversed_num1 = list(reversed(num1))
reversed_num2 = list(reversed(num2))
length = max(len(num1), len(num2))
res = [0 for _ in xrange(length + 1)]
for i in xrange(length):
if i < len(reversed_num1):
digit1 = reversed_num1[i]
else:
digit1 = '0'
if i < len(reversed_num2):
digit2 = reversed_num2[i]
else:
digit2 = '0'
res[i] += int(digit1) + int(digit2)
res[i + 1] += res[i] / 10
res[i] %= 10
while len(res) > 1 and res[-1] == 0:
res.pop()
return ''.join(map(str, reversed(res))) | [
"Jun.Wang@tufts.edu"
] | Jun.Wang@tufts.edu |
22aadef6ca46681513ceb1a412a170568db64286 | 3ace18551fdcd8d27d5e1542e24889c54b1de3a6 | /extpickle.py | a6a349e5d8bd2968be173afcdf1dcfd85f2b3bee | [
"BSD-2-Clause"
] | permissive | albertz/extpickle | 9d7436d140af1373c88c755c7a53b38d525a6a58 | 4727fa8957e66472f77ae411198c56bceee5091e | refs/heads/master | 2021-01-19T04:52:22.706548 | 2016-06-17T17:07:36 | 2016-06-17T17:07:36 | 60,965,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,997 | py |
import sys
import types
from importlib import import_module
import marshal
import pickle
PY3 = sys.version_info[0] >= 3

# Python 2 and Python 3 expose function internals under different attribute
# names (func_code vs __code__, etc.). These helpers hide that difference so
# the pickling code below can stay version-agnostic.
if PY3:
  def get_func_closure(f): return f.__closure__
  # (code, globals[, name[, argdefs[, closure]]])
  def get_func_tuple(f):
    return (
      f.__code__,
      f.__globals__,
      f.__name__,
      f.__defaults__,
      f.__closure__,
    )
else:
  def get_func_closure(f): return f.func_closure
  # Same ordering as the PY3 variant: matches the FunctionType constructor.
  def get_func_tuple(f):
    return (
      f.func_code,
      f.func_globals,
      f.func_name,
      f.func_defaults,
      f.func_closure,
    )
Unpickler = pickle.Unpickler  # re-exported unchanged; only pickling is extended
# Build a trivial closure just to obtain a CPython "cell" instance, so we can
# derive CellType (there is no public name for it in older Pythons).
_closure = (lambda x: lambda: x)(0)
# noinspection PyUnresolvedReferences
_cell = get_func_closure(_closure)[0]
CellType = type(_cell)
ModuleType = type(sys)
# noinspection PyUnresolvedReferences
DictType = dict if PY3 else types.DictionaryType
# Py3 has no `buffer` type; keep a dummy so dispatch registration still works.
if PY3:
  class BufferType: "Dummy"
  def make_buffer(*args): assert False
else:
  # noinspection PyUnresolvedReferences
  make_buffer = buffer
  # noinspection PyUnresolvedReferences
  BufferType = types.BufferType
  def bytes(x, *args): return str(x)
# Old-style classes only exist on Py2; on Py3 OldStyleClass is a dummy and
# NewStyleClass is simply `type`.
if PY3:
  _old_style_class = None
  class OldStyleClass: "Dummy"
  class _new_style_class: pass
  NewStyleClass = type
else:
  class _old_style_class: pass
  class _new_style_class(object): pass
  OldStyleClass = type(_old_style_class)  # == types.ClassType (classobj)
  NewStyleClass = type(_new_style_class)  # (type)
# Numpy support is optional; without it numpy_ndarray is a dummy sentinel.
try:
  import numpy
  numpy_ndarray = numpy.ndarray
except ImportError:
  numpy = None
  class numpy_ndarray: "Dummy"
def makeFuncCell(value):
  """Box `value` into a fresh closure cell (as stored in a function closure)."""
  wrapper = lambda: value  # referencing `value` forces it into a cell
  return get_func_closure(wrapper)[0]
def getModuleDict(modname):
  """Import `modname` (if not already imported) and return its module dict."""
  return import_module(modname).__dict__
def getModNameForModDict(obj):
"""
:type obj: dict
:rtype: str | None
:returns The module name or None. It will not return '__main__' in any case
because that likely will not be the same in the unpickling environment.
"""
mods = {id(mod.__dict__): modname for (modname, mod) in sys.modules.items() if mod and modname != "__main__"}
modname = mods.get(id(obj), None)
return modname
def getNormalDict(d):
  """
  :type d: dict[str] | dictproxy
  :rtype: dict[str]
  Copy `d` into a plain dict, dropping getset_descriptor entries
  (new-style class __dict__s contain those and they cannot be pickled).
  """
  return {
    k: v for k, v in d.items()
    if not isinstance(v, types.GetSetDescriptorType)
  }
def make_numpy_ndarray_fromstring(s, dtype, shape):
  """Rebuild a Numpy array from its raw bytes (inverse of ndarray.tostring).

  :param bytes s: raw little-level array data
  :param str dtype: dtype string, e.g. "float32"
  :param tuple[int] shape: target shape
  :rtype: numpy.ndarray
  """
  import numpy
  # numpy.fromstring is deprecated for binary data; frombuffer is the
  # supported replacement. frombuffer returns a read-only view over `s`,
  # so copy() restores the original behavior of a writable, owning array.
  return numpy.frombuffer(s, dtype=dtype).reshape(shape).copy()
# On Py3, pickle.Pickler may be the C-accelerated class whose dispatch table
# cannot be extended; _Pickler is the pure-Python one, so prefer it.
_BasePickler = getattr(pickle, "_Pickler", pickle.Pickler)


class Pickler(_BasePickler):
  """
  We extend the standard Pickler to be able to pickle some more types,
  such as lambdas and functions, code, func cells, buffer and more.
  """

  def __init__(self, *args, **kwargs):
    # Default to the highest protocol unless the caller chose one explicitly.
    if not "protocol" in kwargs:
      kwargs["protocol"] = pickle.HIGHEST_PROTOCOL
    _BasePickler.__init__(self, *args, **kwargs)
  # Own copy of the dispatch table so the extensions below do not leak into
  # the base class (and thus into unrelated pickle users).
  dispatch = _BasePickler.dispatch.copy()

  def save_func(self, obj):
    # Prefer a plain global reference when the function is importable.
    try:
      self.save_global(obj)
      return
    except pickle.PicklingError:
      pass
    # Otherwise serialize via FunctionType(code, globals, name, defaults, closure).
    assert type(obj) is types.FunctionType
    self.save(types.FunctionType)
    self.save(get_func_tuple(obj))
    self.write(pickle.REDUCE)
    if id(obj) not in self.memo:  # Could be if we recursively landed here. See also pickle.save_tuple().
      self.memoize(obj)
  dispatch[types.FunctionType] = save_func

  def save_method(self, obj):
    # Prefer a plain global reference when the method is importable.
    try:
      self.save_global(obj)
      return
    except pickle.PicklingError:
      pass
    # NOTE(review): im_func/im_self/im_class exist only on Py2 bound methods —
    # this branch presumably is Py2-only; confirm before relying on it in Py3.
    assert type(obj) is types.MethodType
    self.save(types.MethodType)
    self.save((obj.im_func, obj.im_self, obj.im_class))
    self.write(pickle.REDUCE)
    self.memoize(obj)
  dispatch[types.MethodType] = save_method

  def save_code(self, obj):
    # Code objects round-trip through marshal, which supports them natively.
    assert type(obj) is types.CodeType
    self.save(marshal.loads)
    self.save((marshal.dumps(obj),))
    self.write(pickle.REDUCE)
    self.memoize(obj)
  dispatch[types.CodeType] = save_code

  def save_cell(self, obj):
    # Closure cells are rebuilt via makeFuncCell(cell_contents).
    assert type(obj) is CellType
    self.save(makeFuncCell)
    self.save((obj.cell_contents,))
    self.write(pickle.REDUCE)
    self.memoize(obj)
  dispatch[CellType] = save_cell

  # We also search for module dicts and reference them.
  # This is for FunctionType.func_globals.
  def intellisave_dict(self, obj):
    modname = getModNameForModDict(obj)
    if modname:
      # The dict is some module's __dict__: store only the module name.
      self.save(getModuleDict)
      self.save((modname,))
      self.write(pickle.REDUCE)
      self.memoize(obj)
      return
    self.save_dict(obj)
  dispatch[DictType] = intellisave_dict

  def save_module(self, obj):
    modname = getModNameForModDict(obj.__dict__)
    if modname:
      # Importable module: store only its name and re-import on load.
      self.save(import_module)
      self.save((modname,))
      self.write(pickle.REDUCE)
      self.memoize(obj)
      return
    # We could maybe construct it manually. For now, just fail.
    raise pickle.PicklingError('cannot pickle module %r' % obj)
  dispatch[ModuleType] = save_module

  def save_buffer(self, obj):
    # Py2-only: buffer is rebuilt from its string contents.
    self.save(make_buffer)
    self.save((str(obj),))
    self.write(pickle.REDUCE)
  dispatch[BufferType] = save_buffer

  def save_ndarray(self, obj):
    # For some reason, Numpy fromstring/tostring is faster than Numpy loads/dumps.
    self.save(make_numpy_ndarray_fromstring)
    self.save((obj.tostring(), str(obj.dtype), obj.shape))
    self.write(pickle.REDUCE)
  dispatch[numpy_ndarray] = save_ndarray

  # Overwrite to avoid the broken pickle.whichmodule() which might return "__main__".
  def save_global(self, obj, name=None):
    assert obj
    assert id(obj) not in self.memo
    if name is None:
      name = obj.__name__
    module = getattr(obj, "__module__", None)
    if module is None or module == "__main__":
      module = pickle.whichmodule(obj, name)
    if module is None or module == "__main__":
      raise pickle.PicklingError(
        "Can't pickle %r: module not found: %s" % (obj, module))
    # Verify the (module, name) pair really resolves back to this object, the
    # same sanity check the stdlib does before emitting a GLOBAL reference.
    try:
      __import__(module)
      mod = sys.modules[module]
      klass = getattr(mod, name)
    except (ImportError, KeyError, AttributeError):
      raise pickle.PicklingError(
        "Can't pickle %r: it's not found as %s.%s" % (obj, module, name))
    else:
      if klass is not obj:
        raise pickle.PicklingError(
          "Can't pickle %r: it's not the same object as %s.%s" % (obj, module, name))
    # The GLOBAL opcode is newline-delimited, so neither part may contain one.
    assert "\n" not in module
    assert "\n" not in name
    self.write(pickle.GLOBAL + bytes(module + '\n' + name + '\n', "utf8"))
    self.memoize(obj)

  def save_type(self, obj):
    try:
      self.save_global(obj)
      return
    except pickle.PicklingError:
      pass
    # Some types in the types modules are not correctly referenced,
    # such as types.FunctionType. This is fixed here.
    for modname in ["types"]:
      moddict = sys.modules[modname].__dict__
      for modobjname, modobj in moddict.items():
        if modobj is obj:
          self.write(pickle.GLOBAL + bytes(modname + '\n' + modobjname + '\n', "utf8"))
          self.memoize(obj)
          return
    # Generic serialization of new-style classes.
    self.save(type)
    self.save((obj.__name__, obj.__bases__, getNormalDict(obj.__dict__)))
    self.write(pickle.REDUCE)
    self.memoize(obj)
  dispatch[NewStyleClass] = save_type

  # This is about old-style classes.
  def save_class(self, cls):
    try:
      # First try with a global reference. This works normally. This is the default original pickle behavior.
      self.save_global(cls)
      return
    except pickle.PicklingError:
      pass
    # It didn't worked. But we can still serialize it.
    # Note that this could potentially confuse the code if the class is reference-able in some other way
    # - then we will end up with two versions of the same class.
    self.save(OldStyleClass)
    self.save((cls.__name__, cls.__bases__, cls.__dict__))
    self.write(pickle.REDUCE)
    self.memoize(cls)
    return
  dispatch[OldStyleClass] = save_class

  # avoid pickling instances of ourself. this mostly doesn't make sense and leads to trouble.
  # however, also doesn't break. it mostly makes sense to just ignore.
  def __getstate__(self): return None
  def __setstate__(self, state): pass
| [
"albert.zeyer@rwth-aachen.de"
] | albert.zeyer@rwth-aachen.de |
dc0f6e6e1f31a83777b94b3ca2f56521a5f9a717 | 3ccd609f68016aad24829b8dd3cdbb535fb0ff6d | /python/bpy/types/ArmatureGpencilModifier.py | 2455daa657818cda30d0712d8e5bd83aeba06697 | [] | no_license | katharostech/blender_externs | 79b2eed064fd927e3555aced3e2eb8a45840508e | fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d | refs/heads/master | 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null | UTF-8 | Python | false | false | 204 | py | class ArmatureGpencilModifier:
invert_vertex_group = None
object = None
use_bone_envelopes = None
use_deform_preserve_volume = None
use_vertex_groups = None
vertex_group = None
| [
"troyedwardsjr@gmail.com"
] | troyedwardsjr@gmail.com |
d46e09613a217555e6e5ed34418ab5ecf17dcf85 | fbe77e9e2a53a4600a1d9b00b5f2c29ee3e8c59a | /externals/binaryen/test/waterfall/src/proc.py | 84bafe0942961b131f95feb6ae137242f3dae47a | [
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | permissive | AcuteAngleCloud/Acute-Angle-Chain | 8d4a1ad714f6de1493954326e109b6af112561b9 | 5ea50bee042212ccff797ece5018c64f3f50ceff | refs/heads/master | 2021-04-26T21:52:25.560457 | 2020-03-21T07:29:06 | 2020-03-21T07:29:06 | 124,164,376 | 10 | 5 | MIT | 2020-07-16T07:14:45 | 2018-03-07T02:03:53 | C++ | UTF-8 | Python | false | false | 1,838 | py | #! /usr/bin/env python
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is intended to be a drop-in replacement for the standard
# subprocess module, with the difference that it logs commands before it runs
# them. Everything not overriden should pass through to the subprocess module
# via the import trick below.
# Imports subprocess in its own namespace so we can always refer directly to
# its attributes.
import subprocess
import os
import sys
# Imports all of subprocess into the current namespace, effectively
# re-exporting everything.
from subprocess import * # flake8: noqa
# Now we can override any parts of subprocess we want, while leaving the rest.
def check_call(cmd, **kwargs):
  """Log `cmd` and the working directory, then run subprocess.check_call.

  :param cmd: argument vector to execute (list of strings).
  :param kwargs: forwarded unchanged to subprocess.check_call (e.g. cwd, env).
  :raises subprocess.CalledProcessError: if the command exits non-zero.
  """
  cwd = kwargs.get('cwd', os.getcwd())
  c = ' '.join('"' + c + '"' if ' ' in c else c for c in cmd)
  # print() call form (not the Py2-only statement) so the module also
  # imports cleanly under Python 3; identical output on Python 2.
  print('subprocess.check_call(`%s`, cwd=`%s`)' % (c, cwd))
  sys.stdout.flush()
  subprocess.check_call(cmd, **kwargs)
  sys.stdout.flush()
def check_output(cmd, **kwargs):
  """Log `cmd` and the working directory, then run subprocess.check_output.

  :param cmd: argument vector to execute (list of strings).
  :param kwargs: forwarded unchanged to subprocess.check_output.
  :return: the command's captured stdout.
  :raises subprocess.CalledProcessError: if the command exits non-zero.
  """
  cwd = kwargs.get('cwd', os.getcwd())
  c = ' '.join('"' + c + '"' if ' ' in c else c for c in cmd)
  # print() call form (not the Py2-only statement) so the module also
  # imports cleanly under Python 3; identical output on Python 2.
  print('subprocess.check_output(`%s`, cwd=`%s`)' % (c, cwd))
  sys.stdout.flush()
  try:
    return subprocess.check_output(cmd, **kwargs)
  finally:
    # Flush even when the subprocess fails, so log lines stay ordered.
    sys.stdout.flush()
| [
"caokun@acuteangle.cn"
] | caokun@acuteangle.cn |
8ed4f6f0e37c328482f585887457c3d4bff0a522 | 87033ff3349f069b18081730e98412bf56804996 | /google/cloud/websecurityscanner_v1beta/services/web_security_scanner/client.py | 29a0910d2a21f3859a6dc5849ab3744f34007793 | [
"Apache-2.0"
] | permissive | renovate-bot/python-websecurityscanner | 9ad4034e63594f8e98bf2c79212f42434cf641b8 | f9d530918b6da33139ae3dd3512475ea65ab8086 | refs/heads/master | 2023-06-08T07:01:34.872433 | 2021-07-27T02:26:01 | 2021-07-27T02:26:01 | 237,301,948 | 0 | 0 | Apache-2.0 | 2020-01-30T20:44:21 | 2020-01-30T20:44:20 | null | UTF-8 | Python | false | false | 61,676 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.websecurityscanner_v1beta.services.web_security_scanner import pagers
from google.cloud.websecurityscanner_v1beta.types import crawled_url
from google.cloud.websecurityscanner_v1beta.types import finding
from google.cloud.websecurityscanner_v1beta.types import finding_addon
from google.cloud.websecurityscanner_v1beta.types import finding_type_stats
from google.cloud.websecurityscanner_v1beta.types import scan_config
from google.cloud.websecurityscanner_v1beta.types import scan_config as gcw_scan_config
from google.cloud.websecurityscanner_v1beta.types import scan_run
from google.cloud.websecurityscanner_v1beta.types import scan_run_error_trace
from google.cloud.websecurityscanner_v1beta.types import scan_run_warning_trace
from google.cloud.websecurityscanner_v1beta.types import web_security_scanner
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import WebSecurityScannerTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import WebSecurityScannerGrpcTransport
from .transports.grpc_asyncio import WebSecurityScannerGrpcAsyncIOTransport
class WebSecurityScannerClientMeta(type):
    """Metaclass for the WebSecurityScanner client.

    Keeps class-level support objects (currently the transport registry)
    off the client instances themselves.
    """

    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[WebSecurityScannerTransport]]
    _transport_registry["grpc"] = WebSecurityScannerGrpcTransport
    _transport_registry["grpc_asyncio"] = WebSecurityScannerGrpcAsyncIOTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[WebSecurityScannerTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: Name of the desired transport. When omitted, the first
                transport in the registry is returned.

        Returns:
            The transport class to use.
        """
        if not label:
            # Default: first registered transport (registry preserves order).
            return next(iter(cls._transport_registry.values()))
        return cls._transport_registry[label]
class WebSecurityScannerClient(metaclass=WebSecurityScannerClientMeta):
"""Cloud Web Security Scanner Service identifies security
vulnerabilities in web applications hosted on Google Cloud
Platform. It crawls your application, and attempts to exercise
as many user inputs and event handlers as possible.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "websecurityscanner.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
WebSecurityScannerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
WebSecurityScannerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> WebSecurityScannerTransport:
        """Returns the transport used by the client instance.

        Returns:
            WebSecurityScannerTransport: The transport used by the client
                instance.
        """
        # NOTE(review): _transport is presumably assigned during client
        # construction (not visible in this chunk) — confirm in __init__.
        return self._transport
@staticmethod
def finding_path(
project: str, scan_config: str, scan_run: str, finding: str,
) -> str:
"""Returns a fully-qualified finding string."""
return "projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}".format(
project=project,
scan_config=scan_config,
scan_run=scan_run,
finding=finding,
)
@staticmethod
def parse_finding_path(path: str) -> Dict[str, str]:
"""Parses a finding path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/scanConfigs/(?P<scan_config>.+?)/scanRuns/(?P<scan_run>.+?)/findings/(?P<finding>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def scan_config_path(project: str, scan_config: str,) -> str:
"""Returns a fully-qualified scan_config string."""
return "projects/{project}/scanConfigs/{scan_config}".format(
project=project, scan_config=scan_config,
)
@staticmethod
def parse_scan_config_path(path: str) -> Dict[str, str]:
"""Parses a scan_config path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/scanConfigs/(?P<scan_config>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def scan_run_path(project: str, scan_config: str, scan_run: str,) -> str:
"""Returns a fully-qualified scan_run string."""
return "projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}".format(
project=project, scan_config=scan_config, scan_run=scan_run,
)
@staticmethod
def parse_scan_run_path(path: str) -> Dict[str, str]:
"""Parses a scan_run path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/scanConfigs/(?P<scan_config>.+?)/scanRuns/(?P<scan_run>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, WebSecurityScannerTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the web security scanner client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, WebSecurityScannerTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict of client options for convenience; normalize
        # to a ClientOptions instance either way.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # NOTE(review): distutils.util.strtobool is deprecated (PEP 632);
        # consider replacing with a local parser when dropping old Pythons.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )
        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicitly-provided cert source wins over the ADC default.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None
        # Figure out which api endpoint to use.
        # Precedence: explicit client_options.api_endpoint, then the
        # GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS endpoint only when a client cert is in play.
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, WebSecurityScannerTransport):
            # transport is a WebSecurityScannerTransport instance.
            # A pre-built transport already carries credentials/scopes, so
            # supplying them here as well is ambiguous and rejected.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # `transport` is a string (or None): resolve it to a transport
            # class and construct it with the negotiated endpoint/cert.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=(
                    Transport == type(self).get_transport_class("grpc")
                    or Transport == type(self).get_transport_class("grpc_asyncio")
                ),
            )
def create_scan_config(
self,
request: web_security_scanner.CreateScanConfigRequest = None,
*,
parent: str = None,
scan_config: gcw_scan_config.ScanConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcw_scan_config.ScanConfig:
r"""Creates a new ScanConfig.
Args:
request (google.cloud.websecurityscanner_v1beta.types.CreateScanConfigRequest):
The request object. Request for the `CreateScanConfig`
method.
parent (str):
Required. The parent resource name
where the scan is created, which should
be a project resource name in the format
'projects/{projectId}'.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
scan_config (google.cloud.websecurityscanner_v1beta.types.ScanConfig):
Required. The ScanConfig to be
created.
This corresponds to the ``scan_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ScanConfig:
A ScanConfig resource contains the
configurations to launch a scan.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, scan_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.CreateScanConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.CreateScanConfigRequest):
request = web_security_scanner.CreateScanConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if scan_config is not None:
request.scan_config = scan_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_scan_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_scan_config(
self,
request: web_security_scanner.DeleteScanConfigRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an existing ScanConfig and its child
resources.
Args:
request (google.cloud.websecurityscanner_v1beta.types.DeleteScanConfigRequest):
The request object. Request for the `DeleteScanConfig`
method.
name (str):
Required. The resource name of the
ScanConfig to be deleted. The name
follows the format of
'projects/{projectId}/scanConfigs/{scanConfigId}'.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.DeleteScanConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.DeleteScanConfigRequest):
request = web_security_scanner.DeleteScanConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_scan_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def get_scan_config(
self,
request: web_security_scanner.GetScanConfigRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> scan_config.ScanConfig:
r"""Gets a ScanConfig.
Args:
request (google.cloud.websecurityscanner_v1beta.types.GetScanConfigRequest):
The request object. Request for the `GetScanConfig`
method.
name (str):
Required. The resource name of the
ScanConfig to be returned. The name
follows the format of
'projects/{projectId}/scanConfigs/{scanConfigId}'.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ScanConfig:
A ScanConfig resource contains the
configurations to launch a scan.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.GetScanConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.GetScanConfigRequest):
request = web_security_scanner.GetScanConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_scan_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_scan_configs(
self,
request: web_security_scanner.ListScanConfigsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListScanConfigsPager:
r"""Lists ScanConfigs under a given project.
Args:
request (google.cloud.websecurityscanner_v1beta.types.ListScanConfigsRequest):
The request object. Request for the `ListScanConfigs`
method.
parent (str):
Required. The parent resource name,
which should be a project resource name
in the format 'projects/{projectId}'.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.services.web_security_scanner.pagers.ListScanConfigsPager:
Response for the ListScanConfigs method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.ListScanConfigsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.ListScanConfigsRequest):
request = web_security_scanner.ListScanConfigsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_scan_configs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListScanConfigsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def update_scan_config(
self,
request: web_security_scanner.UpdateScanConfigRequest = None,
*,
scan_config: gcw_scan_config.ScanConfig = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcw_scan_config.ScanConfig:
r"""Updates a ScanConfig. This method support partial
update of a ScanConfig.
Args:
request (google.cloud.websecurityscanner_v1beta.types.UpdateScanConfigRequest):
The request object. Request for the
`UpdateScanConfigRequest` method.
scan_config (google.cloud.websecurityscanner_v1beta.types.ScanConfig):
Required. The ScanConfig to be
updated. The name field must be set to
identify the resource to be updated. The
values of fields not covered by the mask
will be ignored.
This corresponds to the ``scan_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ScanConfig:
A ScanConfig resource contains the
configurations to launch a scan.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([scan_config, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.UpdateScanConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.UpdateScanConfigRequest):
request = web_security_scanner.UpdateScanConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if scan_config is not None:
request.scan_config = scan_config
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_scan_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("scan_config.name", request.scan_config.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def start_scan_run(
self,
request: web_security_scanner.StartScanRunRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> scan_run.ScanRun:
r"""Start a ScanRun according to the given ScanConfig.
Args:
request (google.cloud.websecurityscanner_v1beta.types.StartScanRunRequest):
The request object. Request for the `StartScanRun`
method.
name (str):
Required. The resource name of the
ScanConfig to be used. The name follows
the format of
'projects/{projectId}/scanConfigs/{scanConfigId}'.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ScanRun:
A ScanRun is a output-only resource
representing an actual run of the scan.
Next id: 12
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.StartScanRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.StartScanRunRequest):
request = web_security_scanner.StartScanRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.start_scan_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_scan_run(
self,
request: web_security_scanner.GetScanRunRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> scan_run.ScanRun:
r"""Gets a ScanRun.
Args:
request (google.cloud.websecurityscanner_v1beta.types.GetScanRunRequest):
The request object. Request for the `GetScanRun` method.
name (str):
Required. The resource name of the
ScanRun to be returned. The name follows
the format of
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ScanRun:
A ScanRun is a output-only resource
representing an actual run of the scan.
Next id: 12
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.GetScanRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.GetScanRunRequest):
request = web_security_scanner.GetScanRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_scan_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_scan_runs(
self,
request: web_security_scanner.ListScanRunsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListScanRunsPager:
r"""Lists ScanRuns under a given ScanConfig, in
descending order of ScanRun stop time.
Args:
request (google.cloud.websecurityscanner_v1beta.types.ListScanRunsRequest):
The request object. Request for the `ListScanRuns`
method.
parent (str):
Required. The parent resource name,
which should be a scan resource name in
the format
'projects/{projectId}/scanConfigs/{scanConfigId}'.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.services.web_security_scanner.pagers.ListScanRunsPager:
Response for the ListScanRuns method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.ListScanRunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.ListScanRunsRequest):
request = web_security_scanner.ListScanRunsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_scan_runs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListScanRunsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def stop_scan_run(
self,
request: web_security_scanner.StopScanRunRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> scan_run.ScanRun:
r"""Stops a ScanRun. The stopped ScanRun is returned.
Args:
request (google.cloud.websecurityscanner_v1beta.types.StopScanRunRequest):
The request object. Request for the `StopScanRun`
method.
name (str):
Required. The resource name of the
ScanRun to be stopped. The name follows
the format of
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ScanRun:
A ScanRun is a output-only resource
representing an actual run of the scan.
Next id: 12
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.StopScanRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.StopScanRunRequest):
request = web_security_scanner.StopScanRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.stop_scan_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_crawled_urls(
self,
request: web_security_scanner.ListCrawledUrlsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCrawledUrlsPager:
r"""List CrawledUrls under a given ScanRun.
Args:
request (google.cloud.websecurityscanner_v1beta.types.ListCrawledUrlsRequest):
The request object. Request for the `ListCrawledUrls`
method.
parent (str):
Required. The parent resource name,
which should be a scan run resource name
in the format
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.services.web_security_scanner.pagers.ListCrawledUrlsPager:
Response for the ListCrawledUrls method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.ListCrawledUrlsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.ListCrawledUrlsRequest):
request = web_security_scanner.ListCrawledUrlsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_crawled_urls]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCrawledUrlsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_finding(
self,
request: web_security_scanner.GetFindingRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> finding.Finding:
r"""Gets a Finding.
Args:
request (google.cloud.websecurityscanner_v1beta.types.GetFindingRequest):
The request object. Request for the `GetFinding` method.
name (str):
Required. The resource name of the
Finding to be returned. The name follows
the format of
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}/findings/{findingId}'.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.Finding:
A Finding resource represents a
vulnerability instance identified during
a ScanRun.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.GetFindingRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.GetFindingRequest):
request = web_security_scanner.GetFindingRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_finding]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_findings(
self,
request: web_security_scanner.ListFindingsRequest = None,
*,
parent: str = None,
filter: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFindingsPager:
r"""List Findings under a given ScanRun.
Args:
request (google.cloud.websecurityscanner_v1beta.types.ListFindingsRequest):
The request object. Request for the `ListFindings`
method.
parent (str):
Required. The parent resource name,
which should be a scan run resource name
in the format
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Required. The filter expression. The expression must be
in the format: . Supported field: 'finding_type'.
Supported operator: '='.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.services.web_security_scanner.pagers.ListFindingsPager:
Response for the ListFindings method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.ListFindingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.ListFindingsRequest):
request = web_security_scanner.ListFindingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_findings]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListFindingsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def list_finding_type_stats(
self,
request: web_security_scanner.ListFindingTypeStatsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> web_security_scanner.ListFindingTypeStatsResponse:
r"""List all FindingTypeStats under a given ScanRun.
Args:
request (google.cloud.websecurityscanner_v1beta.types.ListFindingTypeStatsRequest):
The request object. Request for the
`ListFindingTypeStats` method.
parent (str):
Required. The parent resource name,
which should be a scan run resource name
in the format
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.websecurityscanner_v1beta.types.ListFindingTypeStatsResponse:
Response for the ListFindingTypeStats method.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a web_security_scanner.ListFindingTypeStatsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, web_security_scanner.ListFindingTypeStatsRequest):
request = web_security_scanner.ListFindingTypeStatsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_finding_type_stats]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
# Attach the installed library version to the default client info so it can
# be reported to the API in request headers; fall back to an un-versioned
# ClientInfo when the distribution metadata is unavailable (e.g. when the
# package is run from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-websecurityscanner",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Public re-export surface of this module.
__all__ = ("WebSecurityScannerClient",)
| [
"noreply@github.com"
] | renovate-bot.noreply@github.com |
064838b3c14fb7efa1e5da2a38c2ffa93073fad8 | 44846980df148e1a0621e8a359a7fd357482fd74 | /01-Defining_classes/06-Flower.py | d74019506df0fb22837b693e4de386c3b8f96df6 | [
"MIT"
class Flower:
    """A flower that becomes (and stays) happy once a single watering
    meets its water requirement."""

    def __init__(self, name, water_requirements):
        self.name = name
        self.water_requirements = water_requirements
        # Starts dry and unhappy.
        self.is_happy = False
        self.quantity = 0

    def water(self, quantity):
        """Record the latest watering amount and update happiness.

        Happiness is sticky: once reached, a later smaller watering does
        not reset it.
        """
        self.quantity = quantity
        if quantity >= self.water_requirements:
            self.is_happy = True

    def status(self):
        """Return a human-readable happiness report."""
        if not self.is_happy:
            return f'{self.name} is not happy'
        return f'{self.name} is happy'
# Demo: the flower needs 100 units of water; 50 is not enough (unhappy),
# a later full watering of 100 makes it happy.
flower = Flower("Lilly", 100)
flower.water(50)
print(flower.status())
flower.water(100)
print(flower.status())
| [
"alexander.beshkov@gmail.com"
] | alexander.beshkov@gmail.com |
# Favourite Netflix shows keyed by title; each entry holds basic metadata.
netflix_favs = {
    'Narcos': {
        'premiere_year': 2015,
        'total_seasons': 3,
        'rating': 5,
        'age_limit': '16+'
    },
    'The Good Place': {
        'premiere_year': 2016,
        'total_seasons': 3,
        'rating': 4,
        'age_limit': '13+'
    },
    'Sense8': {
        'premiere_year': 2015,
        'total_seasons': 2,
        'rating': 3,
        'age_limit': '16+'
    },
    'La niebla': {
        'premiere_year': 2017,
        'total_seasons': 1,
        'rating': 5,
        'age_limit': '16+'
    },
}

# Integer mean of the premiere years across all shows.
premiere_years = [show['premiere_year'] for show in netflix_favs.values()]
avg_premiere_years = sum(premiere_years) // len(premiere_years)
print(avg_premiere_years)
| [
"sdelquin@gmail.com"
] | sdelquin@gmail.com |
a6377abe573cb58c29683b449754f581e213f387 | 07d40ece1379dd95b6259b23f9358cafcd1daa36 | /business/my_page_business.py | 0eba7d4e43232d7ad421f9bb4555fbf044f451ba | [] | no_license | z1069867141/zzmx | b4c5881bae275b694a20649014439377cf916e46 | ab0266c935f6f21a158998dc84b5c02443f6d628 | refs/heads/master | 2022-08-24T06:49:24.834403 | 2020-05-24T17:06:40 | 2020-05-24T17:06:40 | 264,108,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | import os
import sys
sys.path.append(os.getcwd())
from handle.my_page_handle import my_p
from selenium import webdriver
import time
import pymysql
from mysql.mysql_function import mysql_function
class my_page(object):
    """Business-layer wrapper around the "my page" screen.

    Each click_* method taps the corresponding button through the
    low-level handle and then reports whether the page is in the
    expected state (login button visible).
    """

    def __init__(self, driver):
        # Low-level element handle for this page.
        self.mp_b = my_p(driver)

    def _tap(self, button):
        """Invoke a handle click callable, then verify the page state."""
        button()
        return self.check_shop_title()

    def click_set(self):
        return self._tap(self.mp_b.click_set_button)

    def click_talk(self):
        return self._tap(self.mp_b.click_talk_button)

    def click_login(self):
        return self._tap(self.mp_b.click_login_button)

    def click_all_order(self):
        return self._tap(self.mp_b.click_all_order_button)

    def click_wait_pay(self):
        return self._tap(self.mp_b.click_wait_pay_button)

    def click_good_to_be_received(self):
        return self._tap(self.mp_b.click_good_to_be_received_button)

    def click_to_be_delivered(self):
        return self._tap(self.mp_b.click_to_be_delivered_button)

    def click_received(self):
        return self._tap(self.mp_b.click_received_button)

    def click_my_wallet(self):
        return self._tap(self.mp_b.click_my_wallet_button)

    def click_my_favourite(self):
        return self._tap(self.mp_b.click_my_favourite_button)

    def click_my_customer_service(self):
        return self._tap(self.mp_b.click_my_customer_service_button)

    def check_shop_title(self):
        """Return True when the login button text equals "登录"; any
        lookup failure is treated as a mismatch (False)."""
        try:
            return self.mp_b.get_login_button_text() == "登录"
        except:
            return False
"919824370@qq.com"
] | 919824370@qq.com |
4ae106e866373b5dd410478098c0d0aed0281297 | a7f855efff14e0b15cffb3f035d8dc9f7f102afe | /mfb/extraMac/UTpackages/UTvolrend/UTVolumeLibrary.py | 4c446437e786ecadae139e63ffac5efdd7d37e4e | [] | no_license | BlenderCN-Org/FlipbookApp | 76fcd92644c4e18dd90885eeb49e5aecae28f6f0 | 0df2acebf76b40105812d2e3af8f0ef4784ab74c | refs/heads/master | 2020-05-27T14:33:25.330291 | 2014-07-10T17:47:29 | 2014-07-10T17:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,606 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
# Bindings to the compiled SWIG extension module.
import _UTVolumeLibrary
# NOTE: `new` is a Python 2-only module; this generated wrapper predates
# Python 3 and will not import there.
import new
new_instancemethod = new.instancemethod
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated attribute setter shared by all proxy classes.
    # Writes are routed through the class's __swig_setmethods__ table of
    # C-level property setters; with static=1 brand-new attribute names
    # are rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant of the setter: allows adding new attributes (static=0).
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # SWIG-generated attribute getter: resolve reads through the class's
    # __swig_getmethods__ table of C-level accessors.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name  # Python 2 raise syntax (generated code).
def _swig_repr(self):
    # Generic repr for SWIG proxies; degrades gracefully when the
    # underlying C pointer (`self.this`) is missing.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Choose a base class for the proxies: new-style `object` when available
# (Python >= 2.2), otherwise a dummy old-style class. _newclass records
# which branch was taken.
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
class VolumeRenderer(_object):
    """SWIG proxy for the compiled UT volume-rendering engine.

    Every method simply forwards to the _UTVolumeLibrary extension;
    attribute access is routed through the generated _swig_setattr /
    _swig_getattr helpers defined above. Do not edit by hand -- this
    class is machine-generated from the SWIG interface.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, VolumeRenderer, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, VolumeRenderer, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Wrap the newly created C++ object; older SWIG runtimes store the
        # pointer in a list, hence the append/assign fallback.
        this = _UTVolumeLibrary.new_VolumeRenderer(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _UTVolumeLibrary.delete_VolumeRenderer
    __del__ = lambda self : None;
    def initRenderer(*args): return _UTVolumeLibrary.VolumeRenderer_initRenderer(*args)
    def setAspectRatio(*args): return _UTVolumeLibrary.VolumeRenderer_setAspectRatio(*args)
    def setTextureSubCube(*args): return _UTVolumeLibrary.VolumeRenderer_setTextureSubCube(*args)
    def setQuality(*args): return _UTVolumeLibrary.VolumeRenderer_setQuality(*args)
    def getQuality(*args): return _UTVolumeLibrary.VolumeRenderer_getQuality(*args)
    def setNearPlane(*args): return _UTVolumeLibrary.VolumeRenderer_setNearPlane(*args)
    def getNearPlane(*args): return _UTVolumeLibrary.VolumeRenderer_getNearPlane(*args)
    def isShadedRenderingAvailable(*args): return _UTVolumeLibrary.VolumeRenderer_isShadedRenderingAvailable(*args)
    def enableShadedRendering(*args): return _UTVolumeLibrary.VolumeRenderer_enableShadedRendering(*args)
    def disableShadedRendering(*args): return _UTVolumeLibrary.VolumeRenderer_disableShadedRendering(*args)
    def uploadColorMappedData(*args): return _UTVolumeLibrary.VolumeRenderer_uploadColorMappedData(*args)
    def uploadColorMappedDataWithBorder(*args): return _UTVolumeLibrary.VolumeRenderer_uploadColorMappedDataWithBorder(*args)
    def testColorMappedData(*args): return _UTVolumeLibrary.VolumeRenderer_testColorMappedData(*args)
    def testColorMappedDataWithBorder(*args): return _UTVolumeLibrary.VolumeRenderer_testColorMappedDataWithBorder(*args)
    def uploadRGBAData(*args): return _UTVolumeLibrary.VolumeRenderer_uploadRGBAData(*args)
    def uploadGradients(*args): return _UTVolumeLibrary.VolumeRenderer_uploadGradients(*args)
    def calculateGradientsFromDensities(*args): return _UTVolumeLibrary.VolumeRenderer_calculateGradientsFromDensities(*args)
    def uploadColorMap(*args): return _UTVolumeLibrary.VolumeRenderer_uploadColorMap(*args)
    def getNumberOfPlanesRendered(*args): return _UTVolumeLibrary.VolumeRenderer_getNumberOfPlanesRendered(*args)
    def renderVolume(*args): return _UTVolumeLibrary.VolumeRenderer_renderVolume(*args)
    def uploadZeroPaddedData(*args): return _UTVolumeLibrary.VolumeRenderer_uploadZeroPaddedData(*args)
# Register the proxy class with the SWIG runtime type system, then
# re-export the module-level helpers from the compiled extension.
VolumeRenderer_swigregister = _UTVolumeLibrary.VolumeRenderer_swigregister
VolumeRenderer_swigregister(VolumeRenderer)
InitTexParameteri = _UTVolumeLibrary.InitTexParameteri
QueryExtension = _UTVolumeLibrary.QueryExtension
createNumArr = _UTVolumeLibrary.createNumArr
| [
"mike.c.pan@gmail.com"
] | mike.c.pan@gmail.com |
a27dd330b64895b45778896a3420a5b3299c3d2a | 7c8bd2e26fdabf1555e0150272ecf035f6c21bbd | /ps프로젝트/Tr/이진 검색 트리.py | fa0067dd613bfa12638a810af539b50dd098ff45 | [] | no_license | hyeokjinson/algorithm | 44090c2895763a0c53d48ff4084a96bdfc77f953 | 46c04e0f583d4c6ec4f51a24f19a373b173b3d5c | refs/heads/master | 2021-07-21T10:18:43.918149 | 2021-03-27T12:27:56 | 2021-03-27T12:27:56 | 245,392,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import sys
sys.setrecursionlimit(10**6)
def postorder(start, end):
    """Print, in postorder, the BST whose *preorder* occupies post[start:end+1].

    post[start] is the subtree root; the first later key greater than the
    root marks the beginning of the right subtree.
    """
    if start > end:
        return
    root = post[start]
    # Index of the first key that belongs to the right subtree (or one
    # past the end when every remaining key is smaller than the root).
    div = next((i for i in range(start + 1, end + 1) if post[i] > root),
               end + 1)
    postorder(start + 1, div - 1)   # left subtree: keys < root
    postorder(div, end)             # right subtree: keys > root
    print(root)
# Script entry point: read the BST's preorder key sequence from stdin
# (one integer per line, until EOF or a non-numeric line), then print the
# postorder traversal.
if __name__ == '__main__':
    post=[]
    count=0
    # NOTE(review): `count <= 10000` admits up to 10001 keys -- confirm
    # against the intended node limit of the problem statement.
    while count<=10000:
        try:
            num=int(input())
        except:
            break
        post.append(num)
        count+=1
    postorder(0,len(post)-1)
"hjson817@gmail.com"
] | hjson817@gmail.com |
4cd55693bc93d9e19bd3ab13a30a7a91bac1d33b | 161d7836e73fba496838c59ce7ee94bf685fb696 | /_unittests/ut_mokadi/test_speech_system.py | 30263604f1b1059df1b3c977cb98899e92b1e9f4 | [
"MIT"
] | permissive | sdpython/botadi | 80b985f21bdab5f917316348ed5f5cf9aa053c40 | 5e5464824a9c446ac567031245603205848558d3 | refs/heads/master | 2022-06-15T00:06:17.801753 | 2022-06-13T11:44:21 | 2022-06-13T11:44:21 | 163,576,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | # -*- coding: utf-8 -*-
"""
@brief test log(time=10s)
"""
import os
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import is_travis_or_appveyor, add_missing_development_version, ExtTestCase
class TestSpeechSystem(ExtTestCase):
    """Unit test for the MOKADI speech-recognition helper.

    The recognition call itself is currently disabled: the test warns and
    returns early (see the comment in :meth:`test_speech_system`).
    """

    def setUp(self):
        # Ensure in-development sibling packages are importable.
        add_missing_development_version(["jyquickhelper", "pymmails"],
                                        __file__, hide=True)

    def test_speech_system(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")

        if is_travis_or_appveyor():
            # no keys
            return

        # bugged
        # Test disabled: vocal_recognition_system does not return when fed
        # a wav file, so we emit a warning and bail out. Everything below
        # the `return` is intentionally unreachable until that is fixed.
        warnings.warn(
            "vocal_recognition_system does not return for a wav file.")
        return
        wav = os.path.join(os.path.abspath(
            os.path.dirname(__file__)), "data", "output.wav")
        with open(wav, "rb") as f:
            content = f.read()
        self.assertNotEmpty(content)
        # from ensae_teaching_cs.cspython import vocal_recognition_system
        # fLOG("start recognition")
        # res = vocal_recognition_system(content)
        # fLOG("end recognition")
        # fLOG(res)
        # self.assertTrue(isinstance(res, tuple))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
0d02876a6c11f287264f8d73c8660e9984834f4b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_notch.py | dd803a71f31f6a3836bc904e8c2875a9eca60c60 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py |
#calss header
class _NOTCH():
def __init__(self,):
self.name = "NOTCH"
self.definitions = [u'to cut a notch in something']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
447d838e87100cdb48f71811397a7365383257b2 | 5588ea35c23e9d2a1a6ff0d5400d83b5b6c9bead | /optbinning/binning/binning_information.py | 5fea471d46df13df8807fd723d7634f43cf383c8 | [
"Apache-2.0"
] | permissive | mnjenga2/optbinning | 378b97bc0c10a96399ce22d9f11bc21bf8f9533f | 25af0722e1bdf6ebc68cfc6f0ce0156ac9b2bcd8 | refs/heads/master | 2022-11-17T10:24:59.622236 | 2020-07-19T15:55:23 | 2020-07-19T15:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,685 | py | """
Optimal binning information.
"""
# Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
# Copyright (C) 2019
import numpy as np
from sklearn.base import BaseEstimator
from .options import continuous_optimal_binning_default_options
from .options import multiclass_optimal_binning_default_options
from .options import optimal_binning_default_options
from .options import sboptimal_binning_default_options
try:
from localsolver import LSStatistics
LOCALSOLVER_AVAILABLE = True
except ImportError:
LOCALSOLVER_AVAILABLE = False
def print_header():
    """Print the optbinning banner: version plus copyright/license line."""
    banner = ("optbinning (Version 0.7.0)\n"
              "Copyright (c) 2019-2020 Guillermo Navas-Palencia, "
              "Apache License 2.0\n")
    print(banner)
def print_optional_parameters(dict_default_options, dict_user_options):
    """Print every option with its effective value and a flag that marks
    it as the default ("d") or user-supplied ("U").

    ``None`` values display as "no"; containers and scikit-learn
    estimators display as "yes".
    """
    row_template = " {:<24} {:>15} * {}\n"
    lines = [" Begin options\n"]
    for option, default in dict_default_options.items():
        chosen = dict_user_options[option]
        flag = "d" if default == chosen else "U"
        if chosen is None:
            display = "no"
        elif isinstance(chosen, (list, np.ndarray, dict)):
            display = "yes"
        elif isinstance(chosen, BaseEstimator):
            display = "yes"
        else:
            display = chosen
        lines.append(row_template.format(option, str(display), flag))
    lines.append(" End options\n")
    print("".join(lines))
def print_prebinning_statistics(n_prebins, n_refinement):
    """Print the pre-binning summary: bin count and refinement count."""
    template = (" Pre-binning statistics\n"
                " Number of pre-bins {:>10}\n"
                " Number of refinements {:>10}\n")
    print(template.format(n_prebins, n_refinement))
def print_solver_statistics(solver_type, solver):
    """Print backend-specific solver statistics.

    Supports a CP solver ("cp"), a MIP solver ("mip") and LocalSolver
    ("ls"). The "ls" branch raises ImportError when the localsolver
    package is not installed.
    """
    if solver_type == "cp":
        n_booleans = solver.NumBooleans()
        n_branches = solver.NumBranches()
        n_conflicts = solver.NumConflicts()
        objective = int(solver.ObjectiveValue())
        best_objective_bound = int(solver.BestObjectiveBound())

        report = (
            " Solver statistics\n"
            " Type {:>10}\n"
            " Number of booleans {:>10}\n"
            " Number of branches {:>10}\n"
            " Number of conflicts {:>10}\n"
            " Objective value {:>10}\n"
            " Best objective bound {:>10}\n"
        ).format(solver_type, n_booleans, n_branches, n_conflicts,
                 objective, best_objective_bound)
    elif solver_type == "mip":
        n_constraints = solver.NumConstraints()
        n_variables = solver.NumVariables()
        objective = solver.Objective().Value()
        best_bound = solver.Objective().BestBound()

        report = (
            " Solver statistics\n"
            " Type {:>10}\n"
            " Number of variables {:>10}\n"
            " Number of constraints {:>10}\n"
            " Objective value {:>10.4f}\n"
            " Best objective bound {:>10.4f}\n"
        ).format(solver_type, n_variables, n_constraints, objective,
                 best_bound)
    elif solver_type == "ls":
        if not LOCALSOLVER_AVAILABLE:
            raise ImportError('Cannot import localsolver. Install LocalSolver '
                              'or choose another solver, options are "cp" and '
                              '"mip".')
        n_iterations = LSStatistics.get_nb_iterations(solver.statistics)

        report = (
            " Solver statistics\n"
            " Type {:>10}\n"
            " Number of iterations {:>10}\n"
        ).format(solver_type, n_iterations)

    print(report)
def print_timing(solver_type, solver, time_total, time_preprocessing,
                 time_prebinning, time_solver, time_postprocessing):
    """Print the wall-time breakdown of a binning run.

    For a CP solver instance the solver time is additionally split into
    model-generation time and optimizer time (via ``solver.WallTime()``).
    """
    # Fraction of the total runtime spent in each phase.
    p_preprocessing, p_prebinning, p_solver, p_postprocessing = (
        t / time_total for t in (time_preprocessing, time_prebinning,
                                 time_solver, time_postprocessing))

    if solver_type == "cp" and solver is not None:
        # Split the solver time into model generation + optimizer time.
        time_optimizer = solver.WallTime()
        time_model_generation = time_solver - time_optimizer
        p_model_generation = time_model_generation / time_solver
        p_optimizer = time_optimizer / time_solver

        report = (
            " Timing\n"
            " Total time {:>18.2f} sec\n"
            " Pre-processing {:>18.2f} sec ({:>7.2%})\n"
            " Pre-binning {:>18.2f} sec ({:>7.2%})\n"
            " Solver {:>18.2f} sec ({:>7.2%})\n"
            " model generation {:>18.2f} sec ({:>7.2%})\n"
            " optimizer {:>18.2f} sec ({:>7.2%})\n"
            " Post-processing {:>18.2f} sec ({:>7.2%})\n"
        ).format(time_total, time_preprocessing, p_preprocessing,
                 time_prebinning, p_prebinning, time_solver, p_solver,
                 time_model_generation, p_model_generation, time_optimizer,
                 p_optimizer, time_postprocessing, p_postprocessing)
    else:
        report = (
            " Timing\n"
            " Total time {:>18.2f} sec\n"
            " Pre-processing {:>18.2f} sec ({:>7.2%})\n"
            " Pre-binning {:>18.2f} sec ({:>7.2%})\n"
            " Solver {:>18.2f} sec ({:>7.2%})\n"
            " Post-processing {:>18.2f} sec ({:>7.2%})\n"
        ).format(time_total, time_preprocessing, p_preprocessing,
                 time_prebinning, p_prebinning, time_solver, p_solver,
                 time_postprocessing, p_postprocessing)

    print(report)
def print_name_status(name, status):
    """Print the problem name (or "UNKNOWN" when empty) and its status."""
    display_name = name if name else "UNKNOWN"
    print(" Name : {:<32}\n"
          " Status : {:<32}\n".format(display_name, status))
def print_main_info(name, status, time_total):
    """Print the compact report: name/status block plus total runtime."""
    print_name_status(name, status)
    elapsed = round(time_total, 4)
    print(" Time : {:<7.4f} sec\n".format(elapsed))
def print_binning_information(binning_type, print_level, name, status,
                              solver_type, solver, time_total,
                              time_preprocessing, time_prebinning, time_solver,
                              time_postprocessing, n_prebins, n_refinements,
                              dict_user_options):
    """Top-level reporting routine: print the banner and progressively more
    detail depending on ``print_level`` (0: short summary; >= 1: name,
    pre-binning, solver and timing statistics; 2 additionally dumps the
    full option table for the given ``binning_type``).
    """
    print_header()
    # Verbosity 2 also prints every option with a default/user flag.
    if print_level == 2:
        # NOTE(review): an unrecognized binning_type leaves
        # dict_default_options unbound and raises UnboundLocalError below.
        if binning_type == "optimalbinning":
            dict_default_options = optimal_binning_default_options
        elif binning_type == "multiclassoptimalbinning":
            dict_default_options = multiclass_optimal_binning_default_options
        elif binning_type == "continuousoptimalbinning":
            dict_default_options = continuous_optimal_binning_default_options
        elif binning_type == "sboptimalbinning":
            dict_default_options = sboptimal_binning_default_options
        print_optional_parameters(dict_default_options, dict_user_options)
    if print_level == 0:
        print_main_info(name, status, time_total)
    elif print_level >= 1:
        print_name_status(name, status)
        print_prebinning_statistics(n_prebins, n_refinements)
        # Solver and timing details are only meaningful for solved runs.
        if status in ("OPTIMAL", "FEASIBLE"):
            if solver is not None:
                print_solver_statistics(solver_type, solver)
            print_timing(solver_type, solver, time_total, time_preprocessing,
                         time_prebinning, time_solver, time_postprocessing)
"g.navas.palencia@gmail.com"
] | g.navas.palencia@gmail.com |
331e12f77b1298ca687aa8abf1b06e8b53670ca8 | 40be08bbfed4bd6a951c18cc4bc0bf1f00e7e8a6 | /lib/systems/d-tyrosine.py | b8c0453bbd0d2565282bb8aa5af87923778961db | [
"BSD-3-Clause"
] | permissive | pulsar-chem/Pulsar-Core | 5bf4239c0a0de74d3f12a1c8b9bea2867fd8960c | f8e64e04fdb01947708f098e833600c459c2ff0e | refs/heads/master | 2021-01-18T06:51:05.905464 | 2017-06-04T02:31:44 | 2017-06-04T02:31:44 | 46,251,809 | 0 | 2 | null | 2017-05-25T14:59:51 | 2015-11-16T04:21:59 | C++ | UTF-8 | Python | false | false | 1,324 | py | import pulsar as psr
def load_ref_system():
    """ Returns d-tyrosine as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol

        The payload is an XYZ-style geometry: one atom per line, element
        symbol followed by three Cartesian coordinates (presumably in
        Angstroms -- confirm against pulsar's make_system contract).
    """
    return psr.make_system("""
        C 2.8922 -0.8221 -1.1174
        C 1.4285 -0.8328 -0.6961
        C 1.0097 0.6023 -0.3298
        H 1.7288 1.0195 0.4035
        H 1.0702 1.2635 -1.2191
        C -0.3639 0.6346 0.2566
        C -1.4389 1.1258 -0.4880
        H -1.2751 1.4887 -1.5098
        C -2.7154 1.1673 0.0543
        H -3.5569 1.5535 -0.5321
        C -2.9150 0.7096 1.3642
        C -1.8442 0.2179 2.1232
        H -1.9966 -0.1379 3.1486
        C -0.5751 0.1861 1.5616
        H 0.2696 -0.1942 2.1482
        O -4.1959 0.7766 1.8369
        H -4.1980 0.4421 2.7252
        N 0.6337 -1.4483 -1.7847
        O 3.7365 -1.4352 -0.2570
        O 3.4022 -0.3272 -2.1058
        H 1.2939 -1.4919 0.2002
        H 0.7306 -0.9184 -2.6259
        H -0.3269 -1.4625 -1.5117
        H 4.6271 -1.4007 -0.5923
        """)
| [
"noreply@github.com"
] | pulsar-chem.noreply@github.com |
55983848f94ecdb245bec4ba0edc792cf79762b7 | e1ec272017e65f35c7e96f2df8d116e5a064fa29 | /tools/test_ucas_aod_base.py | 46f7f22ae1980ea1e04b2f29b05c2bcfb5e81b6c | [
"Apache-2.0"
] | permissive | BUAATJWX/RotationDetection | c805306d93f826db01b2021d12f3152c143cc908 | 7b4a49797e5f93e53ceddb791bf6a9bb9ff32ac7 | refs/heads/main | 2023-02-18T19:41:58.680670 | 2021-01-18T02:02:02 | 2021-01-18T02:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,052 | py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import time
import cv2
import pickle
import numpy as np
import argparse
from tqdm import tqdm
sys.path.append("../")
from utils import tools
from libs.label_name_dict.label_dict import LabelMap
from libs.utils.draw_box_in_img import DrawBox
from libs.utils.coordinate_convert import forward_convert, backward_convert
from libs.utils import nms_rotate
from libs.utils.rotate_polygon_nms import rotate_gpu_nms
def parse_args():
    """Parse the command-line arguments for UCAS-AOD evaluation.

    Prints the usage message and exits with status 1 when the script is
    invoked with no arguments at all (instead of silently using defaults).

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Fixed copy-paste bug: description said 'Test HRSC2016' although this
    # script (and every default path below) targets UCAS-AOD.
    parser = argparse.ArgumentParser(description='Test UCAS-AOD')
    parser.add_argument('--img_dir', dest='img_dir',
                        help='images path',
                        default='/data/dataset/UCAS-AOD/VOCdevkit_test/JPEGImages', type=str)
    parser.add_argument('--image_ext', dest='image_ext',
                        help='image format',
                        default='.png', type=str)
    parser.add_argument('--test_annotation_path', dest='test_annotation_path',
                        help='test annotate path',
                        default='/data/dataset/UCAS-AOD/VOCdevkit_test/Annotations', type=str)
    parser.add_argument('--gpu', dest='gpu',
                        help='gpu index',
                        default='0', type=str)
    parser.add_argument('--draw_imgs', '-s', default=False,
                        action='store_true')
    parser.add_argument('--multi_scale', '-ms', default=False,
                        action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args
class TestUCASAOD(object):
    """UCAS-AOD evaluation driver: runs a rotation detector over a directory
    of images and collects per-image rotated detections after rotated NMS."""
    def __init__(self, cfgs):
        # Keep the config, the parsed CLI args and label <-> name lookup tables.
        self.cfgs = cfgs
        self.args = parse_args()
        label_map = LabelMap(cfgs)
        self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()
    def eval_with_plac(self, img_dir, det_net, image_ext):
        """Evaluate ``det_net`` on every image in ``img_dir`` in a fresh TF session.

        :param img_dir: directory holding the test images.
        :param det_net: detector exposing build_whole_detection_network()
            and get_restorer().
        :param image_ext: image filename extension, e.g. '.png'.
        :return: list with one entry per image -- an (N, 7) array of
            [label, score, x_c, y_c, w, h, theta] rows, or an empty array
            when the image yielded no detections.
        """
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
        # 1. preprocess img
        img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
        img_batch = tf.cast(img_plac, tf.float32)
        # *_v1d backbones expect 0-1 normalised, mean/std standardised input;
        # the other backbones only subtract the pixel mean.
        if self.cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
        else:
            img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)
        img_batch = tf.expand_dims(img_batch, axis=0)
        detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
            input_img_batch=img_batch)
        init_op = tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        )
        restorer, restore_ckpt = det_net.get_restorer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if not restorer is None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')
            all_boxes_r = []
            imgs = os.listdir(img_dir)
            pbar = tqdm(imgs)
            for a_img_name in pbar:
                a_img_name = a_img_name.split(image_ext)[0]
                # NOTE(review): cv2.imread returns None for unreadable files;
                # there is no guard here, so a bad file would raise on .shape below.
                raw_img = cv2.imread(os.path.join(img_dir,
                                                  a_img_name + image_ext))
                raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
                det_boxes_r_all, det_scores_r_all, det_category_r_all = [], [], []
                # Single scale unless --multi_scale was given on the command line.
                img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
                    self.cfgs.IMG_SHORT_SIDE_LEN]
                img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list
                for short_size in img_short_side_len_list:
                    # Resize so the short side equals short_size, long side capped at IMG_MAX_LENGTH.
                    max_len = self.cfgs.IMG_MAX_LENGTH
                    if raw_h < raw_w:
                        new_h, new_w = short_size, min(int(short_size * float(raw_w) / raw_h), max_len)
                    else:
                        new_h, new_w = min(int(short_size * float(raw_h) / raw_w), max_len), short_size
                    img_resize = cv2.resize(raw_img, (new_w, new_h))
                    # [:, :, ::-1] flips BGR (OpenCV) to RGB, which img_plac expects.
                    resized_img, detected_boxes, detected_scores, detected_categories = \
                        sess.run(
                            [img_batch, detection_boxes, detection_scores, detection_category],
                            feed_dict={img_plac: img_resize[:, :, ::-1]}
                        )
                    if detected_boxes.shape[0] == 0:
                        continue
                    resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
                    # Convert 5-param rotated boxes to polygon form, then rescale
                    # the x (even) and y (odd) coordinates back to raw image size.
                    detected_boxes = forward_convert(detected_boxes, False)
                    detected_boxes[:, 0::2] *= (raw_w / resized_w)
                    detected_boxes[:, 1::2] *= (raw_h / resized_h)
                    det_boxes_r_all.extend(detected_boxes)
                    det_scores_r_all.extend(detected_scores)
                    det_category_r_all.extend(detected_categories)
                det_boxes_r_all = np.array(det_boxes_r_all)
                det_scores_r_all = np.array(det_scores_r_all)
                det_category_r_all = np.array(det_category_r_all)
                box_res_rotate_ = []
                label_res_rotate_ = []
                score_res_rotate_ = []
                # Per-class rotated NMS over the (possibly multi-scale) detections.
                if det_scores_r_all.shape[0] != 0:
                    for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
                        index = np.where(det_category_r_all == sub_class)[0]
                        if len(index) == 0:
                            continue
                        tmp_boxes_r = det_boxes_r_all[index]
                        tmp_label_r = det_category_r_all[index]
                        tmp_score_r = det_scores_r_all[index]
                        tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
                        # try:
                        #     inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
                        #                                     scores=np.array(tmp_score_r),
                        #                                     iou_threshold=self.cfgs.NMS_IOU_THRESHOLD,
                        #                                     max_output_size=5000)
                        # except:
                        tmp_boxes_r_ = np.array(tmp_boxes_r_)
                        tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        tmp[:, 0:-1] = tmp_boxes_r_
                        tmp[:, -1] = np.array(tmp_score_r)
                        # Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
                        jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
                        inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
                                             float(self.cfgs.NMS_IOU_THRESHOLD), 0)
                        box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                        score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                        label_res_rotate_.extend(np.array(tmp_label_r)[inx])
                if len(box_res_rotate_) == 0:
                    # Keep the result list aligned with image order even when
                    # nothing survived NMS for this image.
                    all_boxes_r.append(np.array([]))
                    continue
                det_boxes_r_ = np.array(box_res_rotate_)
                det_scores_r_ = np.array(score_res_rotate_)
                det_category_r_ = np.array(label_res_rotate_)
                if self.args.draw_imgs:
                    # Optionally dump visualisations of the confident detections.
                    detected_indices = det_scores_r_ >= self.cfgs.VIS_SCORE
                    detected_scores = det_scores_r_[detected_indices]
                    detected_boxes = det_boxes_r_[detected_indices]
                    detected_categories = det_category_r_[detected_indices]
                    detected_boxes = backward_convert(detected_boxes, False)
                    drawer = DrawBox(self.cfgs)
                    det_detections_r = drawer.draw_boxes_with_label_and_scores(raw_img[:, :, ::-1],
                                                                              boxes=detected_boxes,
                                                                              labels=detected_categories,
                                                                              scores=detected_scores,
                                                                              method=1,
                                                                              in_graph=True)
                    save_dir = os.path.join('test_hrsc', self.cfgs.VERSION, 'hrsc2016_img_vis')
                    tools.makedirs(save_dir)
                    cv2.imwrite(save_dir + '/{}.jpg'.format(a_img_name),
                                det_detections_r[:, :, ::-1])
                det_boxes_r_ = backward_convert(det_boxes_r_, False)
                x_c, y_c, w, h, theta = det_boxes_r_[:, 0], det_boxes_r_[:, 1], det_boxes_r_[:, 2], \
                                        det_boxes_r_[:, 3], det_boxes_r_[:, 4]
                boxes_r = np.transpose(np.stack([x_c, y_c, w, h, theta]))
                dets_r = np.hstack((det_category_r_.reshape(-1, 1),
                                    det_scores_r_.reshape(-1, 1),
                                    boxes_r))
                all_boxes_r.append(dets_r)
                pbar.set_description("Eval image %s" % a_img_name)
            # fw1 = open(cfgs.VERSION + '_detections_r.pkl', 'wb')
            # pickle.dump(all_boxes_r, fw1)
            return all_boxes_r
| [
"yangxue0827@126.com"
] | yangxue0827@126.com |
e44b5cc61b8f1316e7e39504e69b3d259b1fb826 | 61673ab9a42f7151de7337608c442fa6247f13bb | /turtle/hexagone/main.py | 79ce05588cb9650916614442edd18f018a6a02b6 | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 1,175 | py | import turtle
from math import pi, sin, cos
def hexagone(point, longueur, c):
    """Draw a filled hexagon centred on ``point``.

    The hexagon is painted as three filled quadrilaterals, each spanning the
    centre plus three consecutive outer corners, using the three colour
    values in ``c``.

    :param point: (x, y) centre of the hexagon.
    :param longueur: distance from the centre to each corner.
    :param c: sequence of three turtle colour values.
    :return: always True.
    """
    radius = longueur
    cx, cy = point
    # The six hexagon corners sit at angles k * pi/3 around the centre.
    corner = [(radius * cos(k * pi / 3) + cx, radius * sin(k * pi / 3) + cy)
              for k in range(6)]
    # Use the exact leftmost corner instead of sin(pi) rounding noise.
    corner[3] = (-radius + cx, 0 + cy)
    turtle.up()
    turtle.goto(point)
    turtle.color(c[0])
    turtle.down()
    turtle.begin_fill()
    for target in (corner[4], corner[5], corner[0], point):
        turtle.goto(target)
    turtle.end_fill()
    turtle.color(c[1])
    turtle.begin_fill()
    for target in (corner[0], corner[1], corner[2], point):
        turtle.goto(target)
    turtle.end_fill()
    turtle.color(c[2])
    turtle.begin_fill()
    for target in (corner[2], corner[3], corner[4], point):
        turtle.goto(target)
    turtle.end_fill()
    turtle.up()
    return True
# Draw four tricoloured hexagons in a 2x2 grid, then enter the turtle event
# loop (the window stays open until the user closes it).
hexagone((0,0), 50, ("black",("blue"),("red")))
hexagone((100,0), 50, ("black",("blue"),("red")))
hexagone((0,100), 50, ("black",("blue"),("red")))
hexagone((100,100), 50, ("black",("blue"),("red")))
turtle.done()
| [
"furas@tlen.pl"
] | furas@tlen.pl |
1a3e08e306facde599652aa55be243b8556bfc6d | 890d2361bcc185a65af1f1089fe594ce93a771c4 | /answers_100+_programs/tmp.py | bf7fb0dbe7b4d0949b23fbbadd4d9cf290527f9d | [] | no_license | panu2306/Python-programming-exercises | 307b255209233f95ac2b205cb063b56c303fe67d | a4df9c89d2cb07bbfb16d23be081efa55d738814 | refs/heads/master | 2023-01-22T18:12:56.047582 | 2020-12-05T11:21:21 | 2020-12-05T11:21:21 | 255,521,445 | 0 | 0 | null | 2020-04-14T05:48:30 | 2020-04-14T05:48:29 | null | UTF-8 | Python | false | false | 138 | py | import re
txt = "The rain in Spain"
# Anchored search: the whole string must start with "The" and end with "Spain".
# re.search yields a Match object (truthy) on success, None otherwise.
x = re.search("^The.*Spain$", txt)
print("YES! We have a match!" if x else "No match")
| [
"pranavbhendawade@gmail.com"
] | pranavbhendawade@gmail.com |
d2e24dd5b76387fd4620ae86e797d5f2a4eeef1c | d5b48163d236ca770be8e687f92192e2971397e8 | /keysdict.py | 18963f29912462eeee5e2e06c2daef6dc14d3846 | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | x={'a':39,'b':78,'c':65,'11':23,'12':45,13:40}
print(x)
# dict.keys() returns a live dict_keys view, not a list.
print(x.keys())
print(type(x.keys()))
# Iterating the view yields each key (insertion order).
for i in x.keys():
    print(i,end=" ")
print()
# NOTE: the loop variable ``i`` leaks out of the for loop, so this prints
# the type of the *last* key.
print(type(i))
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
16b649016334f76c35494556505fbc781db4a3fb | d8008fdbfab54e36167747e8bb5ed639935a7d28 | /BigProj/Chatissimo/app.py | fe12cb4c6ed6e0b254791d9f56da90f9a6cc914f | [] | no_license | dancb10/ppscu.com | 90cce23496eaf97b0212988b23138d13046dab3b | cf1e28b41dcd6048cf2236f081891360f2741d03 | refs/heads/master | 2022-12-09T05:30:58.328023 | 2021-01-07T12:48:06 | 2021-01-07T12:48:06 | 63,581,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from flask import Flask, render_template
from flask_socketio import SocketIO
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- load from the environment in production.
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
from flask_socketio import join_room, leave_room
from flask_socketio import send, emit
# Re-broadcast any 'my event' payload to every connected client as 'my response'.
@socketio.on('my event')
def handle_my_custom_event(data):
    emit('my response', data, broadcast=True)
@socketio.on('message')
def handle_message(message):
    # NOTE(review): this handler is registered on the default namespace but
    # re-sends on '/chat' -- confirm clients actually listen on '/chat'.
    send(message, namespace='/chat')
@socketio.on('join')
def on_join(data):
    """Put the client in ``data['room']`` and announce the arrival there."""
    username = data['username']
    room = data['room']
    join_room(room)
    send(username + ' has entered the room.', room=room)
@socketio.on('leave')
def on_leave(data):
    """Remove the client from ``data['room']`` and announce the departure."""
    username = data['username']
    room = data['room']
    leave_room(room)
    send(username + ' has left the room.', room=room)
if __name__ == '__main__':
    # SocketIO-aware dev server, bound on all interfaces.
    socketio.run(app, host='0.0.0.0')
| [
"dapopesc@Dans-MacBook-Pro.local"
] | dapopesc@Dans-MacBook-Pro.local |
d22bb51e93002a52c6bd583ccc04d99e47130c60 | e3d09f5467a29e457048e8caccdce08b01387c8a | /tests/test_catalog.py | dd5e4a2f6cd9c8d3698b3ac541d4f557aa59087f | [
"MIT"
] | permissive | sajabdoli/jschon | 958181d24bbdc440725274067c6038f60ecaea1a | 63a602745c825abce3851207bd37372e0ce4452d | refs/heads/main | 2023-08-03T09:07:55.700537 | 2021-09-20T06:13:15 | 2021-09-20T06:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,332 | py | import json
import pathlib
import tempfile
import pytest
from jschon import Catalog, CatalogError, URI, JSONPointer, JSONSchema, JSON, create_catalog
from tests import example_schema, metaschema_uri_2020_12
# Payload written into the temporary JSON file used by the directory tests.
json_example = {"foo": "bar"}
@pytest.fixture
def new_catalog():
    """A fresh Catalog with no default vocabularies/formats registered."""
    return Catalog(default=False)
def test_new_catalog(new_catalog):
    # A non-default catalog starts with every internal registry empty.
    assert not new_catalog._directories
    assert not new_catalog._vocabularies
    assert not new_catalog._format_validators
    assert not new_catalog._schema_cache
@pytest.fixture
def setup_tmpdir():
    """Create a temp dir hierarchy containing a JSON file.
    Yield (tmpdir path, subdir name, file name) and clean up
    afterwards.
    """
    with tempfile.TemporaryDirectory() as tmpdir_path:
        with tempfile.TemporaryDirectory(dir=tmpdir_path) as subdir_path:
            with tempfile.NamedTemporaryFile(dir=subdir_path) as f:
                f.write(json.dumps(json_example).encode())
                f.flush()
                yield tmpdir_path, pathlib.Path(subdir_path).name, pathlib.Path(f.name).name
@pytest.mark.parametrize('base_uri', [
    'http://example.com/',
    'http://example.com/foo/',
    'http://example.com/foo/bar/',
])
def test_add_directory_and_load_json(base_uri, setup_tmpdir, new_catalog):
    tmpdir_path, subdir_name, jsonfile_name = setup_tmpdir
    new_catalog.add_directory(URI(base_uri), pathlib.Path(tmpdir_path))
    json_doc = new_catalog.load_json(URI(f'{base_uri}{subdir_name}/{jsonfile_name}'))
    assert json_doc == json_example
    # incorrect base URI
    with pytest.raises(CatalogError):
        new_catalog.load_json(URI(f'http://example.net/{subdir_name}/{jsonfile_name}'))
    # incorrect file name
    with pytest.raises(CatalogError):
        new_catalog.load_json(URI(f'{base_uri}{subdir_name}/baz'))
@pytest.mark.parametrize('base_uri', [
    '//example.com/foo/bar/',  # no scheme
    'http://Example.com/foo/bar/',  # not normalized
    'http://example.com/foo/#',  # contains empty fragment
    'http://example.com/foo/#bar',  # contains non-empty fragment
    'http://example.com/foo/bar',  # does not end with '/'
])
def test_add_directory_invalid_uri(base_uri, setup_tmpdir, new_catalog):
    tmpdir_path, subdir_name, jsonfile_name = setup_tmpdir
    with pytest.raises(CatalogError):
        new_catalog.add_directory(URI(base_uri), pathlib.Path(tmpdir_path))
def test_add_directory_invalid_dir(setup_tmpdir, new_catalog):
    tmpdir_path, subdir_name, jsonfile_name = setup_tmpdir
    # base_dir is a file
    with pytest.raises(CatalogError):
        new_catalog.add_directory(URI('http://example.com/'), pathlib.Path(tmpdir_path) / subdir_name / jsonfile_name)
    # base_dir does not exist
    with pytest.raises(CatalogError):
        new_catalog.add_directory(URI('http://example.com/'), pathlib.Path(tmpdir_path) / 'foo')
@pytest.mark.parametrize('uri', [
    '//example.com/foo/bar/file.json',  # no scheme
    'http://Example.com/foo/bar/file.json',  # not normalized
    'http://example.com/foo/file.json#',  # contains empty fragment
    'http://example.com/foo/file.json#bar',  # contains non-empty fragment
])
def test_load_json_invalid_uri(uri, new_catalog):
    with pytest.raises(CatalogError):
        new_catalog.load_json(URI(uri))
@pytest.mark.parametrize('uri, is_known', [
    ("https://json-schema.org/draft/2020-12/vocab/core", True),
    ("https://json-schema.org/draft/2020-12/vocab/applicator", True),
    ("https://json-schema.org/draft/2020-12/vocab/unevaluated", True),
    ("https://json-schema.org/draft/2020-12/vocab/validation", True),
    ("https://json-schema.org/draft/2020-12/vocab/meta-data", True),
    ("https://json-schema.org/draft/2020-12/vocab/format-annotation", True),
    ("https://json-schema.org/draft/2020-12/meta/format-assertion", False),
    ("https://json-schema.org/draft/2020-12/vocab/content", True),
])
# NOTE(review): the ``catalog`` fixture is defined outside this file,
# presumably in a conftest.py -- confirm when relocating these tests.
def test_get_vocabulary(uri, is_known, catalog):
    if is_known:
        vocabulary = catalog.get_vocabulary(URI(uri))
        assert vocabulary.uri == uri
    else:
        with pytest.raises(CatalogError):
            catalog.get_vocabulary(URI(uri))
@pytest.fixture
def example_schema_uri():
    """URI of the shared example schema after registration in the catalog."""
    schema = JSONSchema(example_schema, metaschema_uri=metaschema_uri_2020_12)
    return schema.uri
@pytest.mark.parametrize('ptr, is_schema', [
    ("", True),
    ("/$id", False),
    ("/$defs", False),
    ("/if", True),
    ("/then", True),
    ("/else", True),
])
def test_get_schema(example_schema_uri, ptr, is_schema, catalog):
    uri = example_schema_uri.copy(fragment=ptr)
    if is_schema:
        subschema = catalog.get_schema(uri)
        assert JSONPointer(ptr).evaluate(example_schema) == subschema
    else:
        with pytest.raises(CatalogError):
            catalog.get_schema(uri)
def sessioned_schema(uri, schema, session):
    """Build a JSONSchema at ``uri``; register it under ``session`` when given."""
    kwargs = {'uri': uri, 'metaschema_uri': metaschema_uri_2020_12}
    if session is not None:
        kwargs['session'] = session
    return JSONSchema(schema, **kwargs)
def test_session_independence(catalog):
    uri = URI("http://example.com")
    sessioned_schema(uri, {"const": 0}, None)  # 'default' session
    sessioned_schema(uri, {"const": 1}, 'one')
    sessioned_schema(uri, {"const": 2}, 'two')
    assert catalog.get_schema(uri)["const"] == 0
    assert catalog.get_schema(uri, session='default')["const"] == 0
    assert catalog.get_schema(uri, session='one')["const"] == 1
    assert catalog.get_schema(uri, session='two')["const"] == 2
def test_metaschema_isolation():
    new_catalog = create_catalog('2019-09', '2020-12')
    assert new_catalog._schema_cache.keys() == {'__meta__'}
    # mask the metaschema with a boolean false schema, in the fubar session
    sessioned_schema(metaschema_uri_2020_12, False, 'fubar')
    uri = URI("http://example.com")
    fubar_schema = sessioned_schema(uri, {"$ref": str(metaschema_uri_2020_12)}, 'fubar')
    assert fubar_schema.evaluate(JSON(True)).valid is False
    # masking the metaschema has no impact on other sessions
    okay_schema = sessioned_schema(uri, {"$ref": str(metaschema_uri_2020_12)}, 'okay')
    assert okay_schema.evaluate(JSON(True)).valid is True
    okay_schema = sessioned_schema(uri, {"$ref": str(metaschema_uri_2020_12)}, None)
    assert okay_schema.evaluate(JSON(True)).valid is True
| [
"52427991+marksparkza@users.noreply.github.com"
] | 52427991+marksparkza@users.noreply.github.com |
8562715e27a81c5afaaa0de22707df58099f3ac3 | b5b117371b463ba68be14345549f16098bb311ef | /curso_em_video/mundo_01/desafios/usando_modulos_do_python/ex021.py | d96113d902bbefc8cd4de46c65df64d70bb01bf3 | [] | no_license | thuurzz/Python | f1d0f5038ed97fbf4dc83c352102efcdde25ace8 | 7bd61180fe7594aad7d6cb787772a384f18ced87 | refs/heads/master | 2022-11-05T17:22:02.661665 | 2021-05-17T02:59:37 | 2021-05-17T02:59:37 | 245,733,534 | 0 | 1 | null | 2022-10-23T12:20:43 | 2020-03-08T01:34:31 | Python | UTF-8 | Python | false | false | 142 | py | #tocando MP3 com a lib pygame
import pygame
# Initialise all pygame modules; required before using the mixer.
pygame.init()
pygame.mixer_music.load('ex021.mp3')
pygame.mixer_music.play()
# NOTE(review): this waits for a single pygame event, not for the end of
# playback -- the script may exit before the MP3 finishes. Confirm intent.
pygame.event.wait()
| [
"arthur.silva@aluno.faculdadeimpacta.com.br"
] | arthur.silva@aluno.faculdadeimpacta.com.br |
52b8783b1f0763da1f20afc8766800625bc5974b | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_internetservicecustom_entry_portrange.py | 35de8d4e8e5f4223f9bc25054bf2a9f93c49dde4 | [] | no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,855 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_internetservicecustom_entry_portrange
short_description: Port ranges in the custom entry.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
internet-service-custom:
description: the parameter (internet-service-custom) in requested url
type: str
required: true
entry:
description: the parameter (entry) in requested url
type: str
required: true
firewall_internetservicecustom_entry_portrange:
description: the top level parameters set
required: false
type: dict
suboptions:
end-port:
type: int
description: 'Integer value for ending TCP/UDP/SCTP destination port in range (1 to 65535).'
id:
type: int
description: 'Custom entry port range ID.'
start-port:
type: int
description: 'Integer value for starting TCP/UDP/SCTP destination port in range (1 to 65535).'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Port ranges in the custom entry.
fmgr_firewall_internetservicecustom_entry_portrange:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
internet-service-custom: <your own value>
entry: <your own value>
state: <value in [present, absent]>
firewall_internetservicecustom_entry_portrange:
end-port: <value of integer>
id: <value of integer>
start-port: <value of integer>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
    """Ansible module entry point.

    Declares the argument spec for the
    firewall_internetservicecustom_entry_portrange object and dispatches the
    requested present/absent operation to FortiManager via NAPIManager over
    the httpapi connection plugin.
    """
    # Collection URLs for the port-range table (adom-scoped and global).
    jrpc_urls = [
        '/pm/config/adom/{adom}/obj/firewall/internet-service-custom/{internet-service-custom}/entry/{entry}/port-range',
        '/pm/config/global/obj/firewall/internet-service-custom/{internet-service-custom}/entry/{entry}/port-range'
    ]
    # Per-object URLs, addressing a single port-range by its primary key.
    perobject_jrpc_urls = [
        '/pm/config/adom/{adom}/obj/firewall/internet-service-custom/{internet-service-custom}/entry/{entry}/port-range/{port-range}',
        '/pm/config/global/obj/firewall/internet-service-custom/{internet-service-custom}/entry/{entry}/port-range/{port-range}'
    ]
    url_params = ['adom', 'internet-service-custom', 'entry']
    module_primary_key = 'id'
    # Argument schema; the nested 'revision' maps appear to flag per-
    # FortiManager-version availability (see the DOCUMENTATION block above).
    module_arg_spec = {
        'enable_log': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'proposed_method': {
            'type': 'str',
            'required': False,
            'choices': [
                'set',
                'update',
                'add'
            ]
        },
        'bypass_validation': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'workspace_locking_adom': {
            'type': 'str',
            'required': False
        },
        'workspace_locking_timeout': {
            'type': 'int',
            'required': False,
            'default': 300
        },
        'rc_succeeded': {
            'required': False,
            'type': 'list'
        },
        'rc_failed': {
            'required': False,
            'type': 'list'
        },
        'state': {
            'type': 'str',
            'required': True,
            'choices': [
                'present',
                'absent'
            ]
        },
        'adom': {
            'required': True,
            'type': 'str'
        },
        'internet-service-custom': {
            'required': True,
            'type': 'str'
        },
        'entry': {
            'required': True,
            'type': 'str'
        },
        'firewall_internetservicecustom_entry_portrange': {
            'required': False,
            'type': 'dict',
            'revision': {
                '6.0.0': True,
                '6.2.1': True,
                '6.2.3': True,
                '6.2.5': True,
                '6.4.0': True,
                '6.4.2': True,
                '6.4.5': True,
                '7.0.0': True
            },
            'options': {
                'end-port': {
                    'required': False,
                    'revision': {
                        '6.0.0': True,
                        '6.2.1': True,
                        '6.2.3': True,
                        '6.2.5': True,
                        '6.4.0': True,
                        '6.4.2': True,
                        '6.4.5': True,
                        '7.0.0': True
                    },
                    'type': 'int'
                },
                'id': {
                    'required': True,
                    'revision': {
                        '6.0.0': True,
                        '6.2.1': True,
                        '6.2.3': True,
                        '6.2.5': True,
                        '6.4.0': True,
                        '6.4.2': True,
                        '6.4.5': True,
                        '7.0.0': True
                    },
                    'type': 'int'
                },
                'start-port': {
                    'required': False,
                    'revision': {
                        '6.0.0': True,
                        '6.2.1': True,
                        '6.2.3': True,
                        '6.2.5': True,
                        '6.4.0': True,
                        '6.4.2': True,
                        '6.4.5': True,
                        '7.0.0': True
                    },
                    'type': 'int'
                }
            }
        }
    }
    params_validation_blob = []
    check_galaxy_version(module_arg_spec)
    module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_internetservicecustom_entry_portrange'),
                           supports_check_mode=False)
    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
        fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
        fmgr.validate_parameters(params_validation_blob)
        fmgr.process_curd(argument_specs=module_arg_spec)
    else:
        # This module only works over the httpapi connection plugin.
        module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
    module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"aaronchristopher.dalmacio@gmail.com"
] | aaronchristopher.dalmacio@gmail.com |
0ec455911c1ab290253082808da2e25622d4c158 | c7770d7631f2930cce80462f9c3ee7e2abe118bb | /src/muses/collection/models/period.py | 5bbb6ea5691c3dc24ad1739995e67d4f71ed7624 | [
"Apache-2.0"
] | permissive | Aincient/cleo | 4f277520a22792aa5b505601849a7ff3a4bd4196 | 933ef372fa7847d943206d72bfb03c201dbafbd6 | refs/heads/master | 2021-06-18T11:01:49.137359 | 2021-01-12T16:34:44 | 2021-01-12T16:34:44 | 150,566,366 | 0 | 3 | NOASSERTION | 2021-01-12T16:34:46 | 2018-09-27T10:00:20 | Python | UTF-8 | Python | false | false | 1,416 | py | from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from six import python_2_unicode_compatible
__all__ = (
'Period',
)
@python_2_unicode_compatible
class Period(MPTTModel):
    """Period."""
    # Canonical English period name; unique across all periods.
    name_en = models.TextField(
        verbose_name=_("English name"),
        unique=True
    )
    # Optional Dutch translation of the name.
    name_nl = models.TextField(
        verbose_name=_("Dutch name"),
        blank=True,
        null=True,
        unique=False,
    )
    # MPTT tree link to the enclosing (parent) period, if any.
    # NOTE(review): no on_delete argument -- mandatory on Django >= 2.0;
    # confirm the Django/django-mptt versions this project pins.
    parent = TreeForeignKey(
        'self',
        null=True,
        blank=True,
        related_name='children',
        db_index=True
    )
    # Free-text begin/end dates (stored as strings, not DateFields).
    date_begin_en = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        verbose_name=_("Date begin (EN)"),
    )
    date_end_en = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        verbose_name=_("Date end (EN)"),
    )
    class MPTTMeta(object):
        # Keep siblings ordered by English name when inserting into the tree.
        order_insertion_by = ['name_en']
    def date_range(self):
        """Get a string of the date range of a period, if available.

        :return: "<begin> until <end>", or None when either date is missing.
        :rtype: str
        """
        if self.date_begin_en and self.date_end_en:
            return "{} until {}".format(self.date_begin_en, self.date_end_en)
    def __str__(self):
        return self.name_en
| [
"artur.barseghyan@gmail.com"
] | artur.barseghyan@gmail.com |
8a22f0fb8aa3a956133ff15591c5281360269bd6 | 38da8edb2102ad29eda8784cbb845cac0b96bbca | /176_deco_with_arg.py | da2dd44c77ee84d3fdec4a0a4fd76065eb523c25 | [] | no_license | Prateek2201/Python_codes | 1a655a3e6820e7ecb1fb8a8abd266a8ae0508cb5 | 436a36544edac80cbe420c7b9ddb718df46b68da | refs/heads/main | 2023-08-01T03:10:51.864186 | 2021-09-17T18:08:40 | 2021-09-17T18:08:40 | 407,635,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | from functools import wraps
def only_datatype_allow(data_type):
    """Decorator factory: restrict a function to positional args of one type.

    The wrapped function runs only when every positional argument is exactly
    of type ``data_type`` (type() equality, so subclasses such as bool-for-int
    are rejected); otherwise the string 'Invalid arguments!' is returned.
    Keyword arguments are passed through unchecked.

    :param data_type: the exact type every positional argument must have.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Generator keeps all() short-circuiting without building a list.
            if all(type(arg) == data_type for arg in args):
                return func(*args, **kwargs)
            # Typo fixed: message previously read 'Invalid argsuments!'.
            return 'Invalid arguments!'
        return wrapper
    return decorator
@only_datatype_allow(str)
def string_join(*args):
    """Concatenate the given strings, appending a single space after each.

    Non-str positional arguments are intercepted by the decorator before
    this body runs.
    """
    return ''.join(word + ' ' for word in args)
# Demo call; note the result carries a trailing space.
print(string_join('prateek','kumar','agrahari'))
| [
"noreply@github.com"
] | Prateek2201.noreply@github.com |
e88f6ac435a26acbd94c78dc15bacb75b8f7c55d | 2da02bd20ae4d621ef02d557ebb7ce20dd5482ff | /clitooltester/test_runner.py | 49eb485cbdbfadddaba8d177dcbb980222cab75f | [
"Apache-2.0"
] | permissive | dfirlabs/clitooltester | 54544eddbe3ec8d3b86a6a6846faa2fadcfc3e37 | ffe23b7b7458212d150390f476cda74e89fc97e1 | refs/heads/main | 2021-11-23T21:44:03.777813 | 2021-10-31T13:04:31 | 2021-10-31T13:04:31 | 228,607,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # -*- coding: utf-8 -*-
"""Command line tool test runner."""
from __future__ import unicode_literals
class TestRunner(object):
  """Command line tool test runner."""
  def ReadConfiguration(self, path):
    """Reads the configuration from a file.
    Args:
      path (str): path of the configuration file.
    """
    # NOTE(review): stub -- the docstring is the entire body, so calling
    # this method currently does nothing and returns None.
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
168cf8df7410467e2ce9e176451a4dd10705ab49 | fb5dd7410679bd28299cfe3841de6fe826d978cb | /src/user/migrations/0002_auto_20201207_1825.py | 9f881775e4d02973c4bc130028dbacda53f05e12 | [] | no_license | IvanYukish/finance-manager | 35202fde63a7f519b52d8e09f3f64dd547cccbc5 | 9147d09cff7543361f5ccefa79ec334a58efc9a1 | refs/heads/master | 2023-07-11T14:39:17.536557 | 2021-08-04T23:05:45 | 2021-08-04T23:05:45 | 317,544,811 | 1 | 0 | null | 2021-08-23T17:18:10 | 2020-12-01T13:09:50 | CSS | UTF-8 | Python | false | false | 546 | py | # Generated by Django 3.1.3 on 2020-12-07 18:25
from django.db import migrations, models
import user.validators
class Migration(migrations.Migration):
    # Auto-generated migration: alters CustomUser.phone_number to attach the
    # project's custom phone-number validator (plus db_index and max_length=20).
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='phone_number',
            # verbose_name is Ukrainian for "Phone number" (runtime string, kept as-is).
            field=models.CharField(blank=True, db_index=True, max_length=20, null=True, validators=[user.validators.CustomPhoneNumberValidator()], verbose_name='Номер Телефону'),
        ),
    ]
| [
"iwan.jukisch@gmail.com"
] | iwan.jukisch@gmail.com |
06d9b8ff55ed2fbb76bfbdcb87a4babf0d2bacd2 | 0874abd0a592c952a7aad6f4642776168312aee6 | /12-函数/02-注意事项.py | eb39ef7339f010e72b295b58b4eb1f0d5f23fb96 | [] | no_license | EndIFbiu/python-study | 075742d3923adad8061b5f720cabd4a33d3eb0a2 | 62a64a587077ef5f2dcd8a119ba56d3709073bf6 | refs/heads/master | 2023-02-10T08:14:08.144442 | 2020-12-27T12:23:14 | 2020-12-27T12:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # 1.使用一个函数 2.测试注意事项
# Define the function.
def info_print():
    print('hello world')
# Call the function (nothing runs until this call).
info_print()
# The bare triple-quoted string below is a study note (kept verbatim):
# 1. define before calling; 2. a function body only runs when called;
# 3. on a call, the interpreter jumps into the function body, then returns
#    to the call site and continues.
"""
1.先定义后调用
2.没有调用函数,函数代码不会执行
3.函数的执行流程:
    当调用函数的时候,解释器会回到定义函数的下方缩进代码执行
    执行完后回到调用函数的地方继续向下执行
"""
| [
"270017772@qq.com"
] | 270017772@qq.com |
2777dbd194a8ef3326bbcab1b6100c10510741bb | 43e5441f74359d620be6f7f80c99622769ea9774 | /venv/Lib/site-packages/tb_paddle/file_writer.py | 7140441ed49b89658d66a95feff3501103a1f992 | [] | no_license | 33Da/deeplearn_eassy | 96f1bd09fe3df907c650378215eb686e4ab2801e | 82d60c5ec3aec60822d68d13f11ef1320d0bba2e | refs/heads/master | 2023-02-07T15:02:00.202693 | 2021-01-05T05:03:22 | 2021-01-05T05:03:22 | 326,892,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,950 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from .event_file_writer import EventFileWriter
from .proto import event_pb2
class FileWriter(object):
"""Writes protocol buffers to event files to be consumed by TensorBoard.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously.
"""
def __init__(self, logdir, max_queue=1024, filename_suffix=''):
"""Creates a `FileWriter` and an event file.
On construction the writer creates a new event file in `logdir`.
The other arguments to the constructor control the asynchronous writes to
the event file.
:param logdir: Directory where event file will be written.
:type logdir: str
:param max_queue: Size of the queue for pending events and
summaries before one of the 'add' calls forces a flush to disk.
:type max_queue: int
:param filename_suffix: Suffix added to all event filenames in the logdir directory.
More details on filename construction in
tensorboard.summary.writer.event_file_writer.EventFileWriter.
:type filename_suffix: str
"""
self.logdir = str(logdir)
self.event_writer = EventFileWriter(self.logdir, max_queue, filename_suffix)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.logdir
def add_event(self, event, step=None, walltime=None):
"""Adds an event to the event file.
:param event: An `Event` protocol buffer.
:param step: Optional global step value for training process to record with the event.
:type step: Number
:param walltime: Given time to override the default walltime.
:type walltime: Optional, float
"""
event.wall_time = time.time() if walltime is None else walltime
if step is not None:
# Make sure step is converted from numpy or other formats
# since protobuf might not convert depending on version
event.step = int(step)
self.event_writer.add_event(event)
def add_summary(self, summary, global_step=None, walltime=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
:param summary: A `Summary` protocol buffer.
:param global_step: Optional global step value for training process to record with the summary.
:type global_step: Number
:param walltime: Given time to override the default walltime.
:type walltime: Optional, float
"""
event = event_pb2.Event(summary=summary)
self.add_event(event, global_step, walltime)
def add_graph(self, GraphDef_proto, walltime=None):
"""Adds a `GraphDef` protocol buffer to the event file.
:param graph_profile: A GraphDef protocol buffer.
:param walltime: Optional walltime to override default
(current) walltime (from time.time()) seconds after epoch.
:type walltime: Optional, float
"""
event = event_pb2.Event(graph_def=GraphDef_proto.SerializeToString())
self.add_event(event, None, walltime)
def add_run_metadata(self, run_metadata, tag, global_step=None, walltime=None):
"""Adds a metadata information for a single session.run() call.
:param run_metadata: A `RunMetadata` protobuf object.
:param tag: The tag name for this metadata.
:type tag: string
:param global_step: global step counter to record with the StepStats.
:type global_step: int
:param walltime: Given time to override the default walltime.
:type walltime: Optional, float
"""
tagged_metadata = event_pb2.TaggedRunMetadata(
tag=tag, run_metadata=run_metadata.SerializeToString())
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self.add_event(event, global_step, walltime)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to disk.
"""
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
| [
"764720843@qq.com"
] | 764720843@qq.com |
aa9d2e9bb8d661e9be2db95c36fc2df05cd76db3 | c1120d1d6352f35dc988b9874b24cd30f83f2f58 | /search_submit/tests.py | 4731168e669f8ab9ac87f22768712156183b9f33 | [] | no_license | andrem122/Invoice-Management | 70032d86cfdfb2ed21479baae3a8057f88b61047 | 7f7a617a39602a656ff54724c344745038f304b4 | refs/heads/master | 2022-12-11T19:19:47.898336 | 2020-10-01T01:39:52 | 2020-10-01T01:39:52 | 120,393,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from django.test import TestCase
from .views import Search_Submit_View
from django.test import Client
class Test_Search(TestCase):
def setUp(self):
self.c = Client()
def test_values_normalize_query(self):
#test to see if incorrect data types are dealt with
search_submit_view = Search_Submit_View()
self.assertRaises(ValueError, search_submit_view.normalize_query, True)
self.assertRaises(ValueError, search_submit_view.normalize_query, 2)
self.assertRaises(ValueError, search_submit_view.normalize_query, ['list', 'list'])
self.assertRaises(ValueError, search_submit_view.normalize_query, {'key': 1, 'key': '1'})
def test_post(self):
response = self.c.post('/search/', {'search': 'all'})
self.assertEqual(response.status_code, 302)
response = self.c.post('/search/', {'search': 'test'})
self.assertEqual(response.status_code, 302)
def test_get(self):
response = self.c.get('/search/')
self.assertEqual(response.status_code, 302)
| [
"andre.mashraghi@gmail.com"
] | andre.mashraghi@gmail.com |
f73c52321076fade49523070a1e3b273c1795d7a | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/백트래킹/14888_연산자 끼워넣기(godyd2702).py | 9cd8659a763dace3fcc1d348c9bc604318b17254 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | M = -10 ** 9
m = 10 ** 9
N = int(input())
num = list(map(int, input().split()))
a, b, c, d = map(int, input().split())
def inst(n, i, d1, d2, d3, d4):
global M, m
if i == N:
M = max(M, n);m = min(m, n);return
else:
if d1: inst(n + num[i], i + 1, d1 - 1, d2, d3, d4)
if d2: inst(n - num[i], i + 1, d1, d2 - 1, d3, d4)
if d3: inst(n * num[i], i + 1, d1, d2, d3 - 1, d4)
if d4: inst(int(n / num[i]), i + 1, d1, d2, d3, d4 - 1)
inst(num[0], 1, a, b, c, d)
print(M)
print(m)
| [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
0b7be87224520bb9408cd8049f7d7e65582aa728 | 4aa7a4d0525095725eb99843c83827ba4806ceb1 | /ML/m26_earlyStopping.py | d85e06cd4038a8f0760a03f63d358972ec9dcef2 | [] | no_license | seonukim/Study | 65a70f5bdfad68f643abc3086d5c7484bb2439d4 | a5f2538f9ae8b5fc93b5149dd51704e8881f0a80 | refs/heads/master | 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # xgboost evaluate
import numpy as np
from sklearn.feature_selection import SelectFromModel
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.datasets import load_boston
## 데이터
x, y = load_boston(return_X_y = True)
print(x.shape) # (506, 13)
print(y.shape) # (506,)
## train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size = 0.2,
shuffle = True, random_state = 66)
## 모델링
model = XGBRegressor(n_estimators = 1000, # verbose의 갯수, epochs와 동일
learning_rate = 0.1)
model.fit(x_train, y_train,
verbose = True, eval_metric = 'rmse',
eval_set = [(x_train, y_train),
(x_test, y_test)],
early_stopping_rounds = 20)
# eval_metic의 종류 : rmse, mae, logloss, error(error가 0.2면 accuracy는 0.8), auc(정확도, 정밀도; accuracy의 친구다)
results = model.evals_result()
# print("eval's result : ", results)
y_pred = model.predict(x_test)
r2 = r2_score(y_test, y_pred)
print("R2 : ", r2) | [
"92.seoonooo@gmail.com"
] | 92.seoonooo@gmail.com |
3cd0488b6b634aac8022d5257434a461105d2364 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02578/s995045217.py | d1f71aa2eceeb57ffbd4b83ec3aa667c45437a0b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from sys import stdin
from math import ceil
inp = lambda : stdin.readline().strip()
n = int(inp())
a = [int(x) for x in inp().split()]
curr = 0
ans = 0
for i in a:
curr = max(curr, i)
if i < curr:
ans += curr - i
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c4afa4d8b74bc42805d910469039f3a47e385759 | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /test/test_locality_api.py | 653a1d95e0e5dc251e9a10fcf65664d83719718b | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.locality_api import LocalityApi # noqa: E501
from dbpedia.rest import ApiException
class TestLocalityApi(unittest.TestCase):
"""LocalityApi unit test stubs"""
def setUp(self):
self.api = dbpedia.api.locality_api.LocalityApi() # noqa: E501
def tearDown(self):
pass
def test_localitys_get(self):
"""Test case for localitys_get
List all instances of Locality # noqa: E501
"""
pass
def test_localitys_id_get(self):
"""Test case for localitys_id_get
Get a single Locality by its id # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"maxiosorio@gmail.com"
] | maxiosorio@gmail.com |
d248a55669dbb6f95e7048320573eff2922fcd85 | a8d86cad3f3cc6a977012d007d724bbaf02542f7 | /vendors/marvin/marvin/cloudstackAPI/addBaremetalPxePingServer.py | ad3ae5db84fb4c57e1b4ce248bdcc756014ab43e | [] | no_license | bopopescu/bigrobot | f8d971183119a1d59f21eb2fc08bbec9ee1d522b | 24dad9fb0044df5a473ce4244932431b03b75695 | refs/heads/master | 2022-11-20T04:55:58.470402 | 2015-03-31T18:14:39 | 2015-03-31T18:14:39 | 282,015,194 | 0 | 0 | null | 2020-07-23T17:29:53 | 2020-07-23T17:29:52 | null | UTF-8 | Python | false | false | 3,134 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add a baremetal ping pxe server"""
from baseCmd import *
from baseResponse import *
class addBaremetalPxePingServerCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""Credentials to reach external pxe device"""
"""Required"""
self.password = None
self.typeInfo['password'] = 'string'
"""the Physical Network ID"""
"""Required"""
self.physicalnetworkid = None
self.typeInfo['physicalnetworkid'] = 'uuid'
"""Root directory on PING storage server"""
"""Required"""
self.pingdir = None
self.typeInfo['pingdir'] = 'string'
"""PING storage server ip"""
"""Required"""
self.pingstorageserverip = None
self.typeInfo['pingstorageserverip'] = 'string'
"""type of pxe device"""
"""Required"""
self.pxeservertype = None
self.typeInfo['pxeservertype'] = 'string'
"""Tftp root directory of PXE server"""
"""Required"""
self.tftpdir = None
self.typeInfo['tftpdir'] = 'string'
"""URL of the external pxe device"""
"""Required"""
self.url = None
self.typeInfo['url'] = 'string'
"""Credentials to reach external pxe device"""
"""Required"""
self.username = None
self.typeInfo['username'] = 'string'
"""Password of PING storage server"""
self.pingcifspassword = None
self.typeInfo['pingcifspassword'] = 'string'
"""Username of PING storage server"""
self.pingcifsusername = None
self.typeInfo['pingcifsusername'] = 'string'
"""Pod Id"""
self.podid = None
self.typeInfo['podid'] = 'uuid'
self.required = ["password","physicalnetworkid","pingdir","pingstorageserverip","pxeservertype","tftpdir","url","username",]
class addBaremetalPxePingServerResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""Root directory on PING storage server"""
self.pingdir = None
self.typeInfo['pingdir'] = 'string'
"""PING storage server ip"""
self.pingstorageserverip = None
self.typeInfo['pingstorageserverip'] = 'string'
"""Tftp root directory of PXE server"""
self.tftpdir = None
self.typeInfo['tftpdir'] = 'string'
| [
"vui.le@bigswitch.com"
] | vui.le@bigswitch.com |
2c41f55a2753fd378c6b955b81ea0dc108036626 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/924f6b53a63dede6e59f/snippet.py | 19dcb8b5bcbeee647a0675544e42b8c24949c89b | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 7,080 | py | #
# Extract files from Bare git-annex repositories without git-annex
# Supports version v6
#
# See internals: http://git-annex.branchable.com/internals/
#
# Modified: added non-bare repos, added tar file (of symlinks) output for use with archivemount
#
# TODO: improve output
# TODO: use cat-files instead of archive
# TODO: export to tar WITH relative links
#
# Emanuele Ruffaldi (C) 2016
import sys,argparse,os,subprocess
import md5,tarfile,cStringIO,hashlib,struct
def gitgetpathinfo(branch,path,recurse=False):
"""uses ls-tree to extract information about a path in the branch or in general tree-ish"""
if recurse:
r = "-r"
else:
r = ""
w = subprocess.check_output(["git", "ls-tree",r,branch,"--",path])
return [pa.split("\t") for pa in w.split("\n") if pa != ""] # meta TAB filename ==> meta is: ?? SPACE type
def tarextraclink(content):
"""extracts the path of a link in a Tar expressed by content"""
t = tarfile.open(mode="r",fileobj=cStringIO.StringIO(content))
ti = t.getmembers()[0]
return ti.linkname
def gitgetfile(branch,path):
"""uses archive for extracing the path. This is better than the git show solution because it deals with diff automatically. But does not work with symbolic links"""
xpath,n = os.path.split(path)
xx = "git archive --format=tar --prefix= \"%s:%s\" \"%s\" | tar -xO \"%s\"" % (branch,xpath,n,n)
return subprocess.check_output(xx,shell=True)
def gitgetfile_tar(branch,path):
"""returns the content of a file in tar format"""
try:
xpath,n = os.path.split(path)
xx = "git archive --format=tar --prefix= \"%s:%s\" \"%s\"" % (branch,xpath,n)
return subprocess.check_output(xx,shell=True)
except:
return None
def gitgetfile_show(branch,path):
"""retrieve path content: first getting the hash and then the content via git show"""
found = gitgetpathinfo(branch,path)
if len(found) == 1:
return subprocess.check_output(["git", "show",found[0][0].split(" ")[2]])
else:
return None
def annexgetremotes(useshow):
"""list of remotes AKA uuid.log"""
if useshow:
return gitgetfile_show("git-annex","uuid.log")
else: # slow with bare
return gitgetfile("git-annex","uuid.log")
#https://gist.github.com/giomasce/a7802bda1417521c5b30
def hashdirlower(key):
hasher = hashlib.md5()
hasher.update(key)
digest = hasher.hexdigest()
return "%s/%s/" % (digest[:3], digest[3:6])
#https://gist.github.com/giomasce/a7802bda1417521c5b30
def hashdirmixed(key):
hasher = hashlib.md5()
hasher.update(key)
digest = hasher.digest()
first_word = struct.unpack('<I', digest[:4])[0]
nums = [first_word >> (6 * x) & 31 for x in xrange(4)]
letters = ["0123456789zqjxkmvwgpfZQJXKMVWGPF"[i] for i in nums]
return "%s%s/%s%s/" % (letters[1], letters[0], letters[3], letters[2])
def annexwhereis_bare(key):
"""returns the location of the key object of git-annex"""
#hashdirlower is used for bare git repositories, the git-annex branch, and on special remotes as well.
#m = md5.new()
#m.update(key)
#h = m.hexdigest()
#pre = h[0:3]
#post = h[3:6]
#print key,pre,post
papa = hashdirlower(key)
return gitgetfile("git-annex",os.path.join(papa,key+".log")),os.path.join("annex","objects",papa,key,key)
def annexwhereis(key):
"""returns the location of the key object of git-annex"""
#non bare uses hashdirmixed
#It takes the md5sum of the key, but rather than a string, represents it as 4 32bit words. Only the first word is used. It is converted into a string by the same mechanism that would be used to encode a normal md5sum value into a string, but where that would normally encode the bits using the 16 characters 0-9a-f, this instead uses the 32 characters "0123456789zqjxkmvwgpfZQJXKMVWGPF". The first 2 letters of the resulting string are the first directory, and the second 2 are the second directory.
papaM = hashdirmixed(key)
papaL = hashdirlower(key)
return gitgetfile("git-annex",os.path.join(papaL,key+".log")),os.path.join("annex","objects",papaM,key,key)
def checkbare(args):
"""checks if the repo is a bare"""
gitdir = os.path.join(args.annex,".git")
if os.path.isdir(gitdir):
if not os.path.isdir(os.path.join(gitdir,"annex")):
return None
else:
return False,gitdir
elif os.path.isdir(os.path.join(args.annex,"annex")):
gitdir = args.annex
return True,gitdir
else:
return None
def main():
parser = argparse.ArgumentParser(description='Retrieve file from git-annex, even barebone')
parser.add_argument('--annex', help="path to annex repository",default=".")
parser.add_argument('path', help="file to be looked at",nargs="*")
parser.add_argument('--all', help="list all",action="store_true")
parser.add_argument('--verbose', help="verbose dump",action="store_true")
parser.add_argument('--tar', help="produces a tar file with given path cotaining the symbolic links")
parser.add_argument('--abs',help="makes abs files",action="store_true")
args = parser.parse_args()
# check if bare repository
isbare = checkbare(args)
if isbare is None:
print "not a git-annex repisitory"
isbare,gitdir = isbare
print "isbare?",isbare,gitdir
if not isbare:
workdir = args.annex
else:
workdir = None
os.environ["GIT_DIR"] = gitdir
print "list annexes\n",annexgetremotes(useshow=False)
if args.tar:
ot = tarfile.open(args.tar,"w")
if args.all:
args.path = [x[1] for x in gitgetpathinfo("master","",recurse=True)]
for p in args.path:
# we cannot use
ww = gitgetfile_tar("master",p) # tarred 1 file
if ww is None:
print "not found",p
continue
link = tarextraclink(ww) # extract the link from the single file
if args.verbose:
print "aslink",link
#w = gitgetfile("master",p) -- not working using tar because it is a link
#ref = gitgetfile_show("master",p) -- not working in theory
ref = link
if ref == "":
print "not found",p
else:
key = os.path.split(ref)[1] # the link contains the annex key
if args.verbose:
print "key is",key
if isbare:
locations,path = annexwhereis_bare(key) # extract
else:
locations,path = annexwhereis(key)
path = os.path.join(gitdir,path)
if args.verbose:
print p,"located in\n",locations
if not os.path.isfile(path):
if not isbare:
if os.path.isfile(path+".map"):
mpath = os.path.join(workdir,open(path+".map","r").read().strip())
if os.path.isfile(mpath):
path = mpath
else:
print "mapped file not found",mpath," for ",path # or direct mode not supported
path = None
else:
print "non bare file not found",path # or direct mode not supported
path = None
else:
print "file not found",path # or direct mode not supported
path = None
if path is not None:
ss = os.stat(path)
print path,ss
ti = tarfile.TarInfo(p)
ti.size = 0 # zero for links: ss.st_size
ti.mode = ss.st_mode
ti.mtime = ss.st_mtime
ti.type = tarfile.SYMTYPE
ti.uid = ss.st_uid
ti.gid = ss.st_gid
if args.abs:
ti.linkname = os.path.abspath(path)
else:
ti.linkname = path
ot.addfile(ti)
if __name__ == '__main__':
main() | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
8726dff6933a14d6099a93099b7963d3eda24be4 | 451d9b10944d8654ce697d542874eb8ff9444cad | /qtgmc_modern/__init__.py | fc00467bfe6c98230dc4e3ea794b029c6ca42c7a | [
"MIT"
] | permissive | Ichunjo/secret-project | 4fc365a07477b6fc5a1decb45c17216107ef0cf8 | 67d1dae873e73f5ea6f952d533ed69e48c81279c | refs/heads/master | 2023-08-25T01:32:45.460495 | 2021-11-01T18:59:36 | 2021-11-01T18:59:36 | 418,175,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | from .qtgmc import QTGMC
| [
"ichunjo.le.terrible@gmail.com"
] | ichunjo.le.terrible@gmail.com |
7ba87e118ff1a299de306a3d4f2f509d0f68a0ff | dfb3d0b9e5ed3b061a9dcc0a3605af2daa9c5ef2 | /mobile/urls.py | 4e8184edbf9a14432f32e664681d6d21116d70cd | [] | no_license | aishAMZK/shopping | acd7d3c9ace0df75cd90befcbd38f0b8bb86ff8b | 5f40dbe24854c0e8438005fc896120f6f9d295d5 | refs/heads/master | 2023-04-03T12:42:19.136978 | 2021-04-16T13:29:48 | 2021-04-16T13:29:48 | 358,573,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | """mobileproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from .views import Index, Login, Signup, logout
from .views import Cart
from .views import Checkout
urlpatterns = [
path('admin/', admin.site.urls),
path('', Index.as_view(), name='homepage'),
path('signup/', Signup.as_view(), name='signup'),
path('login', Login.as_view(), name='login'),
path('logout', logout, name='logout'),
path('cart', Cart.as_view(), name='cart'),
path('check-out', Checkout.as_view(), name='checkout'),
]
| [
"aiswaryakrishna46@gmail.com"
] | aiswaryakrishna46@gmail.com |
c19e08ed04866b1573abf2e8286143f87b1a9b13 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5706278382862336_1/Python/ikostia/solution.py | 34c5e06e1566a0a1531a0ee3e58b53d70f3a967a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #! /usr/bin/python
debug = False
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def solve(input_data):
den, num = input_data
d = gcd(den, num)
den = den / d
num = num / d
bnum = map(lambda d: d == '1', list(bin(num)[3:]))
if any(bnum):
return "impossible"
res = 1
while den < num / 2 and res <= 40:
res += 1
den = den * 2
if res > 40 and den < num / 2:
return "impossible"
return str(res)
def read_input():
s = raw_input()
den, num = map(int, s.split("/"))
return (den, num)
def main():
T = int(raw_input())
for t in xrange(T):
print "Case #%d: %s" % (t + 1, solve(read_input()))
if __name__ == "__main__":
main()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
b6c72913fc978ae4001e780fa3bed14b6dcf0015 | ca776f549e4103f3ec8bc1d78ea9ddc98eee9cb4 | /siconos/Local/SpheresPyramid/params.py | 5663ef4012af905bc8c36b3ac0beb18c80fdeaba | [] | no_license | FrictionalContactLibrary/fclib-scripts | d4a0f63333bcfa77e1ca5eaed6be587400a82b47 | dbef104353f3d196273ac99ad9ca0b73dc346fcb | refs/heads/master | 2022-05-25T20:12:17.310343 | 2022-05-13T14:15:38 | 2022-05-13T14:15:38 | 77,398,419 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import Siconos.Numerics as Numerics
t0 = 0
T = 30
h = 0.0005
g = 9.81
theta = 0.50001
mu = 2.0
dump_itermax = 80
dump_probability = .05
itermax = 100000
NewtonMaxIter = 20
tolerance = 1e-8
solver = Numerics.SICONOS_FRICTION_3D_NSGS
multipointIterations = False
import imp
try:
imp.load_source('mkinput', 'mkinput.py')
except IOError as e:
warn('I need a mkinput.py file')
usage()
exit(1)
import mkinput
fileName = "SpheresPyramid{0}".format(mkinput.N)
title = "SpheresPyramid with {0} levels"
description = """
Spheres pyramid under gravity on the ground with Bullet collision detection
Moreau TimeStepping: h={0}, theta = {1}
One Step non smooth problem: {2}, maxiter={3}, tol={4}
""".format(h, theta, Numerics.idToName(solver),
itermax,
tolerance)
mathInfo = ""
# if we want a shuffled NonsmoothGaussSeidel
#def initialize(model):
# model.simulation().oneStepNSProblem(0).numericsSolverOptions().iparam[9] = 1
| [
"vincent.acary@inria.fr"
] | vincent.acary@inria.fr |
62a74e879faae2572f524200a28f5f254af13ca5 | 63768dc92cde5515a96d774a32facb461a3bf6e9 | /jacket/tests/storage/unit/test_vmware_vmdk.py | 2d53c6fb02cb24beed420d918017da246837cd95 | [
"Apache-2.0"
] | permissive | ljZM33nd/jacket | 6fe9156f6f5789e5c24425afa7ce9237c302673d | d7ad3147fcb43131098c2a5210847634ff5fb325 | refs/heads/master | 2023-04-16T11:02:01.153751 | 2016-11-15T02:48:12 | 2016-11-15T02:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117,949 | py | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware vCenter VMDK driver.
"""
from distutils import version as ver
import ddt
import mock
from oslo_utils import units
from oslo_vmware import api
from oslo_vmware import exceptions
from oslo_vmware import image_transfer
import six
from jacket.storage import exception as cinder_exceptions
from jacket.storage import test
from jacket.storage.volume import configuration
from jacket.storage.volume.drivers.vmware import datastore as hub
from jacket.storage.volume.drivers.vmware import exceptions as vmdk_exceptions
from jacket.storage.volume.drivers.vmware import vmdk
from jacket.storage.volume.drivers.vmware import volumeops
class FakeObject(object):
def __init__(self):
self._fields = {}
def __setitem__(self, key, value):
self._fields[key] = value
def __getitem__(self, item):
return self._fields[item]
# TODO(vbala) Split test methods handling multiple cases into multiple methods,
# each handling a specific case.
@ddt.ddt
class VMwareVcVmdkDriverTestCase(test.TestCase):
"""Unit tests for VMwareVcVmdkDriver."""
IP = 'localhost'
PORT = 2321
USERNAME = 'username'
PASSWORD = 'password'
VOLUME_FOLDER = 'storage-volumes'
API_RETRY_COUNT = 3
TASK_POLL_INTERVAL = 5.0
IMG_TX_TIMEOUT = 10
MAX_OBJECTS = 100
TMP_DIR = "/vmware-tmp"
CA_FILE = "/etc/ssl/rui-ca-cert.pem"
VMDK_DRIVER = vmdk.VMwareVcVmdkDriver
CLUSTERS = ["cls-1", "cls-2"]
DEFAULT_VC_VERSION = '5.5'
VOL_ID = 'abcdefab-cdef-abcd-efab-cdefabcdefab',
DISPLAY_NAME = 'foo',
VOL_TYPE_ID = 'd61b8cb3-aa1b-4c9b-b79e-abcdbda8b58a'
VOL_SIZE = 2
PROJECT_ID = 'd45beabe-f5de-47b7-b462-0d9ea02889bc'
SNAPSHOT_ID = '2f59670a-0355-4790-834c-563b65bba740'
SNAPSHOT_NAME = 'snap-foo'
SNAPSHOT_DESCRIPTION = 'test snapshot'
IMAGE_ID = 'eb87f4b0-d625-47f8-bb45-71c43b486d3a'
IMAGE_NAME = 'image-1'
def setUp(self):
super(VMwareVcVmdkDriverTestCase, self).setUp()
self._config = mock.Mock(spec=configuration.Configuration)
self._config.vmware_host_ip = self.IP
self._config.vmware_host_port = self.PORT
self._config.vmware_host_username = self.USERNAME
self._config.vmware_host_password = self.PASSWORD
self._config.vmware_wsdl_location = None
self._config.vmware_volume_folder = self.VOLUME_FOLDER
self._config.vmware_api_retry_count = self.API_RETRY_COUNT
self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
self._config.vmware_tmp_dir = self.TMP_DIR
self._config.vmware_ca_file = self.CA_FILE
self._config.vmware_insecure = False
self._config.vmware_cluster_name = self.CLUSTERS
self._config.vmware_host_version = self.DEFAULT_VC_VERSION
self._db = mock.Mock()
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config,
db=self._db)
api_retry_count = self._config.vmware_api_retry_count
task_poll_interval = self._config.vmware_task_poll_interval,
self._session = api.VMwareAPISession(self.IP, self.USERNAME,
self.PASSWORD, api_retry_count,
task_poll_interval,
create_session=False)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self.MAX_OBJECTS)
def test_get_volume_stats(self):
stats = self._driver.get_volume_stats()
self.assertEqual('VMware', stats['vendor_name'])
self.assertEqual(self._driver.VERSION, stats['driver_version'])
self.assertEqual('vmdk', stats['storage_protocol'])
self.assertEqual(0, stats['reserved_percentage'])
self.assertEqual('unknown', stats['total_capacity_gb'])
self.assertEqual('unknown', stats['free_capacity_gb'])
def _create_volume_dict(self,
vol_id=VOL_ID,
display_name=DISPLAY_NAME,
volume_type_id=VOL_TYPE_ID,
status='available',
size=VOL_SIZE,
attachment=None,
project_id=PROJECT_ID):
return {'id': vol_id,
'display_name': display_name,
'name': 'volume-%s' % vol_id,
'volume_type_id': volume_type_id,
'status': status,
'size': size,
'volume_attachment': attachment,
'project_id': project_id,
}
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_verify_volume_creation(self, select_ds_for_volume):
volume = self._create_volume_dict()
self._driver._verify_volume_creation(volume)
select_ds_for_volume.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, '_verify_volume_creation')
def test_create_volume(self, verify_volume_creation):
volume = self._create_volume_dict()
self._driver.create_volume(volume)
verify_volume_creation.assert_called_once_with(volume)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_volume_without_backing(self, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
self._driver.delete_volume(volume)
vops.get_backing.assert_called_once_with(volume['name'])
self.assertFalse(vops.delete_backing.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_volume(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict()
self._driver.delete_volume(volume)
vops.get_backing.assert_called_once_with(volume['name'])
vops.delete_backing.assert_called_once_with(backing)
@mock.patch('storage.volume.drivers.vmware.vmdk.'
'_get_volume_type_extra_spec')
@mock.patch('storage.volume.drivers.vmware.volumeops.'
'VirtualDiskType.validate')
def test_get_extra_spec_disk_type(self, validate,
get_volume_type_extra_spec):
vmdk_type = mock.sentinel.vmdk_type
get_volume_type_extra_spec.return_value = vmdk_type
type_id = mock.sentinel.type_id
self.assertEqual(vmdk_type,
self._driver._get_extra_spec_disk_type(type_id))
get_volume_type_extra_spec.assert_called_once_with(
type_id, 'vmdk_type', default_value=vmdk.THIN_VMDK_TYPE)
validate.assert_called_once_with(vmdk_type)
@mock.patch.object(VMDK_DRIVER, '_get_extra_spec_disk_type')
def test_get_disk_type(self, get_extra_spec_disk_type):
vmdk_type = mock.sentinel.vmdk_type
get_extra_spec_disk_type.return_value = vmdk_type
volume = self._create_volume_dict()
self.assertEqual(vmdk_type, self._driver._get_disk_type(volume))
get_extra_spec_disk_type.assert_called_once_with(
volume['volume_type_id'])
def _create_snapshot_dict(self,
volume,
snap_id=SNAPSHOT_ID,
name=SNAPSHOT_NAME,
description=SNAPSHOT_DESCRIPTION):
return {'id': snap_id,
'volume': volume,
'volume_name': volume['name'],
'name': name,
'display_description': description,
}
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_snapshot_without_backing(self, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
self._driver.create_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'])
self.assertFalse(vops.create_snapshot.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_snapshot_with_backing(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
self._driver.create_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'])
vops.create_snapshot.assert_called_once_with(
backing, snapshot['name'], snapshot['display_description'])
def test_create_snapshot_when_attached(self):
volume = self._create_volume_dict(status='in-use')
snapshot = self._create_snapshot_dict(volume)
self.assertRaises(cinder_exceptions.InvalidVolume,
self._driver.create_snapshot, snapshot)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_snapshot_without_backing(self, vops):
vops.get_backing.return_value = None
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
self._driver.delete_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'])
self.assertFalse(vops.delete_snapshot.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_delete_snapshot_with_backing(self, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
volume = self._create_volume_dict()
snapshot = self._create_snapshot_dict(volume)
self._driver.delete_snapshot(snapshot)
vops.get_backing.assert_called_once_with(snapshot['volume_name'])
vops.delete_snapshot.assert_called_once_with(
backing, snapshot['name'])
def test_delete_snapshot_when_attached(self):
volume = self._create_volume_dict(status='in-use')
snapshot = self._create_snapshot_dict(volume)
self.assertRaises(cinder_exceptions.InvalidVolume,
self._driver.delete_snapshot, snapshot)
    @ddt.data('vmdk', 'VMDK', None)
    def test_validate_disk_format(self, disk_format):
        """'vmdk' (any case) and None must pass validation without raising."""
        self._driver._validate_disk_format(disk_format)
    def test_validate_disk_format_with_invalid_format(self):
        """A non-vmdk disk format is rejected as ImageUnacceptable."""
        self.assertRaises(cinder_exceptions.ImageUnacceptable,
                          self._driver._validate_disk_format,
                          'img')
def _create_image_meta(self,
_id=IMAGE_ID,
name=IMAGE_NAME,
disk_format='vmdk',
size=1 * units.Gi,
container_format='bare',
vmware_disktype='streamOptimized',
vmware_adaptertype='lsiLogic',
is_public=True):
return {'id': _id,
'name': name,
'disk_format': disk_format,
'size': size,
'container_format': container_format,
'properties': {'vmware_disktype': vmware_disktype,
'vmware_adaptertype': vmware_adaptertype,
},
'is_public': is_public,
}
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_validate_disk_format')
def test_copy_image_to_volume_with_ova_container(self,
validate_disk_format):
image_service = mock.Mock()
image_meta = self._create_image_meta(container_format='ova')
image_service.show.return_value = image_meta
context = mock.sentinel.context
volume = self._create_volume_dict()
image_id = mock.sentinel.image_id
self.assertRaises(
cinder_exceptions.ImageUnacceptable,
self._driver.copy_image_to_volume, context, volume, image_service,
image_id)
validate_disk_format.assert_called_once_with(image_meta['disk_format'])
    @mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_validate_disk_format')
    @mock.patch('storage.volume.drivers.vmware.volumeops.'
                'VirtualDiskAdapterType.validate')
    @mock.patch('storage.volume.drivers.vmware.vmdk.ImageDiskType.'
                'validate')
    @mock.patch.object(VMDK_DRIVER,
                       '_create_volume_from_non_stream_optimized_image')
    @mock.patch.object(VMDK_DRIVER,
                       '_fetch_stream_optimized_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_extend_backing')
    def _test_copy_image_to_volume(self,
                                   extend_backing,
                                   vops,
                                   fetch_stream_optimized_image,
                                   create_volume_from_non_stream_opt_image,
                                   validate_image_disk_type,
                                   validate_image_adapter_type,
                                   validate_disk_format,
                                   vmware_disk_type='streamOptimized',
                                   backing_disk_size=VOL_SIZE,
                                   call_extend_backing=False):
        """Common scenario driver for copy_image_to_volume tests.

        :param vmware_disk_type: the image's vmware_disktype property;
            selects the stream-optimized vs. non-stream-optimized path
        :param backing_disk_size: disk size (GiB) reported for the backing
        :param call_extend_backing: whether the backing is expected to be
            extended after the image is copied
        """
        image_service = mock.Mock()
        image_meta = self._create_image_meta(vmware_disktype=vmware_disk_type)
        image_service.show.return_value = image_meta
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        vops.get_disk_size.return_value = backing_disk_size * units.Gi
        context = mock.sentinel.context
        volume = self._create_volume_dict()
        image_id = mock.sentinel.image_id
        self._driver.copy_image_to_volume(
            context, volume, image_service, image_id)
        # Disk format and vmware-specific image properties must be validated.
        validate_disk_format.assert_called_once_with(image_meta['disk_format'])
        validate_image_disk_type.assert_called_once_with(
            image_meta['properties']['vmware_disktype'])
        validate_image_adapter_type.assert_called_once_with(
            image_meta['properties']['vmware_adaptertype'])
        # streamOptimized images are downloaded directly; other disk types
        # go through the temporary virtual-disk creation flow.
        if vmware_disk_type == 'streamOptimized':
            fetch_stream_optimized_image.assert_called_once_with(
                context, volume, image_service, image_id, image_meta['size'],
                image_meta['properties']['vmware_adaptertype'])
        else:
            create_volume_from_non_stream_opt_image.assert_called_once_with(
                context, volume, image_service, image_id, image_meta['size'],
                image_meta['properties']['vmware_adaptertype'],
                image_meta['properties']['vmware_disktype'])
        vops.get_disk_size.assert_called_once_with(backing)
        # The backing is extended only when its disk is smaller than the
        # requested volume size.
        if call_extend_backing:
            extend_backing.assert_called_once_with(backing, volume['size'])
        else:
            self.assertFalse(extend_backing.called)
    @ddt.data('sparse', 'preallocated', 'streamOptimized')
    def test_copy_image_to_volume(self, vmware_disk_type):
        """Happy path: backing disk already matches the volume size."""
        self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type)
    @ddt.data('sparse', 'preallocated', 'streamOptimized')
    def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type):
        """Backing disk (1 GiB) smaller than volume -> backing is extended."""
        self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type,
                                        backing_disk_size=1,
                                        call_extend_backing=True)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_check_disk_conversion')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
def _test_create_volume_from_non_stream_optimized_image(
self,
delete_tmp_backing,
select_ds_for_volume,
get_storage_profile_id,
create_disk_from_preallocated_image,
create_disk_from_sparse_image,
vops,
get_ds_name_folder_path,
create_backing,
generate_uuid,
check_disk_conversion,
get_disk_type,
image_disk_type='sparse',
disk_conversion=False):
disk_type = mock.sentinel.disk_type
get_disk_type.return_value = disk_type
check_disk_conversion.return_value = disk_conversion
volume = self._create_volume_dict()
if disk_conversion:
disk_name = "6b77b25a-9136-470e-899e-3c930e570d8e"
generate_uuid.return_value = disk_name
else:
disk_name = volume['name']
backing = mock.sentinel.backing
create_backing.return_value = backing
ds_name = mock.sentinel.ds_name
folder_path = mock.sentinel.folder_path
get_ds_name_folder_path.return_value = (ds_name, folder_path)
host = mock.sentinel.host
dc_ref = mock.sentinel.dc_ref
vops.get_host.return_value = host
vops.get_dc.return_value = dc_ref
vmdk_path = mock.Mock(spec=volumeops.FlatExtentVirtualDiskPath)
create_disk_from_sparse_image.return_value = vmdk_path
create_disk_from_preallocated_image.return_value = vmdk_path
profile_id = mock.sentinel.profile_id
get_storage_profile_id.return_value = profile_id
if disk_conversion:
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
context = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
image_size_in_bytes = units.Gi
adapter_type = mock.sentinel.adapter_type
self._driver._create_volume_from_non_stream_optimized_image(
context, volume, image_service, image_id, image_size_in_bytes,
adapter_type, image_disk_type)
check_disk_conversion.assert_called_once_with(image_disk_type,
mock.sentinel.disk_type)
if disk_conversion:
create_backing.assert_called_once_with(
volume,
create_params={vmdk.CREATE_PARAM_DISK_LESS: True,
vmdk.CREATE_PARAM_BACKING_NAME: disk_name})
else:
create_backing.assert_called_once_with(
volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True})
if image_disk_type == 'sparse':
create_disk_from_sparse_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, disk_name)
else:
create_disk_from_preallocated_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, disk_name, adapter_type)
get_storage_profile_id.assert_called_once_with(volume)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, profile_id, vmdk_path.get_descriptor_ds_file_path())
if disk_conversion:
select_ds_for_volume.assert_called_once_with(volume)
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, disk_type=disk_type, host=host, resource_pool=rp,
folder=folder)
delete_tmp_backing.assert_called_once_with(backing)
vops.update_backing_disk_uuid(clone, volume['id'])
else:
vops.update_backing_disk_uuid(backing, volume['id'])
    @ddt.data('sparse', 'preallocated')
    def test_create_volume_from_non_stream_optimized_image(self,
                                                           image_disk_type):
        """No disk-type conversion needed for either source disk type."""
        self._test_create_volume_from_non_stream_optimized_image(
            image_disk_type=image_disk_type)
    @ddt.data('sparse', 'preallocated')
    def test_create_volume_from_non_stream_opt_image_with_disk_conversion(
            self, image_disk_type):
        """Image disk type differs from volume's -> clone with conversion."""
        self._test_create_volume_from_non_stream_optimized_image(
            image_disk_type=image_disk_type, disk_conversion=True)
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch(
        'storage.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image(
            self, vops, copy_image, flat_extent_path, generate_uuid,
            get_temp_image_folder, copy_temp_virtual_disk):
        """Image is copied to a temp flat-extent disk, then to its target.

        The temp datastore differs from the destination, so a disk copy
        from the temp location to the destination path is expected.
        """
        dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
        uuid = mock.sentinel.uuid
        generate_uuid.return_value = uuid
        # First constructed path is the temp disk, second the destination.
        path = mock.Mock()
        dest_path = mock.Mock()
        flat_extent_path.side_effect = [path, dest_path]
        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 2 * units.Gi
        dest_dc_ref = mock.sentinel.dest_dc_ref
        dest_ds_name = mock.sentinel.dest_ds_name
        dest_folder_path = mock.sentinel.dest_folder_path
        dest_disk_name = mock.sentinel.dest_disk_name
        adapter_type = mock.sentinel.adapter_type
        ret = self._driver._create_virtual_disk_from_preallocated_image(
            context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
            dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
        exp_flat_extent_path_calls = [
            mock.call(ds_name, folder_path, uuid),
            mock.call(dest_ds_name, dest_folder_path, dest_disk_name)]
        self.assertEqual(exp_flat_extent_path_calls,
                         flat_extent_path.call_args_list)
        create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
        create_descriptor.assert_called_once_with(
            dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
            vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
        copy_image.assert_called_once_with(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, path.get_flat_extent_file_path())
        copy_temp_virtual_disk.assert_called_once_with(dc_ref, path,
                                                       dest_dc_ref, dest_path)
        self.assertEqual(dest_path, ret)
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch(
        'storage.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy(
            self, vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk):
        """Temp and destination locations coincide -> no disk copy is made.

        Same datacenter value and same datastore name, so the image is
        copied straight to the destination path.
        """
        dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
        path = mock.Mock()
        flat_extent_path.return_value = path
        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 2 * units.Gi
        # Destination DC has the same value as the temp DC.
        dest_dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
        dest_ds_name = ds_name
        dest_folder_path = mock.sentinel.dest_folder_path
        dest_disk_name = mock.sentinel.dest_disk_name
        adapter_type = mock.sentinel.adapter_type
        ret = self._driver._create_virtual_disk_from_preallocated_image(
            context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
            dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
        flat_extent_path.assert_called_once_with(
            dest_ds_name, dest_folder_path, dest_disk_name)
        create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
        create_descriptor.assert_called_once_with(
            dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
            vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
        copy_image.assert_called_once_with(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, path.get_flat_extent_file_path())
        self.assertFalse(copy_temp_virtual_disk.called)
        self.assertEqual(path, ret)
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch(
        'storage.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image_with_copy_error(
            self, vops, copy_image, flat_extent_path, generate_uuid,
            get_temp_image_folder, copy_temp_virtual_disk):
        """Image copy failure must clean up the temp disk and re-raise."""
        dc_ref = mock.Mock(value=mock.sentinel.dc_ref)
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
        uuid = mock.sentinel.uuid
        generate_uuid.return_value = uuid
        path = mock.Mock()
        dest_path = mock.Mock()
        flat_extent_path.side_effect = [path, dest_path]
        # Simulate a failure while copying the image to the temp disk.
        copy_image.side_effect = exceptions.VimException("error")
        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 2 * units.Gi
        dest_dc_ref = mock.sentinel.dest_dc_ref
        dest_ds_name = mock.sentinel.dest_ds_name
        dest_folder_path = mock.sentinel.dest_folder_path
        dest_disk_name = mock.sentinel.dest_disk_name
        adapter_type = mock.sentinel.adapter_type
        self.assertRaises(
            exceptions.VimException,
            self._driver._create_virtual_disk_from_preallocated_image,
            context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
            dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
        # The partially-created temp descriptor must be deleted, and no
        # copy to the destination attempted.
        vops.delete_file.assert_called_once_with(
            path.get_descriptor_ds_file_path(), dc_ref)
        self.assertFalse(copy_temp_virtual_disk.called)
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch(
        'storage.volume.drivers.vmware.volumeops.'
        'MonolithicSparseVirtualDiskPath')
    @mock.patch(
        'storage.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    def test_create_virtual_disk_from_sparse_image(
            self, copy_image, copy_temp_virtual_disk, flat_extent_path,
            sparse_path, generate_uuid):
        """Sparse image: download to a temp sparse disk, copy to flat extent.

        The copy converts the monolithic-sparse temp disk into the
        destination flat-extent disk in the same datacenter.
        """
        uuid = mock.sentinel.uuid
        generate_uuid.return_value = uuid
        src_path = mock.Mock()
        sparse_path.return_value = src_path
        dest_path = mock.Mock()
        flat_extent_path.return_value = dest_path
        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 2 * units.Gi
        dc_ref = mock.sentinel.dc_ref
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        disk_name = mock.sentinel.disk_name
        ret = self._driver._create_virtual_disk_from_sparse_image(
            context, image_service, image_id, image_size_in_bytes, dc_ref,
            ds_name, folder_path, disk_name)
        sparse_path.assert_called_once_with(ds_name, folder_path, uuid)
        copy_image.assert_called_once_with(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, src_path.get_descriptor_file_path())
        flat_extent_path.assert_called_once_with(
            ds_name, folder_path, disk_name)
        copy_temp_virtual_disk.assert_called_once_with(
            dc_ref, src_path, dc_ref, dest_path)
        self.assertEqual(dest_path, ret)
@mock.patch.object(VMDK_DRIVER, '_select_datastore')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_temp_image_folder(self, vops, select_datastore):
host = mock.sentinel.host
resource_pool = mock.sentinel.rp
summary = mock.Mock()
ds_name = mock.sentinel.ds_name
summary.name = ds_name
select_datastore.return_value = (host, resource_pool, summary)
dc = mock.sentinel.dc
vops.get_dc.return_value = dc
image_size = 2 * units.Gi
ret = self._driver._get_temp_image_folder(image_size)
self.assertEqual((dc, ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH),
ret)
exp_req = {
hub.DatastoreSelector.SIZE_BYTES: image_size,
hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE:
{hub.DatastoreType.VMFS, hub.DatastoreType.NFS}}
select_datastore.assert_called_once_with(exp_req)
vops.create_datastore_folder.assert_called_once_with(
ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc)
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_disk_type')
    @mock.patch.object(VMDK_DRIVER, '_get_extra_config')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch.object(image_transfer, 'download_stream_optimized_image')
    def _test_copy_image_to_volume_stream_optimized(self,
                                                    download_image,
                                                    session,
                                                    vops,
                                                    get_extra_config,
                                                    get_disk_type,
                                                    get_profile_id,
                                                    select_ds_for_volume,
                                                    download_error=False):
        """Scenario driver for _fetch_stream_optimized_image.

        :param download_error: if True, the image download raises and the
            partially-created backing is expected to be deleted
        """
        host = mock.sentinel.host
        rp = mock.sentinel.rp
        folder = mock.sentinel.folder
        # NOTE(mriedem): The summary.name gets logged so it has to be a string
        summary = mock.Mock(name=six.text_type(mock.sentinel.ds_name))
        select_ds_for_volume.return_value = (host, rp, folder, summary)
        profile_id = mock.sentinel.profile_id
        get_profile_id.return_value = profile_id
        disk_type = mock.sentinel.disk_type
        get_disk_type.return_value = disk_type
        extra_config = mock.sentinel.extra_config
        get_extra_config.return_value = extra_config
        vm_create_spec = mock.sentinel.vm_create_spec
        vops.get_create_spec.return_value = vm_create_spec
        import_spec = mock.Mock()
        session.vim.client.factory.create.return_value = import_spec
        backing = mock.sentinel.backing
        if download_error:
            download_image.side_effect = exceptions.VimException
            vops.get_backing.return_value = backing
        else:
            download_image.return_value = backing
        context = mock.sentinel.context
        volume = self._create_volume_dict(size=3)
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size = 2 * units.Gi
        adapter_type = mock.sentinel.adapter_type
        if download_error:
            self.assertRaises(
                exceptions.VimException,
                self._driver._fetch_stream_optimized_image,
                context, volume, image_service, image_id,
                image_size, adapter_type)
        else:
            self._driver._fetch_stream_optimized_image(
                context, volume, image_service, image_id, image_size,
                adapter_type)
        select_ds_for_volume.assert_called_once_with(volume)
        # The VM create spec is built disk-less (size 0); the image supplies
        # the disk contents during the HTTP import.
        vops.get_create_spec.assert_called_once_with(
            volume['name'], 0, disk_type, summary.name, profile_id=profile_id,
            adapter_type=adapter_type, extra_config=extra_config)
        self.assertEqual(vm_create_spec, import_spec.configSpec)
        download_image.assert_called_with(
            context,
            self._config.vmware_image_transfer_timeout_secs,
            image_service,
            image_id,
            session=session,
            host=self._config.vmware_host_ip,
            port=self._config.vmware_host_port,
            resource_pool=rp,
            vm_folder=folder,
            vm_import_spec=import_spec,
            image_size=image_size)
        # On failure the half-created backing is cleaned up; on success its
        # disk UUID is updated with the volume id.
        if download_error:
            self.assertFalse(vops.update_backing_disk_uuid.called)
            vops.delete_backing.assert_called_once_with(backing)
        else:
            vops.update_backing_disk_uuid.assert_called_once_with(
                backing, volume['id'])
    def test_copy_image_to_volume_stream_optimized(self):
        """Happy-path stream-optimized image download."""
        self._test_copy_image_to_volume_stream_optimized()
    def test_copy_image_to_volume_stream_optimized_with_download_error(self):
        """Download failure path: backing must be cleaned up."""
        self._test_copy_image_to_volume_stream_optimized(download_error=True)
def test_copy_volume_to_image_when_attached(self):
volume = self._create_volume_dict(
attachment=[mock.sentinel.attachment_1])
self.assertRaises(
cinder_exceptions.InvalidVolume,
self._driver.copy_volume_to_image,
mock.sentinel.context,
volume,
mock.sentinel.image_service,
mock.sentinel.image_meta)
    @mock.patch.object(VMDK_DRIVER, '_validate_disk_format')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch('oslo_vmware.image_transfer.upload_image')
    @mock.patch.object(VMDK_DRIVER, 'session')
    def test_copy_volume_to_image(
            self, session, upload_image, vops, validate_disk_format):
        """copy_volume_to_image uploads the backing's vmdk to glance."""
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        vmdk_file_path = mock.sentinel.vmdk_file_path
        vops.get_vmdk_path.return_value = vmdk_file_path
        context = mock.sentinel.context
        volume = self._create_volume_dict()
        image_service = mock.sentinel.image_service
        image_meta = self._create_image_meta()
        self._driver.copy_volume_to_image(
            context, volume, image_service, image_meta)
        validate_disk_format.assert_called_once_with(image_meta['disk_format'])
        vops.get_backing.assert_called_once_with(volume['name'])
        vops.get_vmdk_path.assert_called_once_with(backing)
        upload_image.assert_called_once_with(
            context,
            self._config.vmware_image_transfer_timeout_secs,
            image_service,
            image_meta['id'],
            volume['project_id'],
            session=session,
            host=self._config.vmware_host_ip,
            port=self._config.vmware_host_port,
            vm=backing,
            vmdk_file_path=vmdk_file_path,
            vmdk_size=volume['size'] * units.Gi,
            image_name=image_meta['name'],
            image_version=1,
            is_public=image_meta['is_public'])
    @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    @mock.patch('storage.volume.volume_types.get_volume_type_extra_specs')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
                    get_volume_group_folder, generate_uuid,
                    delete_temp_backing):
        """Entry point: delegates all retype scenarios to _test_retype."""
        self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
                          get_volume_group_folder, generate_uuid,
                          delete_temp_backing)
def test_in_use(self):
# Test with in-use volume.
vol = {'size': 1, 'status': 'in-use', 'name': 'vol-1',
'volume_type_id': 'def'}
vol['volume_attachment'] = [mock.sentinel.volume_attachment]
self.assertTrue(self._driver._in_use(vol))
# Test with available volume.
vol['status'] = 'available'
vol['volume_attachment'] = None
self.assertIsNone(self._driver._in_use(vol))
vol['volume_attachment'] = []
ret = self._driver._in_use(vol)
# _in_use returns [] here
self.assertFalse(ret)
self.assertEqual(0, len(ret))
    def _test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
                     get_volume_group_folder, genereate_uuid,
                     delete_temp_backing):
        """Exercise retype through a sequence of dependent scenarios.

        NOTE: the scenarios run in order and reuse mock state; each
        `get_volume_type_extra_specs.side_effect` list supplies, in order:
        current disk type, new disk type, current profile, new profile.
        """
        self._driver._storage_policy_enabled = True
        context = mock.sentinel.context
        diff = mock.sentinel.diff
        host = mock.sentinel.host
        new_type = {'id': 'abc'}
        # Test with in-use volume.
        vol = {'size': 1, 'status': 'retyping', 'name': 'vol-1',
               'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e',
               'volume_type_id': 'def',
               'project_id': '63c19a12292549818c09946a5e59ddaf'}
        vol['volume_attachment'] = [mock.sentinel.volume_attachment]
        self.assertFalse(self._driver.retype(context, vol, new_type, diff,
                                             host))
        # Test with no backing.
        vops.get_backing.return_value = None
        vol['volume_attachment'] = None
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        # Test with no disk type conversion, no profile change and
        # compliant datastore.
        ds_value = mock.sentinel.datastore_value
        datastore = mock.Mock(value=ds_value)
        vops.get_datastore.return_value = datastore
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   None,
                                                   None]
        ds_sel.is_datastore_compliant.return_value = True
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        # Test with no disk type conversion, profile change and
        # compliant datastore.
        new_profile = mock.sentinel.new_profile
        get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        ds_sel.is_datastore_compliant.return_value = True
        profile_id = mock.sentinel.profile_id
        ds_sel.get_profile_id.return_value = profile_id
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        vops.change_backing_profile.assert_called_once_with(backing,
                                                            profile_id)
        # Test with disk type conversion, profile change and a backing with
        # snapshots. Also test the no candidate datastore case.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.snapshot_exists.return_value = True
        ds_sel.select_datastore.return_value = ()
        self.assertFalse(self._driver.retype(context, vol, new_type, diff,
                                             host))
        exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
                   hub.DatastoreSelector.PROFILE_NAME: new_profile,
                   hub.DatastoreSelector.SIZE_BYTES: units.Gi}
        ds_sel.select_datastore.assert_called_once_with(exp_req)
        # Modify the previous case with a candidate datastore which is
        # different than the backing's current datastore.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.snapshot_exists.return_value = True
        host = mock.sentinel.host
        rp = mock.sentinel.rp
        candidate_ds = mock.Mock(value=mock.sentinel.candidate_ds_value)
        summary = mock.Mock(datastore=candidate_ds)
        ds_sel.select_datastore.return_value = (host, rp, summary)
        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder
        vops.change_backing_profile.reset_mock()
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        vops.relocate_backing.assert_called_once_with(
            backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
        vops.move_backing_to_folder.assert_called_once_with(backing, folder)
        vops.change_backing_profile.assert_called_once_with(backing,
                                                            profile_id)
        # Modify the previous case with no profile change.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   'gold-1']
        ds_sel.select_datastore.reset_mock()
        vops.relocate_backing.reset_mock()
        vops.move_backing_to_folder.reset_mock()
        vops.change_backing_profile.reset_mock()
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
                   hub.DatastoreSelector.PROFILE_NAME: 'gold-1',
                   hub.DatastoreSelector.SIZE_BYTES: units.Gi}
        ds_sel.select_datastore.assert_called_once_with(exp_req)
        vops.relocate_backing.assert_called_once_with(
            backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
        vops.move_backing_to_folder.assert_called_once_with(backing, folder)
        self.assertFalse(vops.change_backing_profile.called)
        # Test with disk type conversion, profile change, backing with
        # no snapshots and candidate datastore which is same as the backing
        # datastore.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.snapshot_exists.return_value = False
        summary.datastore = datastore
        uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
        genereate_uuid.return_value = uuid
        clone = mock.sentinel.clone
        vops.clone_backing.return_value = clone
        vops.change_backing_profile.reset_mock()
        self.assertTrue(self._driver.retype(context, vol, new_type, diff,
                                            host))
        vops.rename_backing.assert_called_once_with(backing, uuid)
        vops.clone_backing.assert_called_once_with(
            vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
            datastore, disk_type=vmdk.THIN_VMDK_TYPE, host=host,
            resource_pool=rp, folder=folder)
        vops.update_backing_disk_uuid.assert_called_once_with(clone, vol['id'])
        delete_temp_backing.assert_called_once_with(backing)
        vops.change_backing_profile.assert_called_once_with(clone,
                                                            profile_id)
        # Modify the previous case with exception during clone.
        get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
                                                   vmdk.THIN_VMDK_TYPE,
                                                   'gold-1',
                                                   new_profile]
        vops.clone_backing.side_effect = exceptions.VimException('error')
        vops.update_backing_disk_uuid.reset_mock()
        vops.rename_backing.reset_mock()
        vops.change_backing_profile.reset_mock()
        self.assertRaises(
            exceptions.VimException, self._driver.retype, context, vol,
            new_type, diff, host)
        self.assertFalse(vops.update_backing_disk_uuid.called)
        # On clone failure the backing must be renamed back to the volume
        # name after having been renamed to the temp uuid.
        exp_rename_calls = [mock.call(backing, uuid),
                            mock.call(backing, vol['name'])]
        self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
        self.assertFalse(vops.change_backing_profile.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_backing(self, vops):
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
dc = mock.sentinel.datacenter
vops.get_dc.return_value = dc
backing = mock.sentinel.backing
new_size = 1
self._driver._extend_backing(backing, new_size)
vops.get_vmdk_path.assert_called_once_with(backing)
vops.get_dc.assert_called_once_with(backing)
vops.extend_virtual_disk.assert_called_once_with(new_size,
vmdk_path,
dc)
    @mock.patch.object(image_transfer, 'copy_stream_optimized_disk')
    @mock.patch('storage.volume.drivers.vmware.vmdk.open', create=True)
    @mock.patch.object(VMDK_DRIVER, '_temporary_file')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    def test_backup_volume(self, session, vops, create_backing, generate_uuid,
                           temporary_file, file_open, copy_disk):
        """Entry point: delegates the scenario to _test_backup_volume."""
        self._test_backup_volume(session, vops, create_backing, generate_uuid,
                                 temporary_file, file_open, copy_disk)
def _test_backup_volume(self, session, vops, create_backing, generate_uuid,
                        temporary_file, file_open, copy_disk):
    """Verify backup_volume creates a backing when none exists, downloads
    the stream-optimized disk into a temporary file, and then hands that
    file to the backup service.
    """
    volume = {'name': 'vol-1', 'id': 1, 'size': 1}
    self._db.volume_get.return_value = volume
    # No existing backing -- the driver is expected to create one first.
    vops.get_backing.return_value = None
    backing = mock.sentinel.backing
    create_backing.return_value = backing
    uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
    generate_uuid.return_value = uuid
    tmp_file_path = mock.sentinel.tmp_file_path
    temporary_file_ret = mock.Mock()
    temporary_file.return_value = temporary_file_ret
    # _temporary_file is used as a context manager; stub __enter__/__exit__.
    temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
    temporary_file_ret.__exit__ = mock.Mock(return_value=None)
    vmdk_path = mock.sentinel.vmdk_path
    vops.get_vmdk_path.return_value = vmdk_path
    tmp_file = mock.sentinel.tmp_file
    file_open_ret = mock.Mock()
    file_open.return_value = file_open_ret
    # open() is also used as a context manager.
    file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
    file_open_ret.__exit__ = mock.Mock(return_value=None)
    context = mock.sentinel.context
    backup = {'id': 2, 'volume_id': 1}
    backup_service = mock.Mock()
    self._driver.backup_volume(context, backup, backup_service)
    create_backing.assert_called_once_with(volume)
    temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
    # First open is for writing the downloaded disk to the temp file ...
    self.assertEqual(mock.call(tmp_file_path, "wb"),
                     file_open.call_args_list[0])
    copy_disk.assert_called_once_with(
        context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
        host=self.IP, port=self.PORT, vm=backing, vmdk_file_path=vmdk_path,
        vmdk_size=volume['size'] * units.Gi)
    # ... second open is for reading it back for the backup service.
    self.assertEqual(mock.call(tmp_file_path, "rb"),
                     file_open.call_args_list[1])
    backup_service.backup.assert_called_once_with(backup, tmp_file)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('storage.volume.drivers.vmware.vmdk.open', create=True)
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
                        file_open, restore_backing, extend_volume):
    """Test restore_backup; delegates to the shared _test_restore_backup."""
    self._test_restore_backup(vops, generate_uuid, temporary_file,
                              file_open, restore_backing, extend_volume)
def _test_restore_backup(
        self, vops, generate_uuid, temporary_file, file_open,
        restore_backing, extend_volume):
    """Verify restore_backup: rejects a volume whose backing has
    snapshots, restores through a temporary file, and extends the volume
    only when it is larger than the backup.
    """
    volume = {'name': 'vol-1', 'id': 1, 'size': 1}
    backup = {'id': 2, 'size': 1}
    context = mock.sentinel.context
    backup_service = mock.Mock()
    backing = mock.sentinel.backing
    vops.get_backing.return_value = backing
    # Case 1: a backing with snapshots cannot be restored onto.
    vops.snapshot_exists.return_value = True
    self.assertRaises(
        cinder_exceptions.InvalidVolume, self._driver.restore_backup,
        context, backup, volume, backup_service)
    uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
    generate_uuid.return_value = uuid
    tmp_file_path = mock.sentinel.tmp_file_path
    temporary_file_ret = mock.Mock()
    temporary_file.return_value = temporary_file_ret
    # Context-manager stubs for _temporary_file and open().
    temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
    temporary_file_ret.__exit__ = mock.Mock(return_value=None)
    tmp_file = mock.sentinel.tmp_file
    file_open_ret = mock.Mock()
    file_open.return_value = file_open_ret
    file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
    file_open_ret.__exit__ = mock.Mock(return_value=None)
    vops.snapshot_exists.return_value = False
    # Case 2: volume size equals backup size -- no extend expected.
    self._driver.restore_backup(context, backup, volume, backup_service)
    temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
    file_open.assert_called_once_with(tmp_file_path, "wb")
    backup_service.restore.assert_called_once_with(
        backup, volume['id'], tmp_file)
    restore_backing.assert_called_once_with(
        context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
    self.assertFalse(extend_volume.called)
    temporary_file.reset_mock()
    file_open.reset_mock()
    backup_service.reset_mock()
    restore_backing.reset_mock()
    # Case 3: volume larger than the backup -- restore, then extend to
    # the volume's size.
    volume = {'name': 'vol-1', 'id': 1, 'size': 2}
    self._driver.restore_backup(context, backup, volume, backup_service)
    temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
    file_open.assert_called_once_with(tmp_file_path, "wb")
    backup_service.restore.assert_called_once_with(
        backup, volume['id'], tmp_file)
    restore_backing.assert_called_once_with(
        context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
    extend_volume.assert_called_once_with(volume, volume['size'])
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
    'storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
                   '_create_backing_from_stream_optimized_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_restore_backing(
        self, generate_uuid, create_backing, select_ds, get_disk_type,
        vops, delete_temp_backing):
    """Test _restore_backing; delegates to _test_restore_backing."""
    self._test_restore_backing(
        generate_uuid, create_backing, select_ds, get_disk_type, vops,
        delete_temp_backing)
def _test_restore_backing(
        self, generate_uuid, create_backing, select_ds, get_disk_type,
        vops, delete_temp_backing):
    """Verify _restore_backing: imports the backup into a temporary
    backing, clones it into place, and rolls back renames on failure.
    """
    src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
    generate_uuid.return_value = src_uuid
    src = mock.sentinel.src
    create_backing.return_value = src
    summary = mock.Mock()
    summary.datastore = mock.sentinel.datastore
    select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
                              mock.sentinel.folder, summary)
    disk_type = vmdk.THIN_VMDK_TYPE
    get_disk_type.return_value = disk_type
    dest = mock.sentinel.dest
    vops.clone_backing.return_value = dest
    context = mock.sentinel.context
    volume = {'name': 'vol-1',
              'id': 'bd45dfe5-d411-435d-85ac-2605fe7d5d8f', 'size': 1}
    backing = None
    tmp_file_path = mock.sentinel.tmp_file_path
    backup_size = units.Gi
    # Case 1: no existing backing -- the temp backing is cloned straight
    # to the volume name, then deleted.
    self._driver._restore_backing(
        context, volume, backing, tmp_file_path, backup_size)
    create_backing.assert_called_once_with(
        context, src_uuid, volume, tmp_file_path, backup_size)
    vops.clone_backing.assert_called_once_with(
        volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
        summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
        resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
    vops.update_backing_disk_uuid.assert_called_once_with(dest,
                                                          volume['id'])
    delete_temp_backing.assert_called_once_with(src)
    create_backing.reset_mock()
    vops.clone_backing.reset_mock()
    vops.update_backing_disk_uuid.reset_mock()
    delete_temp_backing.reset_mock()
    # Case 2: existing backing -- clone under a temp name, swap via two
    # renames, then delete both the old backing and the temp source.
    dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b"
    tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa"
    generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
    backing = mock.sentinel.backing
    self._driver._restore_backing(
        context, volume, backing, tmp_file_path, backup_size)
    create_backing.assert_called_once_with(
        context, src_uuid, volume, tmp_file_path, backup_size)
    vops.clone_backing.assert_called_once_with(
        dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
        summary.datastore, disk_type=disk_type, host=mock.sentinel.host,
        resource_pool=mock.sentinel.rp, folder=mock.sentinel.folder)
    vops.update_backing_disk_uuid.assert_called_once_with(dest,
                                                          volume['id'])
    exp_rename_calls = [mock.call(backing, tmp_uuid),
                        mock.call(dest, volume['name'])]
    self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
    exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)]
    self.assertEqual(exp_delete_temp_backing_calls,
                     delete_temp_backing.call_args_list)
    delete_temp_backing.reset_mock()
    vops.rename_backing.reset_mock()

    # Case 3: renaming the clone to the volume name fails -- the original
    # backing must be renamed back and the clone deleted.
    def vops_rename(backing, new_name):
        if backing == dest and new_name == volume['name']:
            raise exceptions.VimException("error")

    vops.rename_backing.side_effect = vops_rename
    generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
    self.assertRaises(
        exceptions.VimException, self._driver._restore_backing, context,
        volume, backing, tmp_file_path, backup_size)
    exp_rename_calls = [mock.call(backing, tmp_uuid),
                        mock.call(dest, volume['name']),
                        mock.call(backing, volume['name'])]
    self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
    exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)]
    self.assertEqual(exp_delete_temp_backing_calls,
                     delete_temp_backing.call_args_list)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(image_transfer, 'download_stream_optimized_data')
@mock.patch('storage.volume.drivers.vmware.vmdk.open', create=True)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
        self, select_ds, session, get_storage_profile_id, get_disk_type,
        vops, file_open, download_data, delete_temp_backing):
    """Test _create_backing_from_stream_optimized_file; delegates to the
    shared helper below."""
    self._test_create_backing_from_stream_optimized_file(
        select_ds, session, get_storage_profile_id, get_disk_type, vops,
        file_open, download_data, delete_temp_backing)
def _test_create_backing_from_stream_optimized_file(
        self, select_ds, session, get_storage_profile_id, get_disk_type,
        vops, file_open, download_data, delete_temp_backing):
    """Verify a backing is created by uploading the stream-optimized file
    and that a partially created backing is deleted on upload failure.
    """
    rp = mock.sentinel.rp
    folder = mock.sentinel.folder
    summary = mock.Mock()
    summary.name = mock.sentinel.name
    select_ds.return_value = (mock.ANY, rp, folder, summary)
    import_spec = mock.Mock()
    session.vim.client.factory.create.return_value = import_spec
    profile_id = 'profile-1'
    get_storage_profile_id.return_value = profile_id
    disk_type = vmdk.THIN_VMDK_TYPE
    get_disk_type.return_value = disk_type
    create_spec = mock.Mock()
    vops.get_create_spec.return_value = create_spec
    tmp_file = mock.sentinel.tmp_file
    file_open_ret = mock.Mock()
    file_open.return_value = file_open_ret
    # open() is used as a context manager; stub __enter__/__exit__.
    file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
    file_open_ret.__exit__ = mock.Mock(return_value=None)
    vm_ref = mock.sentinel.vm_ref
    download_data.return_value = vm_ref
    context = mock.sentinel.context
    name = 'vm-1'
    volume = {'name': 'vol-1',
              'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e',
              'size': 1}
    tmp_file_path = mock.sentinel.tmp_file_path
    file_size_bytes = units.Gi
    # Success path: the imported VM reference is returned.
    ret = self._driver._create_backing_from_stream_optimized_file(
        context, name, volume, tmp_file_path, file_size_bytes)
    self.assertEqual(vm_ref, ret)
    extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']}
    vops.get_create_spec.assert_called_once_with(
        name, 0, disk_type, summary.name, profileId=profile_id,
        extra_config=extra_config)
    file_open.assert_called_once_with(tmp_file_path, "rb")
    download_data.assert_called_once_with(
        context, self._config.vmware_image_transfer_timeout_secs, tmp_file,
        session=session, host=self._config.vmware_host_ip,
        port=self._config.vmware_host_port, resource_pool=rp,
        vm_folder=folder, vm_import_spec=import_spec,
        image_size=file_size_bytes)
    # Failure path: the partially created backing must be cleaned up.
    download_data.side_effect = exceptions.VimException("error")
    backing = mock.sentinel.backing
    vops.get_backing.return_value = backing
    self.assertRaises(
        exceptions.VimException,
        self._driver._create_backing_from_stream_optimized_file,
        context, name, volume, tmp_file_path, file_size_bytes)
    delete_temp_backing.assert_called_once_with(backing)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'session', new_callable=mock.PropertyMock)
def test_get_vc_version(self, session):
    """Verify _get_vc_version prefers the configured version and falls
    back to the version reported by the vCenter server."""
    # test config overrides fetching from vCenter server
    version = self._driver._get_vc_version()
    self.assertEqual(ver.LooseVersion(self.DEFAULT_VC_VERSION), version)
    # explicitly remove config entry
    self._driver.configuration.vmware_host_version = None
    session.return_value.vim.service_content.about.version = '6.0.1'
    version = self._driver._get_vc_version()
    self.assertEqual(ver.LooseVersion('6.0.1'), version)
@ddt.data('5.1', '5.5')
def test_validate_vcenter_version(self, version):
    """Verify supported vCenter versions pass validation without error."""
    # vCenter versions 5.1 and above should pass validation.
    self._driver._validate_vcenter_version(ver.LooseVersion(version))
def test_validate_vcenter_version_with_less_than_min_supported_version(
        self):
    """Verify validation rejects a vCenter version below the minimum."""
    vc_version = ver.LooseVersion('5.0')
    # Validation should fail for vCenter version less than 5.1.
    self.assertRaises(exceptions.VMwareDriverException,
                      self._driver._validate_vcenter_version,
                      vc_version)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_validate_vcenter_version')
@mock.patch('storage.volume.drivers.vmware.volumeops.VMwareVolumeOps')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_get_vc_version')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'session', new_callable=mock.PropertyMock)
def test_do_setup_with_pbm_disabled(self, session, get_vc_version,
                                    vops_cls, validate_vc_version):
    """Verify do_setup against vCenter 5.0: storage-policy support stays
    disabled while volumeops/ds_sel share the driver session and cluster
    references are resolved."""
    session_obj = mock.Mock(name='session')
    session.return_value = session_obj
    vc_version = ver.LooseVersion('5.0')
    get_vc_version.return_value = vc_version
    cluster_refs = mock.Mock()
    cluster_refs.values.return_value = mock.sentinel.cluster_refs
    vops = mock.Mock()
    vops.get_cluster_refs.return_value = cluster_refs

    # Capture the session/max_objects the driver passes when it
    # constructs VMwareVolumeOps.
    def vops_side_effect(session, max_objects):
        vops._session = session
        vops._max_objects = max_objects
        return vops

    vops_cls.side_effect = vops_side_effect
    self._driver.do_setup(mock.ANY)
    validate_vc_version.assert_called_once_with(vc_version)
    self.assertFalse(self._driver._storage_policy_enabled)
    get_vc_version.assert_called_once_with()
    self.assertEqual(session_obj, self._driver.volumeops._session)
    self.assertEqual(session_obj, self._driver.ds_sel._session)
    self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
    vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_validate_vcenter_version')
@mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_get_vc_version')
def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
                                        get_pbm_wsdl_location,
                                        validate_vc_version):
    """Verify do_setup fails when no PBM WSDL location can be resolved
    for a PBM-capable vCenter, leaving storage policy disabled."""
    vc_version = ver.LooseVersion('5.5')
    get_vc_version.return_value = vc_version
    # No WSDL location available -- setup must raise.
    get_pbm_wsdl_location.return_value = None
    self.assertRaises(exceptions.VMwareDriverException,
                      self._driver.do_setup,
                      mock.ANY)
    validate_vc_version.assert_called_once_with(vc_version)
    self.assertFalse(self._driver._storage_policy_enabled)
    get_vc_version.assert_called_once_with()
    get_pbm_wsdl_location.assert_called_once_with(
        six.text_type(vc_version))
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_validate_vcenter_version')
@mock.patch('storage.volume.drivers.vmware.volumeops.VMwareVolumeOps')
@mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_get_vc_version')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'session', new_callable=mock.PropertyMock)
def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location,
                  vops_cls, validate_vc_version):
    """Verify the do_setup happy path against a PBM-capable vCenter:
    storage policy gets enabled and session/cluster wiring is done."""
    session_obj = mock.Mock(name='session')
    session.return_value = session_obj
    vc_version = ver.LooseVersion('5.5')
    get_vc_version.return_value = vc_version
    get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
    cluster_refs = mock.Mock()
    cluster_refs.values.return_value = mock.sentinel.cluster_refs
    vops = mock.Mock()
    vops.get_cluster_refs.return_value = cluster_refs

    # Capture the session/max_objects the driver passes when it
    # constructs VMwareVolumeOps.
    def vops_side_effect(session, max_objects):
        vops._session = session
        vops._max_objects = max_objects
        return vops

    vops_cls.side_effect = vops_side_effect
    self._driver.do_setup(mock.ANY)
    validate_vc_version.assert_called_once_with(vc_version)
    self.assertTrue(self._driver._storage_policy_enabled)
    get_vc_version.assert_called_once_with()
    get_pbm_wsdl_location.assert_called_once_with(
        six.text_type(vc_version))
    self.assertEqual(session_obj, self._driver.volumeops._session)
    self.assertEqual(session_obj, self._driver.ds_sel._session)
    self.assertEqual(mock.sentinel.cluster_refs, self._driver._clusters)
    vops.get_cluster_refs.assert_called_once_with(self.CLUSTERS)
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
def test_select_ds_for_volume(self, get_volume_group_folder, vops, ds_sel,
                              get_storage_profile):
    """Verify _select_ds_for_volume selects a datastore restricted to the
    given host and resolves the project's volume group folder."""
    profile = mock.sentinel.profile
    get_storage_profile.return_value = profile
    host_ref = mock.sentinel.host_ref
    rp = mock.sentinel.rp
    summary = mock.sentinel.summary
    ds_sel.select_datastore.return_value = (host_ref, rp, summary)
    dc = mock.sentinel.dc
    vops.get_dc.return_value = dc
    folder = mock.sentinel.folder
    get_volume_group_folder.return_value = folder
    host = mock.sentinel.host
    project_id = '63c19a12292549818c09946a5e59ddaf'
    vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
           'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0',
           'project_id': project_id}
    ret = self._driver._select_ds_for_volume(vol, host)
    self.assertEqual((host_ref, rp, folder, summary), ret)
    # The selector request carries the required size and profile name.
    exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
               hub.DatastoreSelector.PROFILE_NAME: profile}
    ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=[host])
    vops.get_dc.assert_called_once_with(rp)
    get_volume_group_folder.assert_called_once_with(dc, project_id)
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
def test_select_ds_for_volume_with_no_host(
        self, get_volume_group_folder, vops, ds_sel, get_storage_profile):
    """Verify datastore selection with no host restriction passes
    hosts=None to the datastore selector."""
    profile = mock.sentinel.profile
    get_storage_profile.return_value = profile
    host_ref = mock.sentinel.host_ref
    rp = mock.sentinel.rp
    summary = mock.sentinel.summary
    ds_sel.select_datastore.return_value = (host_ref, rp, summary)
    dc = mock.sentinel.dc
    vops.get_dc.return_value = dc
    folder = mock.sentinel.folder
    get_volume_group_folder.return_value = folder
    project_id = '63c19a12292549818c09946a5e59ddaf'
    vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
           'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0',
           'project_id': project_id}
    ret = self._driver._select_ds_for_volume(vol)
    self.assertEqual((host_ref, rp, folder, summary), ret)
    exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
               hub.DatastoreSelector.PROFILE_NAME: profile}
    ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
    vops.get_dc.assert_called_once_with(rp)
    get_volume_group_folder.assert_called_once_with(dc, project_id)
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_ds_for_volume_with_no_best_candidate(
        self, ds_sel, get_storage_profile):
    """Verify NoValidDatastoreException is raised when the selector
    returns no candidate datastore."""
    profile = mock.sentinel.profile
    get_storage_profile.return_value = profile
    # Empty tuple from the selector means nothing satisfied the request.
    ds_sel.select_datastore.return_value = ()
    vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
           'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0'}
    self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
                      self._driver._select_ds_for_volume, vol)
    exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
               hub.DatastoreSelector.PROFILE_NAME: profile}
    ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_relocate_backing')
def test_initialize_connection_with_instance_and_backing(
        self, relocate_backing, vops):
    """Verify initialize_connection relocates an existing backing to the
    connecting instance's host and returns vmdk connection info."""
    instance = mock.sentinel.instance
    connector = {'instance': instance}
    backing = mock.Mock(value=mock.sentinel.backing_value)
    vops.get_backing.return_value = backing
    host = mock.sentinel.host
    vops.get_host.return_value = host
    volume = {'name': 'vol-1', 'id': 1}
    conn_info = self._driver.initialize_connection(volume, connector)
    relocate_backing.assert_called_once_with(volume, backing, host)
    self.assertEqual('vmdk', conn_info['driver_volume_type'])
    self.assertEqual(backing.value, conn_info['data']['volume'])
    self.assertEqual(volume['id'],
                     conn_info['data']['volume_id'])
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_relocate_backing')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
def test_initialize_connection_with_instance_and_no_backing(
        self, create_backing, relocate_backing, vops):
    """Verify initialize_connection creates a backing on the instance's
    host when none exists; no relocation is needed in that case."""
    instance = mock.sentinel.instance
    connector = {'instance': instance}
    vops.get_backing.return_value = None
    host = mock.sentinel.host
    vops.get_host.return_value = host
    backing = mock.Mock(value=mock.sentinel.backing_value)
    create_backing.return_value = backing
    volume = {'name': 'vol-1', 'id': 1}
    conn_info = self._driver.initialize_connection(volume, connector)
    create_backing.assert_called_once_with(volume, host)
    self.assertFalse(relocate_backing.called)
    self.assertEqual('vmdk', conn_info['driver_volume_type'])
    self.assertEqual(backing.value, conn_info['data']['volume'])
    self.assertEqual(volume['id'],
                     conn_info['data']['volume_id'])
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_relocate_backing')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
def test_initialize_connection_with_no_instance_and_no_backing(
        self, create_backing, relocate_backing, vops):
    """Verify initialize_connection with an empty connector creates a
    backing without a host hint and skips relocation."""
    vops.get_backing.return_value = None
    host = mock.sentinel.host
    vops.get_host.return_value = host
    backing = mock.Mock(value=mock.sentinel.backing_value)
    create_backing.return_value = backing
    connector = {}
    volume = {'name': 'vol-1', 'id': 1}
    conn_info = self._driver.initialize_connection(volume, connector)
    # No instance in the connector -- backing is created with no host.
    create_backing.assert_called_once_with(volume)
    self.assertFalse(relocate_backing.called)
    self.assertEqual('vmdk', conn_info['driver_volume_type'])
    self.assertEqual(backing.value, conn_info['data']['volume'])
    self.assertEqual(volume['id'],
                     conn_info['data']['volume_id'])
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_get_volume_group_folder(self, vops):
    """Verify _get_volume_group_folder builds the per-project inventory
    folder path OpenStack/Project (<id>)/<volume folder>."""
    folder = mock.sentinel.folder
    vops.create_vm_inventory_folder.return_value = folder
    datacenter = mock.sentinel.dc
    project_id = '63c19a12292549818c09946a5e59ddaf'
    self.assertEqual(folder,
                     self._driver._get_volume_group_folder(datacenter,
                                                           project_id))
    project_folder_name = 'Project (%s)' % project_id
    vops.create_vm_inventory_folder.assert_called_once_with(
        datacenter, ['OpenStack', project_folder_name, self.VOLUME_FOLDER])
@mock.patch('storage.volume.drivers.vmware.vmdk.'
            '_get_volume_type_extra_spec')
@ddt.data('full', 'linked')
def test_get_clone_type(self, clone_type, get_volume_type_extra_spec):
    """Verify _get_clone_type returns the clone_type extra spec, with
    full clone as the default."""
    get_volume_type_extra_spec.return_value = clone_type
    volume = self._create_volume_dict()
    self.assertEqual(clone_type, self._driver._get_clone_type(volume))
    get_volume_type_extra_spec.assert_called_once_with(
        volume['volume_type_id'], 'clone_type',
        default_value=volumeops.FULL_CLONE_TYPE)
@mock.patch('storage.volume.drivers.vmware.vmdk.'
            '_get_volume_type_extra_spec')
def test_get_clone_type_invalid(
        self, get_volume_type_extra_spec):
    """Verify _get_clone_type rejects an unrecognized clone_type value."""
    get_volume_type_extra_spec.return_value = 'foo'
    volume = self._create_volume_dict()
    self.assertRaises(
        cinder_exceptions.Invalid, self._driver._get_clone_type, volume)
    get_volume_type_extra_spec.assert_called_once_with(
        volume['volume_type_id'], 'clone_type',
        default_value=volumeops.FULL_CLONE_TYPE)
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_linked(self, volume_ops, extend_backing):
    """Test _clone_backing with clone type - linked."""
    clone = mock.sentinel.clone
    volume_ops.clone_backing.return_value = clone
    fake_size = 3
    fake_volume = {'volume_type_id': None, 'name': 'fake_name',
                   'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
                   'size': fake_size}
    fake_snapshot = {'volume_name': 'volume_name',
                     'name': 'snapshot_name',
                     'volume_size': 2}
    fake_type = volumeops.LINKED_CLONE_TYPE
    fake_backing = mock.sentinel.backing
    self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                volumeops.LINKED_CLONE_TYPE,
                                fake_snapshot['volume_size'])
    # A linked clone is created in place (no datastore/host/rp/folder).
    extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']}
    volume_ops.clone_backing.assert_called_with(fake_volume['name'],
                                                fake_backing,
                                                fake_snapshot,
                                                fake_type,
                                                None,
                                                host=None,
                                                resource_pool=None,
                                                extra_config=extra_config,
                                                folder=None)
    volume_ops.update_backing_disk_uuid.assert_called_once_with(
        clone, fake_volume['id'])
    # If the volume size is greater than the original snapshot size,
    # _extend_backing will be called.
    extend_backing.assert_called_with(clone, fake_volume['size'])
    # If the volume size is not greater than the original snapshot size,
    # _extend_backing will not be called.
    fake_size = 2
    fake_volume['size'] = fake_size
    extend_backing.reset_mock()
    self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                volumeops.LINKED_CLONE_TYPE,
                                fake_snapshot['volume_size'])
    self.assertFalse(extend_backing.called)
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_full(self, volume_ops, _select_ds_for_volume,
                            extend_backing):
    """Test _clone_backing with clone type - full."""
    fake_host = mock.sentinel.host
    fake_folder = mock.sentinel.folder
    fake_datastore = mock.sentinel.datastore
    fake_resource_pool = mock.sentinel.resourcePool
    fake_summary = mock.Mock(spec=object)
    fake_summary.datastore = fake_datastore
    fake_size = 3
    _select_ds_for_volume.return_value = (fake_host,
                                          fake_resource_pool,
                                          fake_folder, fake_summary)
    clone = mock.sentinel.clone
    volume_ops.clone_backing.return_value = clone
    fake_backing = mock.sentinel.backing
    fake_volume = {'volume_type_id': None, 'name': 'fake_name',
                   'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
                   'size': fake_size}
    fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name',
                     'volume_size': 2}
    self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                volumeops.FULL_CLONE_TYPE,
                                fake_snapshot['volume_size'])
    # A full clone goes through datastore selection for its placement.
    _select_ds_for_volume.assert_called_with(fake_volume)
    extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: fake_volume['id']}
    volume_ops.clone_backing.assert_called_with(
        fake_volume['name'],
        fake_backing,
        fake_snapshot,
        volumeops.FULL_CLONE_TYPE,
        fake_datastore,
        host=fake_host,
        resource_pool=fake_resource_pool,
        extra_config=extra_config,
        folder=fake_folder)
    volume_ops.update_backing_disk_uuid.assert_called_once_with(
        clone, fake_volume['id'])
    # If the volume size is greater than the original snapshot size,
    # _extend_backing will be called.
    extend_backing.assert_called_with(clone, fake_volume['size'])
    # If the volume size is not greater than the original snapshot size,
    # _extend_backing will not be called.
    fake_size = 2
    fake_volume['size'] = fake_size
    extend_backing.reset_mock()
    self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                volumeops.FULL_CLONE_TYPE,
                                fake_snapshot['volume_size'])
    self.assertFalse(extend_backing.called)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
    """Test create_volume_from_snapshot without a backing."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'name': 'mock_vol'}
    snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
    driver._verify_volume_creation = mock.MagicMock()
    # No backing exists for the source volume.
    mock_vops.get_backing.return_value = None
    # invoke the create_volume_from_snapshot api
    driver.create_volume_from_snapshot(volume, snapshot)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
    """Test create_volume_from_snapshot without a backing snapshot."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'volume_type_id': None, 'name': 'mock_vol'}
    snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
    backing = mock.sentinel.backing
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    # The backing exists but has no snapshot with the requested name.
    mock_vops.get_snapshot.return_value = None
    # invoke the create_volume_from_snapshot api
    driver.create_volume_from_snapshot(volume, snapshot)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('mock_vol')
    mock_vops.get_snapshot.assert_called_once_with(backing,
                                                   'mock_snap')
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
    """Test create_volume_from_snapshot."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'volume_type_id': None, 'name': 'mock_vol'}
    snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
                'volume_size': 2}
    backing = mock.sentinel.backing
    snap_moref = mock.sentinel.snap_moref
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    mock_vops.get_snapshot.return_value = snap_moref
    driver._clone_backing = mock.MagicMock()
    # invoke the create_volume_from_snapshot api
    driver.create_volume_from_snapshot(volume, snapshot)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('mock_vol')
    mock_vops.get_snapshot.assert_called_once_with(backing,
                                                   'mock_snap')
    # Full clone is the default clone type (no volume type extra spec).
    default_clone_type = volumeops.FULL_CLONE_TYPE
    driver._clone_backing.assert_called_once_with(volume,
                                                  backing,
                                                  snap_moref,
                                                  default_clone_type,
                                                  snapshot['volume_size'])
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
    """Test create_cloned_volume without a backing."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'name': 'mock_vol'}
    src_vref = {'name': 'src_snapshot_name'}
    driver._verify_volume_creation = mock.MagicMock()
    # Source volume has no backing; the call should simply return.
    mock_vops.get_backing.return_value = None
    # invoke the create_volume_from_snapshot api
    driver.create_cloned_volume(volume, src_vref)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
    """Test create_cloned_volume with clone type - full."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'volume_type_id': None, 'name': 'mock_vol'}
    src_vref = {'name': 'src_snapshot_name', 'size': 1}
    backing = mock.sentinel.backing
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    # Full clone is the default clone type (no volume type extra spec).
    default_clone_type = volumeops.FULL_CLONE_TYPE
    driver._clone_backing = mock.MagicMock()
    # invoke the create_volume_from_snapshot api
    driver.create_cloned_volume(volume, src_vref)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
    driver._clone_backing.assert_called_once_with(volume,
                                                  backing,
                                                  None,
                                                  default_clone_type,
                                                  src_vref['size'])
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
@mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
            '_get_clone_type')
def test_create_linked_cloned_volume_with_backing(self, get_clone_type,
                                                  mock_vops):
    """Test create_cloned_volume with clone type - linked."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
    src_vref = {'name': 'src_snapshot_name', 'status': 'available',
                'size': 1}
    backing = mock.sentinel.backing
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    linked_clone = volumeops.LINKED_CLONE_TYPE
    get_clone_type.return_value = linked_clone
    driver._clone_backing = mock.MagicMock()
    mock_vops.create_snapshot = mock.MagicMock()
    mock_vops.create_snapshot.return_value = mock.sentinel.snapshot
    # invoke the create_volume_from_snapshot api
    driver.create_cloned_volume(volume, src_vref)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
    get_clone_type.assert_called_once_with(volume)
    # A linked clone requires a snapshot of the source backing first.
    name = 'snapshot-%s' % volume['id']
    mock_vops.create_snapshot.assert_called_once_with(backing, name, None)
    driver._clone_backing.assert_called_once_with(volume,
                                                  backing,
                                                  mock.sentinel.snapshot,
                                                  linked_clone,
                                                  src_vref['size'])
    @mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    @mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_clone_type')
    def test_create_linked_cloned_volume_when_attached(self, get_clone_type,
                                                       mock_vops):
        """Test create_cloned_volume linked clone when volume is attached."""
        mock_vops = mock_vops.return_value
        driver = self._driver
        volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
        # source volume is 'in-use': linked clone must be rejected
        src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'}
        backing = mock.sentinel.backing
        driver._verify_volume_creation = mock.MagicMock()
        mock_vops.get_backing.return_value = backing
        linked_clone = volumeops.LINKED_CLONE_TYPE
        get_clone_type.return_value = linked_clone
        # invoke the create_volume_from_snapshot api
        self.assertRaises(cinder_exceptions.InvalidVolume,
                          driver.create_cloned_volume,
                          volume,
                          src_vref)
        # verify calls
        driver._verify_volume_creation.assert_called_once_with(volume)
        mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
        get_clone_type.assert_called_once_with(volume)
    @mock.patch('storage.volume.volume_types.get_volume_type_extra_specs')
    def test_get_storage_profile(self, get_volume_type_extra_specs):
        """Test vmdk _get_storage_profile."""
        # volume with no type id returns None
        volume = FakeObject()
        volume['volume_type_id'] = None
        sp = self._driver._get_storage_profile(volume)
        self.assertIsNone(sp, "Without a volume_type_id no storage "
                          "profile should be returned.")
        # profile associated with the volume type should be returned
        fake_id = 'fake_volume_id'
        volume['volume_type_id'] = fake_id
        get_volume_type_extra_specs.return_value = 'fake_profile'
        profile = self._driver._get_storage_profile(volume)
        self.assertEqual('fake_profile', profile)
        # the profile is read from the 'vmware:storage_profile' extra spec
        spec_key = 'vmware:storage_profile'
        get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
        # None should be returned when no storage profile is
        # associated with the volume type
        get_volume_type_extra_specs.return_value = False
        profile = self._driver._get_storage_profile(volume)
        self.assertIsNone(profile)
    def _test_copy_image(self, download_flat_image, session, vops,
                         expected_cacerts=False):
        """Helper: run _copy_image and verify the download_flat_image call.

        Asserts that the image transfer is invoked with the configured
        host/port, datacenter and datastore names, the vim session cookies,
        and the given *expected_cacerts* value.
        """
        dc_name = mock.sentinel.dc_name
        vops.get_entity_name.return_value = dc_name
        context = mock.sentinel.context
        dc_ref = mock.sentinel.dc_ref
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 102400
        ds_name = mock.sentinel.ds_name
        upload_file_path = mock.sentinel.upload_file_path
        self._driver._copy_image(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, upload_file_path)
        vops.get_entity_name.assert_called_once_with(dc_ref)
        # the vim session's HTTP cookies must be forwarded to the transfer
        cookies = session.vim.client.options.transport.cookiejar
        download_flat_image.assert_called_once_with(
            context,
            self._config.vmware_image_transfer_timeout_secs,
            image_service,
            image_id,
            image_size=image_size_in_bytes,
            host=self._config.vmware_host_ip,
            port=self._config.vmware_host_port,
            data_center_name=dc_name,
            datastore_name=ds_name,
            cookies=cookies,
            file_path=upload_file_path,
            cacerts=expected_cacerts)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch('oslo_vmware.image_transfer.download_flat_image')
    def test_copy_image(self, download_flat_image, session, vops):
        """Test _copy_image with the default (secure) configuration."""
        # Default value of vmware_ca_file is not None; it should be passed
        # to download_flat_image as cacerts.
        self._test_copy_image(download_flat_image, session, vops,
                              expected_cacerts=self._config.vmware_ca_file)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch('oslo_vmware.image_transfer.download_flat_image')
    def test_copy_image_insecure(self, download_flat_image, session, vops):
        """Test _copy_image when insecure connections are allowed."""
        # Set config options to allow insecure connections.
        self._config.vmware_ca_file = None
        self._config.vmware_insecure = True
        # Since vmware_ca_file is unset and vmware_insecure is True,
        # dowload_flat_image should be called with cacerts=False.
        self._test_copy_image(download_flat_image, session, vops)
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_backing_with_params(self, vops, select_ds_for_volume):
        """Test _create_backing with the supported create_params keys."""
        host = mock.sentinel.host
        resource_pool = mock.sentinel.resource_pool
        folder = mock.sentinel.folder
        summary = mock.sentinel.summary
        select_ds_for_volume.return_value = (host, resource_pool, folder,
                                             summary)
        backing = mock.sentinel.backing
        vops.create_backing_disk_less.return_value = backing
        volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1,
                  'id': 'd11a82de-ddaa-448d-b50a-a255a7e61a1e'}
        # Case 1: disk-less backing -- no disk, so no disk-UUID update.
        create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
        ret = self._driver._create_backing(volume, host, create_params)
        self.assertEqual(backing, ret)
        extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id']}
        vops.create_backing_disk_less.assert_called_once_with(
            'vol-1',
            folder,
            resource_pool,
            host,
            summary.name,
            profileId=None,
            extra_config=extra_config)
        self.assertFalse(vops.update_backing_disk_uuid.called)
        # Case 2: explicit adapter type.
        vops.create_backing.return_value = backing
        create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
        ret = self._driver._create_backing(volume, host, create_params)
        self.assertEqual(backing, ret)
        vops.create_backing.assert_called_once_with('vol-1',
                                                    units.Mi,
                                                    vmdk.THIN_VMDK_TYPE,
                                                    folder,
                                                    resource_pool,
                                                    host,
                                                    summary.name,
                                                    profileId=None,
                                                    adapter_type='ide',
                                                    extra_config=extra_config)
        vops.update_backing_disk_uuid.assert_called_once_with(backing,
                                                              volume['id'])
        vops.create_backing.reset_mock()
        vops.update_backing_disk_uuid.reset_mock()
        # Case 3: custom backing name; adapter type defaults to 'lsiLogic'.
        backing_name = "temp-vol"
        create_params = {vmdk.CREATE_PARAM_BACKING_NAME: backing_name}
        ret = self._driver._create_backing(volume, host, create_params)
        self.assertEqual(backing, ret)
        vops.create_backing.assert_called_once_with(backing_name,
                                                    units.Mi,
                                                    vmdk.THIN_VMDK_TYPE,
                                                    folder,
                                                    resource_pool,
                                                    host,
                                                    summary.name,
                                                    profileId=None,
                                                    adapter_type='lsiLogic',
                                                    extra_config=extra_config)
        vops.update_backing_disk_uuid.assert_called_once_with(backing,
                                                              volume['id'])
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('oslo_utils.fileutils.delete_if_exists')
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close')
    def test_temporary_file(
            self, close, mkstemp, delete_if_exists, ensure_tree):
        """Test the _temporary_file context manager."""
        fd = mock.sentinel.fd
        tmp = mock.sentinel.tmp
        mkstemp.return_value = (fd, tmp)
        prefix = ".vmdk"
        suffix = "test"
        with self._driver._temporary_file(prefix=prefix,
                                          suffix=suffix) as tmp_file:
            self.assertEqual(tmp, tmp_file)
            # inside the context: dir ensured, temp file created, fd closed
            ensure_tree.assert_called_once_with(self.TMP_DIR)
            mkstemp.assert_called_once_with(dir=self.TMP_DIR,
                                            prefix=prefix,
                                            suffix=suffix)
            close.assert_called_once_with(fd)
        # the temporary file must be removed on context exit
        delete_if_exists.assert_called_once_with(tmp)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_get_hosts(self, vops):
        """_get_hosts returns only the usable hosts of the given clusters."""
        host_1 = mock.sentinel.host_1
        host_2 = mock.sentinel.host_2
        host_3 = mock.sentinel.host_3
        vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]]
        # host_1 and host_3 are usable, host_2 is not usable
        vops.is_host_usable.side_effect = [True, False, True]
        cls_1 = mock.sentinel.cls_1
        cls_2 = mock.sentinel.cls_2
        self.assertEqual([host_1, host_3],
                         self._driver._get_hosts([cls_1, cls_2]))
        exp_calls = [mock.call(cls_1), mock.call(cls_2)]
        self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list)
        exp_calls = [mock.call(host_1), mock.call(host_2), mock.call(host_3)]
        self.assertEqual(exp_calls, vops.is_host_usable.call_args_list)
    @mock.patch.object(VMDK_DRIVER, '_get_hosts')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_select_datastore(self, ds_sel, get_hosts):
        """The selector is queried with all hosts of the driver's clusters."""
        cls_1 = mock.sentinel.cls_1
        cls_2 = mock.sentinel.cls_2
        self._driver._clusters = [cls_1, cls_2]
        host_1 = mock.sentinel.host_1
        host_2 = mock.sentinel.host_2
        host_3 = mock.sentinel.host_3
        get_hosts.return_value = [host_1, host_2, host_3]
        best_candidate = mock.sentinel.best_candidate
        ds_sel.select_datastore.return_value = best_candidate
        req = mock.sentinel.req
        self.assertEqual(best_candidate, self._driver._select_datastore(req))
        get_hosts.assert_called_once_with(self._driver._clusters)
        ds_sel.select_datastore.assert_called_once_with(
            req, hosts=[host_1, host_2, host_3])
    @mock.patch.object(VMDK_DRIVER, '_get_hosts')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_select_datastore_with_no_best_candidate(self, ds_sel, get_hosts):
        """An empty selector result raises NoValidDatastoreException."""
        cls_1 = mock.sentinel.cls_1
        cls_2 = mock.sentinel.cls_2
        self._driver._clusters = [cls_1, cls_2]
        host_1 = mock.sentinel.host_1
        host_2 = mock.sentinel.host_2
        host_3 = mock.sentinel.host_3
        get_hosts.return_value = [host_1, host_2, host_3]
        # empty tuple: the selector found no suitable datastore
        ds_sel.select_datastore.return_value = ()
        req = mock.sentinel.req
        self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
                          self._driver._select_datastore,
                          req)
        get_hosts.assert_called_once_with(self._driver._clusters)
        ds_sel.select_datastore.assert_called_once_with(
            req, hosts=[host_1, host_2, host_3])
@mock.patch.object(VMDK_DRIVER, '_get_hosts')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_datastore_with_single_host(self, ds_sel, get_hosts):
best_candidate = mock.sentinel.best_candidate
ds_sel.select_datastore.return_value = best_candidate
req = mock.sentinel.req
host_1 = mock.sentinel.host_1
self.assertEqual(best_candidate,
self._driver._select_datastore(req, host_1))
ds_sel.select_datastore.assert_called_once_with(req, hosts=[host_1])
self.assertFalse(get_hosts.called)
    @mock.patch.object(VMDK_DRIVER, '_get_hosts')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_select_datastore_with_empty_clusters(self, ds_sel, get_hosts):
        """Without configured clusters no host filter is applied."""
        self._driver._clusters = None
        best_candidate = mock.sentinel.best_candidate
        ds_sel.select_datastore.return_value = best_candidate
        req = mock.sentinel.req
        self.assertEqual(best_candidate, self._driver._select_datastore(req))
        ds_sel.select_datastore.assert_called_once_with(req, hosts=None)
        self.assertFalse(get_hosts.called)
    @mock.patch.object(VMDK_DRIVER, '_get_hosts')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_select_datastore_with_no_valid_host(self, ds_sel, get_hosts):
        """No usable host in any cluster raises NoValidHostException."""
        cls_1 = mock.sentinel.cls_1
        cls_2 = mock.sentinel.cls_2
        self._driver._clusters = [cls_1, cls_2]
        get_hosts.return_value = []
        req = mock.sentinel.req
        self.assertRaises(vmdk_exceptions.NoValidHostException,
                          self._driver._select_datastore, req)
        get_hosts.assert_called_once_with(self._driver._clusters)
        # the selector must never be reached without hosts
        self.assertFalse(ds_sel.called)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_relocate_backing_nop(self, ds_sel, vops):
        """No relocation when the datastore is accessible and compliant."""
        self._driver._storage_policy_enabled = True
        volume = {'name': 'vol-1', 'size': 1}
        datastore = mock.sentinel.datastore
        vops.get_datastore.return_value = datastore
        profile = mock.sentinel.profile
        vops.get_profile.return_value = profile
        vops.is_datastore_accessible.return_value = True
        ds_sel.is_datastore_compliant.return_value = True
        backing = mock.sentinel.backing
        host = mock.sentinel.host
        self._driver._relocate_backing(volume, backing, host)
        vops.is_datastore_accessible.assert_called_once_with(datastore, host)
        ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
                                                              profile)
        # accessible + compliant -> no relocation must happen
        self.assertFalse(vops.relocate_backing.called)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_relocate_backing_with_no_datastore(
            self, ds_sel, vops):
        """No compliant target datastore raises NoValidDatastoreException."""
        self._driver._storage_policy_enabled = True
        volume = {'name': 'vol-1', 'size': 1}
        profile = mock.sentinel.profile
        vops.get_profile.return_value = profile
        vops.is_datastore_accessible.return_value = True
        ds_sel.is_datastore_compliant.return_value = False
        ds_sel.select_datastore.return_value = []
        backing = mock.sentinel.backing
        host = mock.sentinel.host
        self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
                          self._driver._relocate_backing,
                          volume,
                          backing,
                          host)
        # the selector must be asked for size + profile on the given host
        ds_sel.select_datastore.assert_called_once_with(
            {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
             hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host])
        self.assertFalse(vops.relocate_backing.called)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_relocate_backing(
            self, ds_sel, get_volume_group_folder, vops):
        """Inaccessible datastore: backing is relocated and moved to folder."""
        volume = {'name': 'vol-1', 'size': 1,
                  'project_id': '63c19a12292549818c09946a5e59ddaf'}
        vops.is_datastore_accessible.return_value = False
        ds_sel.is_datastore_compliant.return_value = True
        backing = mock.sentinel.backing
        host = mock.sentinel.host
        rp = mock.sentinel.rp
        datastore = mock.sentinel.datastore
        summary = mock.Mock(datastore=datastore)
        ds_sel.select_datastore.return_value = (host, rp, summary)
        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder
        self._driver._relocate_backing(volume, backing, host)
        vops.relocate_backing.assert_called_once_with(backing,
                                                      datastore,
                                                      rp,
                                                      host)
        vops.move_backing_to_folder.assert_called_once_with(backing,
                                                            folder)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    def test_relocate_backing_with_pbm_disabled(
            self, ds_sel, get_volume_group_folder, vops):
        """With storage policy disabled the profile lookup is skipped."""
        self._driver._storage_policy_enabled = False
        volume = {'name': 'vol-1', 'size': 1, 'project_id': 'abc'}
        vops.is_datastore_accessible.return_value = False
        backing = mock.sentinel.backing
        host = mock.sentinel.host
        rp = mock.sentinel.rp
        datastore = mock.sentinel.datastore
        summary = mock.Mock(datastore=datastore)
        ds_sel.select_datastore.return_value = (host, rp, summary)
        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder
        self._driver._relocate_backing(volume, backing, host)
        # no PBM -> no profile lookup
        self.assertFalse(vops.get_profile.called)
        vops.relocate_backing.assert_called_once_with(backing,
                                                      datastore,
                                                      rp,
                                                      host)
        vops.move_backing_to_folder.assert_called_once_with(backing,
                                                            folder)
        # selection request carries a None profile name
        ds_sel.select_datastore.assert_called_once_with(
            {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
             hub.DatastoreSelector.PROFILE_NAME: None}, hosts=[host])
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_get_disk_device(self, vops):
        """_get_disk_device resolves the VM by path and its disk device."""
        vm = mock.sentinel.vm
        vops.get_entity_by_inventory_path.return_value = vm
        dev = mock.sentinel.dev
        vops.get_disk_device.return_value = dev
        vm_inv_path = mock.sentinel.vm_inv_path
        vmdk_path = mock.sentinel.vmdk_path
        ret = self._driver._get_disk_device(vmdk_path, vm_inv_path)
        self.assertEqual((vm, dev), ret)
        vops.get_entity_by_inventory_path.assert_called_once_with(vm_inv_path)
        vops.get_disk_device.assert_called_once_with(vm, vmdk_path)
def test_get_existing_with_empty_source_name(self):
self.assertRaises(cinder_exceptions.InvalidInput,
self._driver._get_existing,
{})
def test_get_existing_with_invalid_source_name(self):
self.assertRaises(cinder_exceptions.InvalidInput,
self._driver._get_existing,
{'source-name': 'foo'})
    @mock.patch.object(VMDK_DRIVER, '_get_disk_device', return_value=None)
    def test_get_existing_with_invalid_existing_ref(self, get_disk_device):
        """A reference whose disk device cannot be resolved is rejected."""
        self.assertRaises(cinder_exceptions.ManageExistingInvalidReference,
                          self._driver._get_existing,
                          {'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'})
        # source-name is split at '@' into vmdk path and VM inventory path
        get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk',
                                                '/dc-1/vm/foo')
    @mock.patch.object(VMDK_DRIVER, '_get_disk_device')
    def test_get_existing(self, get_disk_device):
        """A valid 'source-name' yields the (vm, disk_device) pair."""
        vm = mock.sentinel.vm
        disk_device = mock.sentinel.disk_device
        get_disk_device.return_value = (vm, disk_device)
        self.assertEqual(
            (vm, disk_device),
            self._driver._get_existing({'source-name':
                                        '[ds1] foo/foo.vmdk@/dc-1/vm/foo'}))
        get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk',
                                                '/dc-1/vm/foo')
    @mock.patch.object(VMDK_DRIVER, '_get_existing')
    @ddt.data((16384, 1), (1048576, 1), (1572864, 2))
    def test_manage_existing_get_size(self, test_data, get_existing):
        """capacityInKB is converted to a whole GiB size, rounded up."""
        (capacity_kb, exp_size) = test_data
        disk_device = mock.Mock(capacityInKB=capacity_kb)
        get_existing.return_value = (mock.sentinel.vm, disk_device)
        volume = mock.sentinel.volume
        existing_ref = mock.sentinel.existing_ref
        self.assertEqual(exp_size,
                         self._driver.manage_existing_get_size(volume,
                                                               existing_ref))
        get_existing.assert_called_once_with(existing_ref)
    @mock.patch.object(VMDK_DRIVER, '_get_existing')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch('storage.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_disk_type')
    def test_manage_existing(
            self, get_disk_type, get_storage_profile_id,
            get_ds_name_folder_path, vops, create_backing, get_existing):
        """Full manage_existing flow: detach, move the vmdk, re-attach."""
        vm = mock.sentinel.vm
        src_path = mock.sentinel.src_path
        disk_backing = mock.Mock(fileName=src_path)
        disk_device = mock.Mock(backing=disk_backing, capacityInKB=1048576)
        get_existing.return_value = (vm, disk_device)
        backing = mock.sentinel.backing
        create_backing.return_value = backing
        # first get_dc call resolves the source DC, second the destination
        src_dc = mock.sentinel.src_dc
        dest_dc = mock.sentinel.dest_dc
        vops.get_dc.side_effect = [src_dc, dest_dc]
        volume = self._create_volume_dict()
        ds_name = "ds1"
        folder_path = "%s/" % volume['name']
        get_ds_name_folder_path.return_value = (ds_name, folder_path)
        profile_id = mock.sentinel.profile_id
        get_storage_profile_id.return_value = profile_id
        disk_type = mock.sentinel.disk_type
        get_disk_type.return_value = disk_type
        existing_ref = mock.sentinel.existing_ref
        self._driver.manage_existing(volume, existing_ref)
        get_existing.assert_called_once_with(existing_ref)
        # a disk-less backing receives the existing vmdk
        create_backing.assert_called_once_with(
            volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True})
        vops.detach_disk_from_backing.assert_called_once_with(vm, disk_device)
        dest_path = "[%s] %s%s.vmdk" % (ds_name, folder_path, volume['name'])
        vops.move_vmdk_file.assert_called_once_with(
            src_dc, src_path, dest_path, dest_dc_ref=dest_dc)
        get_storage_profile_id.assert_called_once_with(volume)
        vops.attach_disk_to_backing.assert_called_once_with(
            backing, disk_device.capacityInKB, disk_type, 'lsiLogic',
            profile_id, dest_path)
        vops.update_backing_disk_uuid.assert_called_once_with(backing,
                                                              volume['id'])
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_session(self, apiSession):
        """Session creation passes all configured connection parameters."""
        self._session = None
        self._driver.session()
        apiSession.assert_called_once_with(
            self._config.vmware_host_ip,
            self._config.vmware_host_username,
            self._config.vmware_host_password,
            self._config.vmware_api_retry_count,
            self._config.vmware_task_poll_interval,
            wsdl_loc=self._config.safe_get('vmware_wsdl_location'),
            pbm_wsdl_loc=None,
            port=self._config.vmware_host_port,
            cacert=self._config.vmware_ca_file,
            insecure=self._config.vmware_insecure)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_extend_backing')
    def test_extend_volume_with_no_backing(self, extend_backing, vops):
        """Extending a volume that has no backing is a no-op."""
        vops.get_backing.return_value = None
        volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53',
                  'volume_type_id': None, 'size': 1,
                  'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
                  'display_name': 'foo'}
        self._driver.extend_volume(volume, 2)
        self.assertFalse(extend_backing.called)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_extend_backing')
    def test_extend_volume(self, extend_backing, vops):
        """Happy path: the backing is extended to the new size."""
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53',
                  'volume_type_id': None, 'size': 1,
                  'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
                  'display_name': 'foo'}
        new_size = 2
        self._driver.extend_volume(volume, new_size)
        extend_backing.assert_called_once_with(backing, new_size)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_extend_backing')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_extend_volume_with_no_disk_space(self, select_ds_for_volume,
extend_backing, vops):
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
extend_backing.side_effect = [exceptions.NoDiskSpaceException, None]
host = mock.sentinel.host
rp = mock.sentinel.rp
folder = mock.sentinel.folder
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
select_ds_for_volume.return_value = (host, rp, folder, summary)
volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53',
'volume_type_id': None, 'size': 1,
'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
'display_name': 'foo'}
new_size = 2
self._driver.extend_volume(volume, new_size)
create_params = {vmdk.CREATE_PARAM_DISK_SIZE: new_size}
select_ds_for_volume.assert_called_once_with(
volume, create_params=create_params)
vops.relocate_backing.assert_called_once_with(backing, datastore, rp,
host)
vops.move_backing_to_folder(backing, folder)
extend_backing_calls = [mock.call(backing, new_size),
mock.call(backing, new_size)]
self.assertEqual(extend_backing_calls, extend_backing.call_args_list)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_extend_backing')
    def test_extend_volume_with_extend_backing_error(
            self, extend_backing, vops):
        """An unexpected VimException from _extend_backing is propagated."""
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        extend_backing.side_effect = exceptions.VimException("Error")
        volume = {'name': 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53',
                  'volume_type_id': None, 'size': 1,
                  'id': '51e47214-8e3c-475d-b44b-aea6cd3eef53',
                  'display_name': 'foo'}
        new_size = 2
        self.assertRaises(exceptions.VimException, self._driver.extend_volume,
                          volume, new_size)
        extend_backing.assert_called_once_with(backing, new_size)
class ImageDiskTypeTest(test.TestCase):
    """Unit tests for ImageDiskType."""

    # The four disk types the driver accepts for images.
    _ACCEPTED = ("thin", "preallocated", "streamOptimized", "sparse")

    def test_is_valid(self):
        for disk_type in self._ACCEPTED:
            self.assertTrue(vmdk.ImageDiskType.is_valid(disk_type))
        self.assertFalse(vmdk.ImageDiskType.is_valid("thick"))

    def test_validate(self):
        # accepted types must pass silently
        for disk_type in self._ACCEPTED:
            vmdk.ImageDiskType.validate(disk_type)
        # anything else raises ImageUnacceptable
        self.assertRaises(cinder_exceptions.ImageUnacceptable,
                          vmdk.ImageDiskType.validate,
                          "thick")
| [
"nkapotoxin@gmail.com"
] | nkapotoxin@gmail.com |
176b1c3dbec71fa12f0b00932be4b809136c121f | 656def2ca5c0bd959b31b98cdbc53fea3420b2dc | /Python3.7-VideoSplice/src/tencentcloud/clb/v20180317/models.py | 20aaf8be685144e1429237ddf0f36224333b0445 | [] | no_license | tencentyun/serverless-demo | 120271b96f8f960b6125c9d1481a5d8fe56165ae | 4c324bb186c460fe78252f0ca5c28132a8bce6c9 | refs/heads/master | 2023-08-25T17:07:04.959745 | 2023-08-25T08:10:49 | 2023-08-25T08:10:49 | 281,120,881 | 94 | 119 | null | 2023-08-31T06:34:36 | 2020-07-20T13:15:46 | null | UTF-8 | Python | false | false | 272,126 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AssociateTargetGroupsRequest(AbstractModel):
    """AssociateTargetGroups request structure."""

    def __init__(self):
        r"""
        :param Associations: Array of association relations to bind.
        :type Associations: list of TargetGroupAssociation
        """
        self.Associations = None

    def _deserialize(self, params):
        raw_items = params.get("Associations")
        if raw_items is not None:
            self.Associations = []
            for raw_item in raw_items:
                association = TargetGroupAssociation()
                association._deserialize(raw_item)
                self.Associations.append(association)
        # Warn about request keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class AssociateTargetGroupsResponse(AbstractModel):
    """AssociateTargetGroups response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request;
            supply it when reporting a problem with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class AssociationItem(AbstractModel):
    """Rule that a target group is associated with."""

    def __init__(self):
        r"""
        :param LoadBalancerId: ID of the associated load balancer.
        :type LoadBalancerId: str
        :param ListenerId: ID of the associated listener.
        :type ListenerId: str
        :param LocationId: ID of the associated forwarding rule.
            Note: this field may return null when no valid value is available.
        :type LocationId: str
        :param Protocol: Protocol type of the associated listener, e.g. HTTP, TCP.
        :type Protocol: str
        :param Port: Port of the associated listener.
        :type Port: int
        :param Domain: Domain of the associated forwarding rule.
            Note: this field may return null when no valid value is available.
        :type Domain: str
        :param Url: URL of the associated forwarding rule.
            Note: this field may return null when no valid value is available.
        :type Url: str
        :param LoadBalancerName: Load balancer name.
        :type LoadBalancerName: str
        :param ListenerName: Listener name.
        :type ListenerName: str
        """
        # All fields are plain scalars copied verbatim from the wire dict.
        for attr in ("LoadBalancerId", "ListenerId", "LocationId", "Protocol",
                     "Port", "Domain", "Url", "LoadBalancerName",
                     "ListenerName"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        for attr in ("LoadBalancerId", "ListenerId", "LocationId", "Protocol",
                     "Port", "Domain", "Url", "LoadBalancerName",
                     "ListenerName"):
            setattr(self, attr, params.get(attr))
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class AutoRewriteRequest(AbstractModel):
    """AutoRewrite request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: ID of the HTTPS:443 listener.
        :type ListenerId: str
        :param Domains: Domains under the HTTPS:443 listener that need
            redirection; if omitted, redirection is configured for every
            domain under the listener.
        :type Domains: list of str
        :param RewriteCodes: Redirect status code; one of 301, 302, 307.
        :type RewriteCodes: list of int
        :param TakeUrls: Whether the redirect carries the matched URL.
        :type TakeUrls: list of bool
        """
        for attr in ("LoadBalancerId", "ListenerId", "Domains",
                     "RewriteCodes", "TakeUrls"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        for attr in ("LoadBalancerId", "ListenerId", "Domains",
                     "RewriteCodes", "TakeUrls"):
            setattr(self, attr, params.get(attr))
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class AutoRewriteResponse(AbstractModel):
    """AutoRewrite response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request;
            supply it when reporting a problem with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class Backend(AbstractModel):
    """Details of a backend service bound to a listener."""

    def __init__(self):
        r"""
        :param Type: Backend service type; one of: CVM, ENI.
        :type Type: str
        :param InstanceId: Unique ID of the backend service, e.g. ins-abcd1234.
        :type InstanceId: str
        :param Port: Listening port of the backend service.
        :type Port: int
        :param Weight: Forwarding weight of the backend service; range
            [0, 100], default 10.
        :type Weight: int
        :param PublicIpAddresses: Public IPs of the backend service.
            Note: this field may return null when no valid value is available.
        :type PublicIpAddresses: list of str
        :param PrivateIpAddresses: Private IPs of the backend service.
            Note: this field may return null when no valid value is available.
        :type PrivateIpAddresses: list of str
        :param InstanceName: Instance name of the backend service.
            Note: this field may return null when no valid value is available.
        :type InstanceName: str
        :param RegisteredTime: Time at which the backend service was bound.
            Note: this field may return null when no valid value is available.
        :type RegisteredTime: str
        :param EniId: Unique ENI ID, e.g. eni-1234abcd.
            Note: this field may return null when no valid value is available.
        :type EniId: str
        """
        for attr in ("Type", "InstanceId", "Port", "Weight",
                     "PublicIpAddresses", "PrivateIpAddresses",
                     "InstanceName", "RegisteredTime", "EniId"):
            setattr(self, attr, None)

    def _deserialize(self, params):
        # All fields are plain scalars/lists copied verbatim.
        for attr in ("Type", "InstanceId", "Port", "Weight",
                     "PublicIpAddresses", "PrivateIpAddresses",
                     "InstanceName", "RegisteredTime", "EniId"):
            setattr(self, attr, params.get(attr))
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class BasicTargetGroupInfo(AbstractModel):
    """Basic information of a target group bound to a listener or rule."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target group ID.
        :type TargetGroupId: str
        :param TargetGroupName: Target group name.
        :type TargetGroupName: str
        """
        self.TargetGroupId = None
        self.TargetGroupName = None

    def _deserialize(self, params):
        for attr in ("TargetGroupId", "TargetGroupName"):
            setattr(self, attr, params.get(attr))
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class BatchDeregisterTargetsRequest(AbstractModel):
    """BatchDeregisterTargets request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer ID.
        :type LoadBalancerId: str
        :param Targets: Targets to unbind.
        :type Targets: list of BatchTarget
        """
        self.LoadBalancerId = None
        self.Targets = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        raw_targets = params.get("Targets")
        if raw_targets is not None:
            self.Targets = []
            for raw_target in raw_targets:
                target = BatchTarget()
                target._deserialize(raw_target)
                self.Targets.append(target)
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class BatchDeregisterTargetsResponse(AbstractModel):
    """BatchDeregisterTargets response structure."""

    def __init__(self):
        r"""
        :param FailListenerIdSet: IDs of listeners that failed to unbind.
        :type FailListenerIdSet: list of str
        :param RequestId: Unique request ID, returned with every request;
            supply it when reporting a problem with this request.
        :type RequestId: str
        """
        self.FailListenerIdSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.FailListenerIdSet = params.get("FailListenerIdSet")
        self.RequestId = params.get("RequestId")
class BatchModifyTargetWeightRequest(AbstractModel):
    """BatchModifyTargetWeight request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ModifyList: List of weights to modify in batch.
        :type ModifyList: list of RsWeightRule
        """
        self.LoadBalancerId = None
        self.ModifyList = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        raw_rules = params.get("ModifyList")
        if raw_rules is not None:
            self.ModifyList = []
            for raw_rule in raw_rules:
                rule = RsWeightRule()
                rule._deserialize(raw_rule)
                self.ModifyList.append(rule)
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class BatchModifyTargetWeightResponse(AbstractModel):
    """BatchModifyTargetWeight response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request;
            supply it when reporting a problem with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class BatchRegisterTargetsRequest(AbstractModel):
    """BatchRegisterTargets request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer ID.
        :type LoadBalancerId: str
        :param Targets: Targets to bind.
        :type Targets: list of BatchTarget
        """
        self.LoadBalancerId = None
        self.Targets = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        raw_targets = params.get("Targets")
        if raw_targets is not None:
            self.Targets = []
            for raw_target in raw_targets:
                target = BatchTarget()
                target._deserialize(raw_target)
                self.Targets.append(target)
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class BatchRegisterTargetsResponse(AbstractModel):
    """BatchRegisterTargets response structure."""

    def __init__(self):
        r"""
        :param FailListenerIdSet: IDs of listeners that failed to bind; empty
            means every binding succeeded.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type FailListenerIdSet: list of str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when troubleshooting.
        :type RequestId: str
        """
        self.FailListenerIdSet = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known response field straight from the raw payload.
        for field in ("FailListenerIdSet", "RequestId"):
            setattr(self, field, params.get(field))
class BatchTarget(AbstractModel):
    """Target description for batch binding."""

    def __init__(self):
        r"""
        :param ListenerId: Listener ID.
        :type ListenerId: str
        :param Port: Port to bind.
        :type Port: int
        :param InstanceId: CVM instance ID; binds the primary IP of the
            primary ENI.
        :type InstanceId: str
        :param EniIp: IP to bind (ENI IP or other private IP); an ENI must be
            attached to a CVM before it can be bound to a CLB. Exactly one of
            InstanceId / EniIp must be supplied; required for dual-stack IPv6
            instances.
        :type EniIp: str
        :param Weight: Target weight in [0, 100]; defaults to 10 when omitted.
        :type Weight: int
        :param LocationId: Layer-7 forwarding rule ID.
        :type LocationId: str
        """
        self.ListenerId = None
        self.Port = None
        self.InstanceId = None
        self.EniIp = None
        self.Weight = None
        self.LocationId = None

    def _deserialize(self, params):
        self.ListenerId = params.get("ListenerId")
        self.Port = params.get("Port")
        self.InstanceId = params.get("InstanceId")
        self.EniIp = params.get("EniIp")
        self.Weight = params.get("Weight")
        self.LocationId = params.get("LocationId")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class BindDetailItem(AbstractModel):
    """Binding relationship: listener name, protocol, URL and vport."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID the config is bound to.
        :type LoadBalancerId: str
        :param ListenerId: Listener ID the config is bound to.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type ListenerId: str
        :param Domain: Domain the config is bound to.
            Note: may return null.
        :type Domain: str
        :param LocationId: Rule the config is bound to.
            Note: may return null.
        :type LocationId: str
        :param ListenerName: Listener name.
            Note: may return null.
        :type ListenerName: str
        :param Protocol: Listener protocol.
            Note: may return null.
        :type Protocol: str
        :param Vport: Listener port.
            Note: may return null.
        :type Vport: int
        :param Url: URL of the location.
            Note: may return null.
        :type Url: str
        :param UconfigId: Configuration ID.
            Note: may return null.
        :type UconfigId: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Domain = None
        self.LocationId = None
        self.ListenerName = None
        self.Protocol = None
        self.Vport = None
        self.Url = None
        self.UconfigId = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        self.Domain = params.get("Domain")
        self.LocationId = params.get("LocationId")
        self.ListenerName = params.get("ListenerName")
        self.Protocol = params.get("Protocol")
        self.Vport = params.get("Vport")
        self.Url = params.get("Url")
        self.UconfigId = params.get("UconfigId")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class BlockedIP(AbstractModel):
    """An IP added to the 12306 blocklist."""

    def __init__(self):
        r"""
        :param IP: Blocklisted IP.
        :type IP: str
        :param CreateTime: Time the IP was added to the blocklist.
        :type CreateTime: str
        :param ExpireTime: Expiration time.
        :type ExpireTime: str
        """
        self.IP = None
        self.CreateTime = None
        self.ExpireTime = None

    def _deserialize(self, params):
        self.IP = params.get("IP")
        self.CreateTime = params.get("CreateTime")
        self.ExpireTime = params.get("ExpireTime")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CertIdRelatedWithLoadBalancers(AbstractModel):
    """Certificate ID and the CLB instances associated with it."""

    def __init__(self):
        r"""
        :param CertId: Certificate ID.
        :type CertId: str
        :param LoadBalancers: CLB instances associated with the certificate.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type LoadBalancers: list of LoadBalancer
        """
        self.CertId = None
        self.LoadBalancers = None

    def _deserialize(self, params):
        self.CertId = params.get("CertId")
        if params.get("LoadBalancers") is not None:
            self.LoadBalancers = []
            for item in params.get("LoadBalancers"):
                obj = LoadBalancer()
                obj._deserialize(item)
                self.LoadBalancers.append(obj)
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CertificateInput(AbstractModel):
    """Certificate information."""

    def __init__(self):
        r"""
        :param SSLMode: Authentication type. UNIDIRECTIONAL: one-way
            authentication; MUTUAL: mutual authentication.
        :type SSLMode: str
        :param CertId: Server certificate ID. If omitted, a certificate must
            be uploaded via CertContent, CertKey and CertName.
        :type CertId: str
        :param CertCaId: Client certificate ID. When the listener uses mutual
            authentication (SSLMode=MUTUAL) and this is omitted, the client
            certificate must be uploaded via CertCaContent and CertCaName.
        :type CertCaId: str
        :param CertName: Name of the uploaded server certificate; required
            when CertId is absent.
        :type CertName: str
        :param CertKey: Key of the uploaded server certificate; required when
            CertId is absent.
        :type CertKey: str
        :param CertContent: Content of the uploaded server certificate;
            required when CertId is absent.
        :type CertContent: str
        :param CertCaName: Name of the uploaded client CA certificate;
            required when SSLMode=mutual and CertCaId is absent.
        :type CertCaName: str
        :param CertCaContent: Content of the uploaded client certificate;
            required when SSLMode=mutual and CertCaId is absent.
        :type CertCaContent: str
        """
        self.SSLMode = None
        self.CertId = None
        self.CertCaId = None
        self.CertName = None
        self.CertKey = None
        self.CertContent = None
        self.CertCaName = None
        self.CertCaContent = None

    def _deserialize(self, params):
        self.SSLMode = params.get("SSLMode")
        self.CertId = params.get("CertId")
        self.CertCaId = params.get("CertCaId")
        self.CertName = params.get("CertName")
        self.CertKey = params.get("CertKey")
        self.CertContent = params.get("CertContent")
        self.CertCaName = params.get("CertCaName")
        self.CertCaContent = params.get("CertCaContent")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CertificateOutput(AbstractModel):
    """Certificate-related information."""

    def __init__(self):
        r"""
        :param SSLMode: Authentication type. UNIDIRECTIONAL: one-way
            authentication; MUTUAL: mutual authentication.
        :type SSLMode: str
        :param CertId: Server certificate ID.
        :type CertId: str
        :param CertCaId: Client certificate ID.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type CertCaId: str
        """
        self.SSLMode = None
        self.CertId = None
        self.CertCaId = None

    def _deserialize(self, params):
        self.SSLMode = params.get("SSLMode")
        self.CertId = params.get("CertId")
        self.CertCaId = params.get("CertCaId")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClassicalHealth(AbstractModel):
    """Health state of a classic CLB backend service."""

    def __init__(self):
        r"""
        :param IP: Private IP of the backend service.
        :type IP: str
        :param Port: Port of the backend service.
        :type Port: int
        :param ListenerPort: Listening port of the CLB.
        :type ListenerPort: int
        :param Protocol: Forwarding protocol.
        :type Protocol: str
        :param HealthStatus: Health check result: 1 healthy, 0 unhealthy.
        :type HealthStatus: int
        """
        self.IP = None
        self.Port = None
        self.ListenerPort = None
        self.Protocol = None
        self.HealthStatus = None

    def _deserialize(self, params):
        self.IP = params.get("IP")
        self.Port = params.get("Port")
        self.ListenerPort = params.get("ListenerPort")
        self.Protocol = params.get("Protocol")
        self.HealthStatus = params.get("HealthStatus")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClassicalListener(AbstractModel):
    """Classic CLB listener information."""

    def __init__(self):
        r"""
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param ListenerPort: CLB listener port.
        :type ListenerPort: int
        :param InstancePort: Backend forwarding port of the listener.
        :type InstancePort: int
        :param ListenerName: Listener name.
        :type ListenerName: str
        :param Protocol: Listener protocol type.
        :type Protocol: str
        :param SessionExpire: Session persistence time.
        :type SessionExpire: int
        :param HealthSwitch: Whether health checks are enabled: 1 (enabled),
            0 (disabled).
        :type HealthSwitch: int
        :param TimeOut: Response timeout.
        :type TimeOut: int
        :param IntervalTime: Check interval.
        :type IntervalTime: int
        :param HealthNum: Healthy threshold.
        :type HealthNum: int
        :param UnhealthNum: Unhealthy threshold.
        :type UnhealthNum: int
        :param HttpHash: Request balancing method of a classic public-network
            CLB HTTP/HTTPS listener: wrr (weighted round robin) or ip_hash
            (consistent hashing on the source IP).
        :type HttpHash: str
        :param HttpCode: Health-check return codes of a classic
            public-network CLB HTTP/HTTPS listener; see the CreateListener
            documentation for details.
        :type HttpCode: int
        :param HttpCheckPath: Health-check path of a classic public-network
            CLB HTTP/HTTPS listener.
        :type HttpCheckPath: str
        :param SSLMode: Authentication method of a classic public-network CLB
            HTTPS listener.
        :type SSLMode: str
        :param CertId: Server certificate ID of a classic public-network CLB
            HTTPS listener.
        :type CertId: str
        :param CertCaId: Client certificate ID of a classic public-network
            CLB HTTPS listener.
        :type CertCaId: str
        :param Status: Listener status: 0 creating, 1 running.
        :type Status: int
        """
        self.ListenerId = None
        self.ListenerPort = None
        self.InstancePort = None
        self.ListenerName = None
        self.Protocol = None
        self.SessionExpire = None
        self.HealthSwitch = None
        self.TimeOut = None
        self.IntervalTime = None
        self.HealthNum = None
        self.UnhealthNum = None
        self.HttpHash = None
        self.HttpCode = None
        self.HttpCheckPath = None
        self.SSLMode = None
        self.CertId = None
        self.CertCaId = None
        self.Status = None

    def _deserialize(self, params):
        self.ListenerId = params.get("ListenerId")
        self.ListenerPort = params.get("ListenerPort")
        self.InstancePort = params.get("InstancePort")
        self.ListenerName = params.get("ListenerName")
        self.Protocol = params.get("Protocol")
        self.SessionExpire = params.get("SessionExpire")
        self.HealthSwitch = params.get("HealthSwitch")
        self.TimeOut = params.get("TimeOut")
        self.IntervalTime = params.get("IntervalTime")
        self.HealthNum = params.get("HealthNum")
        self.UnhealthNum = params.get("UnhealthNum")
        self.HttpHash = params.get("HttpHash")
        self.HttpCode = params.get("HttpCode")
        self.HttpCheckPath = params.get("HttpCheckPath")
        self.SSLMode = params.get("SSLMode")
        self.CertId = params.get("CertId")
        self.CertCaId = params.get("CertCaId")
        self.Status = params.get("Status")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClassicalLoadBalancerInfo(AbstractModel):
    """CLB information."""

    def __init__(self):
        r"""
        :param InstanceId: Backend instance ID.
        :type InstanceId: str
        :param LoadBalancerIds: List of CLB instance IDs.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type LoadBalancerIds: list of str
        """
        self.InstanceId = None
        self.LoadBalancerIds = None

    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        self.LoadBalancerIds = params.get("LoadBalancerIds")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClassicalTarget(AbstractModel):
    """Backend service information of a classic CLB."""

    def __init__(self):
        r"""
        :param Type: Backend service type: CVM or ENI (coming soon).
        :type Type: str
        :param InstanceId: Unique ID of the backend service; obtainable from
            the unInstanceId field returned by DescribeInstances.
        :type InstanceId: str
        :param Weight: Forwarding weight of the backend service, in
            [0, 100]; default 10.
        :type Weight: int
        :param PublicIpAddresses: Public IPs of the backend service.
            Note: may return null.
        :type PublicIpAddresses: list of str
        :param PrivateIpAddresses: Private IPs of the backend service.
            Note: may return null.
        :type PrivateIpAddresses: list of str
        :param InstanceName: Instance name of the backend service.
            Note: may return null.
        :type InstanceName: str
        :param RunFlag: Backend service state code:
            1 faulty, 2 running, 3 creating, 4 shut down, 5 returned,
            6 being returned, 7 restarting, 8 starting, 9 shutting down,
            10 resetting password, 11 formatting, 12 creating image,
            13 setting bandwidth, 14 reinstalling system, 19 upgrading,
            21 live-migrating.
            Note: may return null.
        :type RunFlag: int
        """
        self.Type = None
        self.InstanceId = None
        self.Weight = None
        self.PublicIpAddresses = None
        self.PrivateIpAddresses = None
        self.InstanceName = None
        self.RunFlag = None

    def _deserialize(self, params):
        self.Type = params.get("Type")
        self.InstanceId = params.get("InstanceId")
        self.Weight = params.get("Weight")
        self.PublicIpAddresses = params.get("PublicIpAddresses")
        self.PrivateIpAddresses = params.get("PrivateIpAddresses")
        self.InstanceName = params.get("InstanceName")
        self.RunFlag = params.get("RunFlag")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClassicalTargetInfo(AbstractModel):
    """Backend information of a classic CLB."""

    def __init__(self):
        r"""
        :param InstanceId: Backend instance ID.
        :type InstanceId: str
        :param Weight: Weight, in [0, 100].
        :type Weight: int
        """
        self.InstanceId = None
        self.Weight = None

    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        self.Weight = params.get("Weight")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class Cluster(AbstractModel):
    """Cluster details: ID, name, type, availability zone, tags, etc.

    Fields marked "may return null" indicate no valid value could be
    obtained from the backend.
    """

    def __init__(self):
        r"""
        :param ClusterId: Unique cluster ID.
        :type ClusterId: str
        :param ClusterName: Cluster name.
        :type ClusterName: str
        :param ClusterType: Cluster type, e.g. TGW, STGW, VPCGW.
        :type ClusterType: str
        :param ClusterTag: Cluster tag; only STGW clusters have tags.
            Note: may return null.
        :type ClusterTag: str
        :param Zone: Availability zone of the cluster, e.g. ap-guangzhou-1.
        :type Zone: str
        :param Network: Cluster network type: Public or Private.
        :type Network: str
        :param MaxConn: Maximum number of connections. Note: may return null.
        :type MaxConn: int
        :param MaxInFlow: Maximum inbound bandwidth. Note: may return null.
        :type MaxInFlow: int
        :param MaxInPkg: Maximum inbound packet rate. Note: may return null.
        :type MaxInPkg: int
        :param MaxOutFlow: Maximum outbound bandwidth. Note: may return null.
        :type MaxOutFlow: int
        :param MaxOutPkg: Maximum outbound packet rate. Note: may return null.
        :type MaxOutPkg: int
        :param MaxNewConn: Maximum number of new connections.
            Note: may return null.
        :type MaxNewConn: int
        :param HTTPMaxNewConn: Maximum number of new HTTP connections.
            Note: may return null.
        :type HTTPMaxNewConn: int
        :param HTTPSMaxNewConn: Maximum number of new HTTPS connections.
            Note: may return null.
        :type HTTPSMaxNewConn: int
        :param HTTPQps: HTTP QPS. Note: may return null.
        :type HTTPQps: int
        :param HTTPSQps: HTTPS QPS. Note: may return null.
        :type HTTPSQps: int
        :param ResourceCount: Total number of resources in the cluster.
        :type ResourceCount: int
        :param IdleResourceCount: Number of idle resources in the cluster.
            Note: may return null.
        :type IdleResourceCount: int
        :param LoadBalanceDirectorCount: Number of forwarders in the cluster.
            Note: may return null.
        :type LoadBalanceDirectorCount: int
        :param Isp: ISP attribute of the cluster, e.g. "BGP", "CMCC",
            "CUCC", "CTCC", "INTERNAL". Note: may return null.
        :type Isp: str
        :param ClustersZone: Availability zones of the cluster.
            Note: may return null.
        :type ClustersZone: :class:`tencentcloud.clb.v20180317.models.ClustersZone`
        :param ClustersVersion: Cluster version. Note: may return null.
        :type ClustersVersion: str
        """
        self.ClusterId = None
        self.ClusterName = None
        self.ClusterType = None
        self.ClusterTag = None
        self.Zone = None
        self.Network = None
        self.MaxConn = None
        self.MaxInFlow = None
        self.MaxInPkg = None
        self.MaxOutFlow = None
        self.MaxOutPkg = None
        self.MaxNewConn = None
        self.HTTPMaxNewConn = None
        self.HTTPSMaxNewConn = None
        self.HTTPQps = None
        self.HTTPSQps = None
        self.ResourceCount = None
        self.IdleResourceCount = None
        self.LoadBalanceDirectorCount = None
        self.Isp = None
        self.ClustersZone = None
        self.ClustersVersion = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.ClusterName = params.get("ClusterName")
        self.ClusterType = params.get("ClusterType")
        self.ClusterTag = params.get("ClusterTag")
        self.Zone = params.get("Zone")
        self.Network = params.get("Network")
        self.MaxConn = params.get("MaxConn")
        self.MaxInFlow = params.get("MaxInFlow")
        self.MaxInPkg = params.get("MaxInPkg")
        self.MaxOutFlow = params.get("MaxOutFlow")
        self.MaxOutPkg = params.get("MaxOutPkg")
        self.MaxNewConn = params.get("MaxNewConn")
        self.HTTPMaxNewConn = params.get("HTTPMaxNewConn")
        self.HTTPSMaxNewConn = params.get("HTTPSMaxNewConn")
        self.HTTPQps = params.get("HTTPQps")
        self.HTTPSQps = params.get("HTTPSQps")
        self.ResourceCount = params.get("ResourceCount")
        self.IdleResourceCount = params.get("IdleResourceCount")
        self.LoadBalanceDirectorCount = params.get("LoadBalanceDirectorCount")
        self.Isp = params.get("Isp")
        if params.get("ClustersZone") is not None:
            self.ClustersZone = ClustersZone()
            self.ClustersZone._deserialize(params.get("ClustersZone"))
        self.ClustersVersion = params.get("ClustersVersion")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClusterItem(AbstractModel):
    """Dedicated cluster information."""

    def __init__(self):
        r"""
        :param ClusterId: Unique cluster ID.
        :type ClusterId: str
        :param ClusterName: Cluster name.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type ClusterName: str
        :param Zone: Availability zone of the cluster, e.g. ap-guangzhou-1.
            Note: may return null.
        :type Zone: str
        """
        self.ClusterId = None
        self.ClusterName = None
        self.Zone = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.ClusterName = params.get("ClusterName")
        self.Zone = params.get("Zone")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClusterResource(AbstractModel):
    """Resource type inside a cluster."""

    def __init__(self):
        r"""
        :param ClusterId: Unique cluster ID, e.g. tgw-12345678.
        :type ClusterId: str
        :param Vip: IP address.
        :type Vip: str
        :param LoadBalancerId: Unique CLB ID, e.g. lb-12345678.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type LoadBalancerId: str
        :param Idle: Whether the resource is idle. Note: may return null.
        :type Idle: str
        :param ClusterName: Cluster name.
        :type ClusterName: str
        :param Isp: ISP attribute of the cluster, e.g. "BGP", "CMCC",
            "CUCC", "CTCC", "INTERNAL". Note: may return null.
        :type Isp: str
        """
        self.ClusterId = None
        self.Vip = None
        self.LoadBalancerId = None
        self.Idle = None
        self.ClusterName = None
        self.Isp = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.Vip = params.get("Vip")
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.Idle = params.get("Idle")
        self.ClusterName = params.get("ClusterName")
        self.Isp = params.get("Isp")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ClustersZone(AbstractModel):
    """Availability zones of a cluster."""

    def __init__(self):
        r"""
        :param MasterZone: Primary availability zones of the cluster.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type MasterZone: list of str
        :param SlaveZone: Secondary availability zones of the cluster.
            Note: may return null.
        :type SlaveZone: list of str
        """
        self.MasterZone = None
        self.SlaveZone = None

    def _deserialize(self, params):
        self.MasterZone = params.get("MasterZone")
        self.SlaveZone = params.get("SlaveZone")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class ConfigListItem(AbstractModel):
    """Configuration content."""

    def __init__(self):
        r"""
        :param UconfigId: Configuration ID.
        :type UconfigId: str
        :param ConfigType: Configuration type.
        :type ConfigType: str
        :param ConfigName: Configuration name.
            Note: this field may return null, indicating that no valid value
            can be obtained.
        :type ConfigName: str
        :param ConfigContent: Configuration content.
        :type ConfigContent: str
        :param CreateTimestamp: Time the configuration was added.
        :type CreateTimestamp: str
        :param UpdateTimestamp: Time the configuration was modified.
        :type UpdateTimestamp: str
        """
        self.UconfigId = None
        self.ConfigType = None
        self.ConfigName = None
        self.ConfigContent = None
        self.CreateTimestamp = None
        self.UpdateTimestamp = None

    def _deserialize(self, params):
        self.UconfigId = params.get("UconfigId")
        self.ConfigType = params.get("ConfigType")
        self.ConfigName = params.get("ConfigName")
        self.ConfigContent = params.get("ConfigContent")
        self.CreateTimestamp = params.get("CreateTimestamp")
        self.UpdateTimestamp = params.get("UpdateTimestamp")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CreateClsLogSetRequest(AbstractModel):
    """CreateClsLogSet request structure."""

    def __init__(self):
        r"""
        :param Period: Retention period of the logset, in days; max 90.
        :type Period: int
        :param LogsetName: Logset name; must not duplicate any other CLS
            logset. Defaults to clb_logset when omitted.
        :type LogsetName: str
        :param LogsetType: Logset type. ACCESS: access logs; HEALTH: health
            check logs. Defaults to ACCESS.
        :type LogsetType: str
        """
        self.Period = None
        self.LogsetName = None
        self.LogsetType = None

    def _deserialize(self, params):
        self.Period = params.get("Period")
        self.LogsetName = params.get("LogsetName")
        self.LogsetType = params.get("LogsetType")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CreateClsLogSetResponse(AbstractModel):
    """CreateClsLogSet response structure."""

    def __init__(self):
        r"""
        :param LogsetId: Logset ID.
        :type LogsetId: str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when troubleshooting.
        :type RequestId: str
        """
        self.LogsetId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known response field straight from the raw payload.
        for field in ("LogsetId", "RequestId"):
            setattr(self, field, params.get(field))
class CreateListenerRequest(AbstractModel):
    """CreateListener request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param Ports: Ports to create listeners on; one listener per port.
        :type Ports: list of int
        :param Protocol: Listener protocol: TCP | UDP | HTTP | HTTPS |
            TCP_SSL (TCP_SSL is in beta; submit a ticket to apply).
        :type Protocol: str
        :param ListenerNames: Listener names, matched one-to-one with the
            Ports array; omit if naming is not needed immediately.
        :type ListenerNames: list of str
        :param HealthCheck: Health check parameters; only for TCP/UDP/TCP_SSL
            listeners.
        :type HealthCheck: :class:`tencentcloud.clb.v20180317.models.HealthCheck`
        :param Certificate: Certificate information; only for TCP_SSL
            listeners and HTTPS listeners without SNI enabled.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateInput`
        :param SessionExpireTime: Session persistence time in seconds;
            30-3600, default 0 (disabled). Only for TCP/UDP listeners.
        :type SessionExpireTime: int
        :param Scheduler: Forwarding method: WRR (weighted round robin) or
            LEAST_CONN (least connections); default WRR. Only for
            TCP/UDP/TCP_SSL listeners.
        :type Scheduler: str
        :param SniSwitch: Whether to enable SNI; only for HTTPS listeners.
        :type SniSwitch: int
        :param TargetType: Backend target type: NODE binds ordinary nodes,
            TARGETGROUP binds a target group.
        :type TargetType: str
        :param SessionType: Session persistence type. Omitted or NORMAL:
            default persistence; QUIC_CID: persistence by QUIC Connection ID
            (UDP only).
        :type SessionType: str
        :param KeepaliveEnable: Whether to enable keep-alive; only for
            HTTP/HTTPS listeners. 0: off; 1: on. Default off.
        :type KeepaliveEnable: int
        :param EndPort: Required when creating a port-range listener; marks
            the end port, while Ports must contain exactly one member marking
            the start port. (Apply via a ticket to try the port-range
            feature.)
        :type EndPort: int
        :param DeregisterTargetRst: Whether to send RST to the client when a
            backend target is unbound; only for TCP listeners.
        :type DeregisterTargetRst: bool
        """
        self.LoadBalancerId = None
        self.Ports = None
        self.Protocol = None
        self.ListenerNames = None
        self.HealthCheck = None
        self.Certificate = None
        self.SessionExpireTime = None
        self.Scheduler = None
        self.SniSwitch = None
        self.TargetType = None
        self.SessionType = None
        self.KeepaliveEnable = None
        self.EndPort = None
        self.DeregisterTargetRst = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.Ports = params.get("Ports")
        self.Protocol = params.get("Protocol")
        self.ListenerNames = params.get("ListenerNames")
        if params.get("HealthCheck") is not None:
            self.HealthCheck = HealthCheck()
            self.HealthCheck._deserialize(params.get("HealthCheck"))
        if params.get("Certificate") is not None:
            self.Certificate = CertificateInput()
            self.Certificate._deserialize(params.get("Certificate"))
        self.SessionExpireTime = params.get("SessionExpireTime")
        self.Scheduler = params.get("Scheduler")
        self.SniSwitch = params.get("SniSwitch")
        self.TargetType = params.get("TargetType")
        self.SessionType = params.get("SessionType")
        self.KeepaliveEnable = params.get("KeepaliveEnable")
        self.EndPort = params.get("EndPort")
        self.DeregisterTargetRst = params.get("DeregisterTargetRst")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CreateListenerResponse(AbstractModel):
    """CreateListener response structure."""

    def __init__(self):
        r"""
        :param ListenerIds: Unique IDs of the created listeners.
        :type ListenerIds: list of str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when troubleshooting.
        :type RequestId: str
        """
        self.ListenerIds = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known response field straight from the raw payload.
        for field in ("ListenerIds", "RequestId"):
            setattr(self, field, params.get(field))
class CreateLoadBalancerRequest(AbstractModel):
    """CreateLoadBalancer request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerType: CLB network type: OPEN (public) or
            INTERNAL (private).
        :type LoadBalancerType: str
        :param Forward: CLB instance type. 1: generic CLB; only 1 is
            currently supported.
        :type Forward: int
        :param LoadBalancerName: CLB instance name; only effective when
            creating a single instance. 1-60 letters, CJK characters, digits,
            "-" or "_". If the name duplicates an existing instance, a name
            is generated automatically.
        :type LoadBalancerName: str
        :param VpcId: Network ID of the backend targets, e.g. vpc-12345678
            (see DescribeVpcEx). Defaults to DefaultVPC when omitted;
            required for private-network CLB instances.
        :type VpcId: str
        :param SubnetId: Subnet ID; the VIP of a private-network CLB in a VPC
            is allocated from this subnet. Required for private-network CLB
            instances.
        :type SubnetId: str
        :param ProjectId: Project ID of the CLB (see DescribeProject);
            defaults to the default project.
        :type ProjectId: int
        :param AddressIPVersion: Public CLB only. IP version: IPV4, IPV6
            (NAT64) or IPv6FullChain; default IPV4.
        :type AddressIPVersion: str
        :param Number: Number of CLB instances to create; default 1.
        :type Number: int
        :param MasterZoneId: Public CLB only. Primary AZ for cross-AZ
            disaster recovery, e.g. 100001 or ap-guangzhou-1. The primary AZ
            carries traffic; the secondary AZ takes over only when the
            primary fails (see DescribeMasterZones).
        :type MasterZoneId: str
        :param ZoneId: Public CLB only. AZ ID to create the instance in,
            e.g. ap-guangzhou-1.
        :type ZoneId: str
        :param InternetAccessible: Public CLB only. Network billing mode.
        :type InternetAccessible: :class:`tencentcloud.clb.v20180317.models.InternetAccessible`
        :param VipIsp: Public CLB only. CMCC | CTCC | CUCC (China Mobile /
            Telecom / Unicom); defaults to BGP (see DescribeSingleIsp). A
            specified ISP requires bandwidth-package billing
            (BANDWIDTH_PACKAGE).
        :type VipIsp: str
        :param Tags: Tags to attach to the CLB at purchase time.
        :type Tags: list of TagInfo
        :param Vip: Apply for the CLB with this specific VIP. For shared- or
            dedicated-cluster public instances, VpcId is optional; SubnetId
            is required for IPv6 instances and omitted for IPv4 / IPv6 NAT64.
        :type Vip: str
        :param BandwidthPackageId: Bandwidth package ID; when specified, the
            network billing mode (InternetAccessible.InternetChargeType) must
            be BANDWIDTH_PACKAGE.
        :type BandwidthPackageId: str
        :param ExclusiveCluster: Dedicated cluster information; required when
            creating a dedicated-cluster CLB instance.
        :type ExclusiveCluster: :class:`tencentcloud.clb.v20180317.models.ExclusiveCluster`
        :param SlaType: Create a performance-capacity CLB instance: required
            and set to SLA for a performance-capacity instance under
            pay-as-you-go; omit for a shared instance.
        :type SlaType: str
        :param ClientToken: Idempotency token generated by the client; unique
            across requests, at most 64 ASCII characters. Omitting it forgoes
            idempotency guarantees.
        :type ClientToken: str
        :param SnatPro: Whether cross-region / cross-VPC IP binding is
            supported.
        :type SnatPro: bool
        :param SnatIps: SnatIps to create when cross-region / cross-VPC
            binding is enabled.
        :type SnatIps: list of SnatIp
        :param ClusterTag: Tag of the dedicated STGW cluster.
        :type ClusterTag: str
        :param SlaveZoneId: Public CLB only. Secondary AZ for cross-AZ
            disaster recovery, e.g. 100001 or ap-guangzhou-1; it carries
            traffic only after the primary AZ fails (see DescribeMasterZones).
        :type SlaveZoneId: str
        :param EipAddressId: Unique EIP ID, e.g. eip-11112222; only for
            binding an EIP to a private-network CLB.
        :type EipAddressId: str
        :param LoadBalancerPassToTarget: Whether the target allows traffic
            from the CLB. true: only the CLB's security groups are checked;
            false: security groups on both the CLB and the backend instance
            are checked.
        :type LoadBalancerPassToTarget: bool
        """
        self.LoadBalancerType = None
        self.Forward = None
        self.LoadBalancerName = None
        self.VpcId = None
        self.SubnetId = None
        self.ProjectId = None
        self.AddressIPVersion = None
        self.Number = None
        self.MasterZoneId = None
        self.ZoneId = None
        self.InternetAccessible = None
        self.VipIsp = None
        self.Tags = None
        self.Vip = None
        self.BandwidthPackageId = None
        self.ExclusiveCluster = None
        self.SlaType = None
        self.ClientToken = None
        self.SnatPro = None
        self.SnatIps = None
        self.ClusterTag = None
        self.SlaveZoneId = None
        self.EipAddressId = None
        self.LoadBalancerPassToTarget = None

    def _deserialize(self, params):
        self.LoadBalancerType = params.get("LoadBalancerType")
        self.Forward = params.get("Forward")
        self.LoadBalancerName = params.get("LoadBalancerName")
        self.VpcId = params.get("VpcId")
        self.SubnetId = params.get("SubnetId")
        self.ProjectId = params.get("ProjectId")
        self.AddressIPVersion = params.get("AddressIPVersion")
        self.Number = params.get("Number")
        self.MasterZoneId = params.get("MasterZoneId")
        self.ZoneId = params.get("ZoneId")
        if params.get("InternetAccessible") is not None:
            self.InternetAccessible = InternetAccessible()
            self.InternetAccessible._deserialize(params.get("InternetAccessible"))
        self.VipIsp = params.get("VipIsp")
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = TagInfo()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.Vip = params.get("Vip")
        self.BandwidthPackageId = params.get("BandwidthPackageId")
        if params.get("ExclusiveCluster") is not None:
            self.ExclusiveCluster = ExclusiveCluster()
            self.ExclusiveCluster._deserialize(params.get("ExclusiveCluster"))
        self.SlaType = params.get("SlaType")
        self.ClientToken = params.get("ClientToken")
        self.SnatPro = params.get("SnatPro")
        if params.get("SnatIps") is not None:
            self.SnatIps = []
            for item in params.get("SnatIps"):
                obj = SnatIp()
                obj._deserialize(item)
                self.SnatIps.append(obj)
        self.ClusterTag = params.get("ClusterTag")
        self.SlaveZoneId = params.get("SlaveZoneId")
        self.EipAddressId = params.get("EipAddressId")
        self.LoadBalancerPassToTarget = params.get("LoadBalancerPassToTarget")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CreateLoadBalancerResponse(AbstractModel):
    """CreateLoadBalancer response structure."""

    def __init__(self):
        r"""
        :param LoadBalancerIds: Array of unique CLB instance IDs.
        :type LoadBalancerIds: list of str
        :param RequestId: Unique request ID, returned with every request.
            Provide it when troubleshooting.
        :type RequestId: str
        """
        self.LoadBalancerIds = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known response field straight from the raw payload.
        for field in ("LoadBalancerIds", "RequestId"):
            setattr(self, field, params.get(field))
class CreateLoadBalancerSnatIpsRequest(AbstractModel):
    """CreateLoadBalancerSnatIps request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Unique CLB ID, e.g. lb-12345678.
        :type LoadBalancerId: str
        :param SnatIps: SnatIp information to add; either a specific IP or a
            subnet for automatic allocation.
        :type SnatIps: list of SnatIp
        :param Number: Number of SnatIps to add, used together with SnatIps;
            must not be specified when explicit IPs are given.
        :type Number: int
        """
        self.LoadBalancerId = None
        self.SnatIps = None
        self.Number = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        if params.get("SnatIps") is not None:
            self.SnatIps = []
            for item in params.get("SnatIps"):
                obj = SnatIp()
                obj._deserialize(item)
                self.SnatIps.append(obj)
        self.Number = params.get("Number")
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CreateLoadBalancerSnatIpsResponse(AbstractModel):
    """CreateLoadBalancerSnatIps response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request.
            Provide it when troubleshooting.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known response field straight from the raw payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class CreateRuleRequest(AbstractModel):
    """CreateRule request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: Listener ID.
        :type ListenerId: str
        :param Rules: Information of the new forwarding rules.
        :type Rules: list of RuleInput
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Rules = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        if params.get("Rules") is not None:
            self.Rules = []
            for item in params.get("Rules"):
                obj = RuleInput()
                obj._deserialize(item)
                self.Rules.append(obj)
        # Warn about payload keys that do not map to any known attribute.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)))
class CreateRuleResponse(AbstractModel):
    """CreateRule response structure."""

    def __init__(self):
        r"""
        :param LocationIds: Array of unique IDs of the created forwarding rules.
        :type LocationIds: list of str
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.LocationIds = None
        self.RequestId = None

    def _deserialize(self, params):
        # Both fields are plain values; copy them straight off the payload.
        for field in ("LocationIds", "RequestId"):
            setattr(self, field, params.get(field))
class CreateTargetGroupRequest(AbstractModel):
    """CreateTargetGroup request structure."""

    def __init__(self):
        r"""
        :param TargetGroupName: Target group name, limited to 50 characters.
        :type TargetGroupName: str
        :param VpcId: VPC ID of the target group; the default VPC is used when omitted.
        :type VpcId: str
        :param Port: Default port of the target group, usable when servers are added later.
        :type Port: int
        :param TargetGroupInstances: Real servers bound to the target group.
        :type TargetGroupInstances: list of TargetGroupInstance
        """
        self.TargetGroupName = None
        self.VpcId = None
        self.Port = None
        self.TargetGroupInstances = None

    def _deserialize(self, params):
        self.TargetGroupName = params.get("TargetGroupName")
        self.VpcId = params.get("VpcId")
        self.Port = params.get("Port")
        if params.get("TargetGroupInstances") is not None:
            self.TargetGroupInstances = []
            for item in params.get("TargetGroupInstances"):
                obj = TargetGroupInstance()
                obj._deserialize(item)
                self.TargetGroupInstances.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateTargetGroupResponse(AbstractModel):
    """CreateTargetGroup response structure."""

    def __init__(self):
        r"""
        :param TargetGroupId: ID generated for the newly created target group.
        :type TargetGroupId: str
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.TargetGroupId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Both fields are plain values; copy them straight off the payload.
        for field in ("TargetGroupId", "RequestId"):
            setattr(self, field, params.get(field))
class CreateTopicRequest(AbstractModel):
    """CreateTopic request structure."""

    def __init__(self):
        r"""
        :param TopicName: Name of the log topic.
        :type TopicName: str
        :param PartitionCount: Number of topic partitions; defaults to 1, at most 10 at creation. Split/merge operations change the count, with an overall cap of 50.
        :type PartitionCount: int
        :param TopicType: Log type — ACCESS for access logs, HEALTH for health-check logs; defaults to ACCESS.
        :type TopicType: str
        """
        self.TopicName = None
        self.PartitionCount = None
        self.TopicType = None

    def _deserialize(self, params):
        self.TopicName = params.get("TopicName")
        self.PartitionCount = params.get("PartitionCount")
        self.TopicType = params.get("TopicType")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class CreateTopicResponse(AbstractModel):
    """CreateTopic response structure."""

    def __init__(self):
        r"""
        :param TopicId: ID of the log topic.
        :type TopicId: str
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.TopicId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Both fields are plain values; copy them straight off the payload.
        for field in ("TopicId", "RequestId"):
            setattr(self, field, params.get(field))
class DeleteListenerRequest(AbstractModel):
    """DeleteListener request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: ID of the listener to delete.
        :type ListenerId: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteListenerResponse(AbstractModel):
    """DeleteListener response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteLoadBalancerListenersRequest(AbstractModel):
    """DeleteLoadBalancerListeners request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerIds: Array of listener IDs to delete, up to 20. When omitted, all listeners of the CLB are deleted.
        :type ListenerIds: list of str
        """
        self.LoadBalancerId = None
        self.ListenerIds = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerIds = params.get("ListenerIds")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteLoadBalancerListenersResponse(AbstractModel):
    """DeleteLoadBalancerListeners response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteLoadBalancerRequest(AbstractModel):
    """DeleteLoadBalancer request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerIds: Array of IDs of the CLB instances to delete; up to 20 entries.
        :type LoadBalancerIds: list of str
        """
        self.LoadBalancerIds = None

    def _deserialize(self, params):
        self.LoadBalancerIds = params.get("LoadBalancerIds")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteLoadBalancerResponse(AbstractModel):
    """DeleteLoadBalancer response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteLoadBalancerSnatIpsRequest(AbstractModel):
    """DeleteLoadBalancerSnatIps request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Unique CLB instance ID, e.g. lb-12345678.
        :type LoadBalancerId: str
        :param Ips: Array of SnatIp addresses to delete.
        :type Ips: list of str
        """
        self.LoadBalancerId = None
        self.Ips = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.Ips = params.get("Ips")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteLoadBalancerSnatIpsResponse(AbstractModel):
    """DeleteLoadBalancerSnatIps response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteRewriteRequest(AbstractModel):
    """DeleteRewrite request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param SourceListenerId: Source listener ID.
        :type SourceListenerId: str
        :param TargetListenerId: Target listener ID.
        :type TargetListenerId: str
        :param RewriteInfos: Redirection relationships between forwarding rules.
        :type RewriteInfos: list of RewriteLocationMap
        """
        self.LoadBalancerId = None
        self.SourceListenerId = None
        self.TargetListenerId = None
        self.RewriteInfos = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.SourceListenerId = params.get("SourceListenerId")
        self.TargetListenerId = params.get("TargetListenerId")
        if params.get("RewriteInfos") is not None:
            self.RewriteInfos = []
            for item in params.get("RewriteInfos"):
                obj = RewriteLocationMap()
                obj._deserialize(item)
                self.RewriteInfos.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteRewriteResponse(AbstractModel):
    """DeleteRewrite response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteRuleRequest(AbstractModel):
    """DeleteRule request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param LocationIds: Array of IDs of the forwarding rules to delete.
        :type LocationIds: list of str
        :param Domain: Domain of the forwarding rules to delete; ignored when LocationIds is provided.
        :type Domain: str
        :param Url: Forwarding path of the rules to delete; ignored when LocationIds is provided.
        :type Url: str
        :param NewDefaultServerDomain: A listener must keep one default domain; when deleting the default domain, specify another domain here as the new default.
        :type NewDefaultServerDomain: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.LocationIds = None
        self.Domain = None
        self.Url = None
        self.NewDefaultServerDomain = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        self.LocationIds = params.get("LocationIds")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        self.NewDefaultServerDomain = params.get("NewDefaultServerDomain")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteRuleResponse(AbstractModel):
    """DeleteRule response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteTargetGroupsRequest(AbstractModel):
    """DeleteTargetGroups request structure."""

    def __init__(self):
        r"""
        :param TargetGroupIds: Array of target group IDs.
        :type TargetGroupIds: list of str
        """
        self.TargetGroupIds = None

    def _deserialize(self, params):
        self.TargetGroupIds = params.get("TargetGroupIds")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeleteTargetGroupsResponse(AbstractModel):
    """DeleteTargetGroups response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeregisterTargetGroupInstancesRequest(AbstractModel):
    """DeregisterTargetGroupInstances request structure."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target group ID.
        :type TargetGroupId: str
        :param TargetGroupInstances: Information of the servers to unbind.
        :type TargetGroupInstances: list of TargetGroupInstance
        """
        self.TargetGroupId = None
        self.TargetGroupInstances = None

    def _deserialize(self, params):
        self.TargetGroupId = params.get("TargetGroupId")
        if params.get("TargetGroupInstances") is not None:
            self.TargetGroupInstances = []
            for item in params.get("TargetGroupInstances"):
                obj = TargetGroupInstance()
                obj._deserialize(item)
                self.TargetGroupInstances.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeregisterTargetGroupInstancesResponse(AbstractModel):
    """DeregisterTargetGroupInstances response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeregisterTargetsFromClassicalLBRequest(AbstractModel):
    """DeregisterTargetsFromClassicalLB request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param InstanceIds: List of backend instance IDs.
        :type InstanceIds: list of str
        """
        self.LoadBalancerId = None
        self.InstanceIds = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.InstanceIds = params.get("InstanceIds")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeregisterTargetsFromClassicalLBResponse(AbstractModel):
    """DeregisterTargetsFromClassicalLB response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeregisterTargetsRequest(AbstractModel):
    """DeregisterTargets request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID, e.g. lb-12345678.
        :type LoadBalancerId: str
        :param ListenerId: Listener ID, e.g. lbl-12345678.
        :type ListenerId: str
        :param Targets: List of backends to unbind; at most 20 entries.
        :type Targets: list of Target
        :param LocationId: Forwarding rule ID, e.g. loc-12345678. When unbinding from a layer-7 rule, either this or Domain+Url must be provided.
        :type LocationId: str
        :param Domain: Domain of the target rule; ignored when LocationId is provided.
        :type Domain: str
        :param Url: URL of the target rule; ignored when LocationId is provided.
        :type Url: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Targets = None
        self.LocationId = None
        self.Domain = None
        self.Url = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = Target()
                obj._deserialize(item)
                self.Targets.append(obj)
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DeregisterTargetsResponse(AbstractModel):
    """DeregisterTargets response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Single flat field; copy it straight off the payload.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DescribeBlockIPListRequest(AbstractModel):
    """DescribeBlockIPList request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param Offset: Data offset; defaults to 0.
        :type Offset: int
        :param Limit: Maximum number of IPs to return; defaults to 100000.
        :type Limit: int
        """
        self.LoadBalancerId = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeBlockIPListResponse(AbstractModel):
    """DescribeBlockIPList response structure."""

    def __init__(self):
        r"""
        :param BlockedIPCount: Number of IPs returned.
        :type BlockedIPCount: int
        :param ClientIPField: Field used to obtain the real client IP.
        :type ClientIPField: str
        :param BlockedIPList: List of IPs on the 12360 blocklist.
        :type BlockedIPList: list of BlockedIP
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.BlockedIPCount = None
        self.ClientIPField = None
        self.BlockedIPList = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = BlockedIP()
            parsed._deserialize(entry)
            return parsed

        self.BlockedIPCount = params.get("BlockedIPCount")
        self.ClientIPField = params.get("ClientIPField")
        raw = params.get("BlockedIPList")
        if raw is not None:
            self.BlockedIPList = [_load(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeBlockIPTaskRequest(AbstractModel):
    """DescribeBlockIPTask request structure."""

    def __init__(self):
        r"""
        :param TaskId: ID of the async task returned by the ModifyBlockIPList API.
        :type TaskId: str
        """
        self.TaskId = None

    def _deserialize(self, params):
        self.TaskId = params.get("TaskId")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeBlockIPTaskResponse(AbstractModel):
    """DescribeBlockIPTask response structure."""

    def __init__(self):
        r"""
        :param Status: Task status: 1 running, 2 fail, 6 succ.
        :type Status: int
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.Status = None
        self.RequestId = None

    def _deserialize(self, params):
        # Both fields are plain values; copy them straight off the payload.
        for field in ("Status", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeClassicalLBByInstanceIdRequest(AbstractModel):
    """DescribeClassicalLBByInstanceId request structure."""

    def __init__(self):
        r"""
        :param InstanceIds: List of backend instance IDs.
        :type InstanceIds: list of str
        """
        self.InstanceIds = None

    def _deserialize(self, params):
        self.InstanceIds = params.get("InstanceIds")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeClassicalLBByInstanceIdResponse(AbstractModel):
    """DescribeClassicalLBByInstanceId response structure."""

    def __init__(self):
        r"""
        :param LoadBalancerInfoList: List of CLB information.
        :type LoadBalancerInfoList: list of ClassicalLoadBalancerInfo
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.LoadBalancerInfoList = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = ClassicalLoadBalancerInfo()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("LoadBalancerInfoList")
        if raw is not None:
            self.LoadBalancerInfoList = [_load(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeClassicalLBHealthStatusRequest(AbstractModel):
    """DescribeClassicalLBHealthStatus request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeClassicalLBHealthStatusResponse(AbstractModel):
    """DescribeClassicalLBHealthStatus response structure."""

    def __init__(self):
        r"""
        :param HealthList: List of backend health states.
        Note: this field may return null, meaning no valid value was obtained.
        :type HealthList: list of ClassicalHealth
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.HealthList = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = ClassicalHealth()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("HealthList")
        if raw is not None:
            self.HealthList = [_load(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeClassicalLBListenersRequest(AbstractModel):
    """DescribeClassicalLBListeners request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerIds: List of CLB listener IDs.
        :type ListenerIds: list of str
        :param Protocol: Listener protocol: 'TCP', 'UDP', 'HTTP', 'HTTPS'.
        :type Protocol: str
        :param ListenerPort: Listener port, in the range [1-65535].
        :type ListenerPort: int
        :param Status: Listener status: 0 creating, 1 running.
        :type Status: int
        """
        self.LoadBalancerId = None
        self.ListenerIds = None
        self.Protocol = None
        self.ListenerPort = None
        self.Status = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerIds = params.get("ListenerIds")
        self.Protocol = params.get("Protocol")
        self.ListenerPort = params.get("ListenerPort")
        self.Status = params.get("Status")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeClassicalLBListenersResponse(AbstractModel):
    """DescribeClassicalLBListeners response structure."""

    def __init__(self):
        r"""
        :param Listeners: List of listeners.
        Note: this field may return null, meaning no valid value was obtained.
        :type Listeners: list of ClassicalListener
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.Listeners = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = ClassicalListener()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("Listeners")
        if raw is not None:
            self.Listeners = [_load(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeClassicalLBTargetsRequest(AbstractModel):
    """DescribeClassicalLBTargets request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        """
        self.LoadBalancerId = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeClassicalLBTargetsResponse(AbstractModel):
    """DescribeClassicalLBTargets response structure."""

    def __init__(self):
        r"""
        :param Targets: List of backend services.
        Note: this field may return null, meaning no valid value was obtained.
        :type Targets: list of ClassicalTarget
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.Targets = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = ClassicalTarget()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("Targets")
        if raw is not None:
            self.Targets = [_load(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeClsLogSetRequest(AbstractModel):
    """DescribeClsLogSet request structure.

    This request carries no parameters, so the model defines no fields.
    """
class DescribeClsLogSetResponse(AbstractModel):
    """DescribeClsLogSet response structure."""

    def __init__(self):
        r"""
        :param LogsetId: ID of the logset.
        :type LogsetId: str
        :param HealthLogsetId: ID of the health-check logset.
        :type HealthLogsetId: str
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.LogsetId = None
        self.HealthLogsetId = None
        self.RequestId = None

    def _deserialize(self, params):
        # All fields are plain values; copy them straight off the payload.
        for field in ("LogsetId", "HealthLogsetId", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeClusterResourcesRequest(AbstractModel):
    """DescribeClusterResources request structure."""

    def __init__(self):
        r"""
        :param Limit: Number of cluster resources to return; defaults to 20, max 100.
        :type Limit: int
        :param Offset: Starting offset of the returned resource list; defaults to 0.
        :type Offset: int
        :param Filters: Filter conditions for the cluster resource list:
        <li> cluster-id - String - Optional - filter by unique cluster ID, e.g. "tgw-12345678", "stgw-12345678", "vpcgw-12345678".</li>
        <li> vip - String - Optional - filter by VIP.</li>
        <li> loadblancer-id - String - Optional - filter by unique CLB ID.</li>
        <li> idle - String - Optional - filter by idle state, e.g. "True", "False".</li>
        :type Filters: list of Filter
        """
        self.Limit = None
        self.Offset = None
        self.Filters = None

    def _deserialize(self, params):
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeClusterResourcesResponse(AbstractModel):
    """DescribeClusterResources response structure."""

    def __init__(self):
        r"""
        :param ClusterResourceSet: List of resources in the cluster.
        :type ClusterResourceSet: list of ClusterResource
        :param TotalCount: Total number of resources in the cluster.
        :type TotalCount: int
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.ClusterResourceSet = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = ClusterResource()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("ClusterResourceSet")
        if raw is not None:
            self.ClusterResourceSet = [_load(entry) for entry in raw]
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeCustomizedConfigAssociateListRequest(AbstractModel):
    """DescribeCustomizedConfigAssociateList request structure."""

    def __init__(self):
        r"""
        :param UconfigId: Configuration ID.
        :type UconfigId: str
        :param Offset: Starting position of the binding list; defaults to 0.
        :type Offset: int
        :param Limit: Number of bindings to pull; defaults to 20.
        :type Limit: int
        :param Domain: Domain to search for.
        :type Domain: str
        """
        self.UconfigId = None
        self.Offset = None
        self.Limit = None
        self.Domain = None

    def _deserialize(self, params):
        self.UconfigId = params.get("UconfigId")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.Domain = params.get("Domain")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeCustomizedConfigAssociateListResponse(AbstractModel):
    """DescribeCustomizedConfigAssociateList response structure."""

    def __init__(self):
        r"""
        :param BindList: List of binding relationships.
        :type BindList: list of BindDetailItem
        :param TotalCount: Total number of binding relationships.
        :type TotalCount: int
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.BindList = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = BindDetailItem()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("BindList")
        if raw is not None:
            self.BindList = [_load(entry) for entry in raw]
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeCustomizedConfigListRequest(AbstractModel):
    """DescribeCustomizedConfigList request structure."""

    def __init__(self):
        r"""
        :param ConfigType: Configuration type: CLB (load balancer level), SERVER (domain level), LOCATION (rule level).
        :type ConfigType: str
        :param Offset: Page offset; defaults to 0.
        :type Offset: int
        :param Limit: Number of entries to pull; defaults to 20.
        :type Limit: int
        :param ConfigName: Configuration name to pull; fuzzy match.
        :type ConfigName: str
        :param UconfigIds: Configuration IDs.
        :type UconfigIds: list of str
        :param Filters: Filter conditions:
        <li> loadbalancer-id - String - Optional - filter by CLB ID, e.g. "lb-12345678".</li>
        <li> vip - String - Optional - filter by CLB VIP, e.g. "1.1.1.1", "2204::22:3".</li>
        :type Filters: list of Filter
        """
        self.ConfigType = None
        self.Offset = None
        self.Limit = None
        self.ConfigName = None
        self.UconfigIds = None
        self.Filters = None

    def _deserialize(self, params):
        self.ConfigType = params.get("ConfigType")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.ConfigName = params.get("ConfigName")
        self.UconfigIds = params.get("UconfigIds")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeCustomizedConfigListResponse(AbstractModel):
    """DescribeCustomizedConfigList response structure."""

    def __init__(self):
        r"""
        :param ConfigList: List of configurations.
        :type ConfigList: list of ConfigListItem
        :param TotalCount: Number of configurations.
        :type TotalCount: int
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.ConfigList = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = ConfigListItem()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("ConfigList")
        if raw is not None:
            self.ConfigList = [_load(entry) for entry in raw]
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeExclusiveClustersRequest(AbstractModel):
    """DescribeExclusiveClusters request structure."""

    def __init__(self):
        r"""
        :param Limit: Number of clusters to return; defaults to 20, max 100.
        :type Limit: int
        :param Offset: Starting offset of the returned cluster list; defaults to 0.
        :type Offset: int
        :param Filters: Filter conditions for the cluster list:
        <li> cluster-type - String - Optional - filter by cluster type: "TGW", "STGW", "VPCGW".</li>
        <li> cluster-id - String - Optional - filter by unique cluster ID, e.g. "tgw-12345678", "stgw-12345678", "vpcgw-12345678".</li>
        <li> cluster-name - String - Optional - filter by cluster name.</li>
        <li> cluster-tag - String - Optional - filter by cluster tag (only TGW/STGW clusters have tags).</li>
        <li> vip - String - Optional - filter by a VIP inside the cluster.</li>
        <li> loadblancer-id - String - Optional - filter by a unique CLB ID inside the cluster.</li>
        <li> network - String - Optional - filter by cluster network type: "Public", "Private".</li>
        <li> zone - String - Optional - filter by availability zone, e.g. "ap-guangzhou-1".</li>
        <li> isp - String - Optional - filter by TGW cluster ISP type: "BGP", "CMCC", "CUCC", "CTCC", "INTERNAL".</li>
        :type Filters: list of Filter
        """
        self.Limit = None
        self.Offset = None
        self.Filters = None

    def _deserialize(self, params):
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeExclusiveClustersResponse(AbstractModel):
    """DescribeExclusiveClusters response structure."""

    def __init__(self):
        r"""
        :param ClusterSet: List of clusters.
        :type ClusterSet: list of Cluster
        :param TotalCount: Total number of clusters.
        :type TotalCount: int
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.ClusterSet = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = Cluster()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("ClusterSet")
        if raw is not None:
            self.ClusterSet = [_load(entry) for entry in raw]
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeLBListenersRequest(AbstractModel):
    """DescribeLBListeners request structure."""

    def __init__(self):
        r"""
        :param Backends: List of private IPs to query.
        :type Backends: list of LbRsItem
        """
        self.Backends = None

    def _deserialize(self, params):
        if params.get("Backends") is not None:
            self.Backends = []
            for item in params.get("Backends"):
                obj = LbRsItem()
                obj._deserialize(item)
                self.Backends.append(obj)
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeLBListenersResponse(AbstractModel):
    """DescribeLBListeners response structure."""

    def __init__(self):
        r"""
        :param LoadBalancers: Bound backend rules.
        :type LoadBalancers: list of LBItem
        :param RequestId: Unique request ID returned with every request; needed when reporting a problem.
        :type RequestId: str
        """
        self.LoadBalancers = None
        self.RequestId = None

    def _deserialize(self, params):
        def _load(entry):
            parsed = LBItem()
            parsed._deserialize(entry)
            return parsed

        raw = params.get("LoadBalancers")
        if raw is not None:
            self.LoadBalancers = [_load(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeListenersRequest(AbstractModel):
    """DescribeListeners request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerIds: Array of CLB listener IDs to query; at most 100.
        :type ListenerIds: list of str
        :param Protocol: Protocol of the listeners to query: TCP | UDP | HTTP | HTTPS | TCP_SSL.
        :type Protocol: str
        :param Port: Port of the listeners to query.
        :type Port: int
        """
        self.LoadBalancerId = None
        self.ListenerIds = None
        self.Protocol = None
        self.Port = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerIds = params.get("ListenerIds")
        self.Protocol = params.get("Protocol")
        self.Port = params.get("Port")
        # Warn about payload keys that map to no model attribute (fixes the
        # misspelled "memeber_set" local and "fileds" warning text).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class DescribeListenersResponse(AbstractModel):
    """DescribeListeners response structure."""

    def __init__(self):
        r"""
        :param Listeners: List of listeners.
        :type Listeners: list of Listener
        :param TotalCount: Total number of listeners (after filtering by
            port, protocol and listener ID).
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type TotalCount: int
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.Listeners = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        def build(entry):
            model = Listener()
            model._deserialize(entry)
            return model

        raw = params.get("Listeners")
        if raw is not None:
            self.Listeners = [build(entry) for entry in raw]
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeLoadBalancerListByCertIdRequest(AbstractModel):
    """DescribeLoadBalancerListByCertId request structure."""

    def __init__(self):
        r"""
        :param CertIds: IDs of server certificates or client certificates.
        :type CertIds: list of str
        """
        self.CertIds = None

    def _deserialize(self, params):
        self.CertIds = params.get("CertIds")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeLoadBalancerListByCertIdResponse(AbstractModel):
    """DescribeLoadBalancerListByCertId response structure."""

    def __init__(self):
        r"""
        :param CertSet: Certificate IDs and the load balancer instances
            associated with each certificate ID.
        :type CertSet: list of CertIdRelatedWithLoadBalancers
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.CertSet = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_set = params.get("CertSet")
        if raw_set is not None:
            collected = []
            for entry in raw_set:
                rel = CertIdRelatedWithLoadBalancers()
                rel._deserialize(entry)
                collected.append(rel)
            self.CertSet = collected
        self.RequestId = params.get("RequestId")
class DescribeLoadBalancerTrafficRequest(AbstractModel):
    """DescribeLoadBalancerTraffic request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerRegion: Region of the load balancers. If not
            passed, load balancers in all regions are returned.
        :type LoadBalancerRegion: str
        """
        self.LoadBalancerRegion = None

    def _deserialize(self, params):
        self.LoadBalancerRegion = params.get("LoadBalancerRegion")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeLoadBalancerTrafficResponse(AbstractModel):
    """DescribeLoadBalancerTraffic response structure."""

    def __init__(self):
        r"""
        :param LoadBalancerTraffic: Load balancer information sorted by
            outbound bandwidth from highest to lowest.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type LoadBalancerTraffic: list of LoadBalancerTraffic
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.LoadBalancerTraffic = None
        self.RequestId = None

    def _deserialize(self, params):
        def build(entry):
            model = LoadBalancerTraffic()
            model._deserialize(entry)
            return model

        raw = params.get("LoadBalancerTraffic")
        if raw is not None:
            self.LoadBalancerTraffic = [build(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeLoadBalancersDetailRequest(AbstractModel):
    """DescribeLoadBalancersDetail request structure."""

    def __init__(self):
        r"""
        :param Limit: Number of load balancers to return; default 20,
            maximum 100.
        :type Limit: int
        :param Offset: Starting offset of the returned list; default 0.
        :type Offset: int
        :param Fields: Fields to return. LoadBalancerId and
            LoadBalancerName are added by default.
        :type Fields: list of str
        :param TargetType: When Fields contains TargetId, TargetAddress,
            TargetPort, TargetWeight and similar fields, choose whether to
            export targets bound through target groups or not; value range
            NODE, GROUP.
        :type TargetType: str
        :param Filters: Filter conditions for querying load balancer
            details. Supported filters:
            <li> loadbalancer-id - String - Optional - Filter by load
            balancer ID, e.g. "lb-12345678".</li>
            <li> project-id - String - Optional - Filter by project ID,
            e.g. "0", "123".</li>
            <li> network - String - Optional - Filter by network type,
            e.g. "Public", "Private".</li>
            <li> vip - String - Optional - Filter by load balancer VIP,
            e.g. "1.1.1.1", "2204::22:3".</li>
            <li> target-ip - String - Optional - Filter by backend target
            private IP, e.g. "1.1.1.1", "2203::214:4".</li>
            <li> vpcid - String - Optional - Filter by the VPC the load
            balancer belongs to, e.g. "vpc-12345678".</li>
            <li> zone - String - Optional - Filter by availability zone,
            e.g. "ap-guangzhou-1".</li>
            <li> tag-key - String - Optional - Filter by tag key,
            e.g. "name".</li>
            <li> tag:* - String - Optional - Filter by tag key/value;
            the part after ':' is the tag key, e.g.
            {"Name": "tag:name", "Values": ["zhangsan", "lisi"]}.</li>
            <li> fuzzy-search - String - Optional - Fuzzy search on load
            balancer VIP and name, e.g. "1.1".</li>
        :type Filters: list of Filter
        """
        self.Limit = None
        self.Offset = None
        self.Fields = None
        self.TargetType = None
        self.Filters = None

    def _deserialize(self, params):
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.Fields = params.get("Fields")
        self.TargetType = params.get("TargetType")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeLoadBalancersDetailResponse(AbstractModel):
    """DescribeLoadBalancersDetail response structure."""

    def __init__(self):
        r"""
        :param TotalCount: Total number of load balancer detail entries.
        :type TotalCount: int
        :param LoadBalancerDetailSet: List of load balancer details.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type LoadBalancerDetailSet: list of LoadBalancerDetail
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.LoadBalancerDetailSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_details = params.get("LoadBalancerDetailSet")
        if raw_details is not None:
            details = []
            for entry in raw_details:
                detail = LoadBalancerDetail()
                detail._deserialize(entry)
                details.append(detail)
            self.LoadBalancerDetailSet = details
        self.RequestId = params.get("RequestId")
class DescribeLoadBalancersRequest(AbstractModel):
    """DescribeLoadBalancers request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerIds: Load balancer instance IDs.
        :type LoadBalancerIds: list of str
        :param LoadBalancerType: Network type of the load balancer:
            OPEN (public network), INTERNAL (private network).
        :type LoadBalancerType: str
        :param Forward: Type of the load balancer. 1: generic load
            balancer; 0: classic load balancer. If not passed, all types
            are queried.
        :type Forward: int
        :param LoadBalancerName: Name of the load balancer instance.
        :type LoadBalancerName: str
        :param Domain: Domain name assigned by Tencent Cloud to the load
            balancer; only meaningful for classic public load balancers.
        :type Domain: str
        :param LoadBalancerVips: VIP addresses of the load balancer;
            multiple values supported.
        :type LoadBalancerVips: list of str
        :param BackendPublicIps: Public IPs of the backend services bound
            to the load balancer.
        :type BackendPublicIps: list of str
        :param BackendPrivateIps: Private IPs of the backend services
            bound to the load balancer.
        :type BackendPrivateIps: list of str
        :param Offset: Data offset; default 0.
        :type Offset: int
        :param Limit: Number of instances to return; default 20, max 100.
        :type Limit: int
        :param OrderBy: Sort field; supports LoadBalancerName, CreateTime,
            Domain, LoadBalancerType.
        :type OrderBy: str
        :param OrderType: 1: descending, 0: ascending; defaults to
            descending by creation time.
        :type OrderType: int
        :param SearchKey: Search key; fuzzy match on name, domain and VIP.
        :type SearchKey: str
        :param ProjectId: ID of the project the load balancer belongs to;
            obtainable via the DescribeProject API.
        :type ProjectId: int
        :param WithRs: Whether the load balancer has backend services
            bound. 0: none bound; 1: bound; -1: query all.
        :type WithRs: int
        :param VpcId: Unique VPC ID the load balancer belongs to, e.g.
            vpc-bhqkbhdx; pass '0' for the basic network.
        :type VpcId: str
        :param SecurityGroup: Security group ID, e.g. sg-m1cc****.
        :type SecurityGroup: str
        :param MasterZone: Primary availability zone ID, e.g. "100001"
            (corresponds to Guangzhou Zone 1).
        :type MasterZone: str
        :param Filters: At most 10 `Filters` per request and 100 values in
            `Filter.Values`. Supported filters:
            <li> internet-charge-type - String - Optional - Filter by CLB
            network billing mode: "BANDWIDTH_PREPAID",
            "TRAFFIC_POSTPAID_BY_HOUR", "BANDWIDTH_POSTPAID_BY_HOUR",
            "BANDWIDTH_PACKAGE".</li>
            <li> master-zone-id - String - Optional - Filter by CLB primary
            availability zone ID, e.g. "100001" (Guangzhou Zone 1).</li>
            <li> tag-key - String - Optional - Filter by CLB tag key.</li>
            <li> tag:tag-key - String - Optional - Filter by CLB tag
            key/value pair; replace tag-key with the concrete key.</li>
            <li> function-name - String - Optional - Filter by the name of
            the SCF function bound to the CLB backend.</li>
            <li> vip-isp - String - Optional - Filter by the ISP type of
            the CLB VIP, e.g. "BGP", "INTERNAL", "CMCC", "CTCC",
            "CUCC".</li>
        :type Filters: list of Filter
        """
        self.LoadBalancerIds = None
        self.LoadBalancerType = None
        self.Forward = None
        self.LoadBalancerName = None
        self.Domain = None
        self.LoadBalancerVips = None
        self.BackendPublicIps = None
        self.BackendPrivateIps = None
        self.Offset = None
        self.Limit = None
        self.OrderBy = None
        self.OrderType = None
        self.SearchKey = None
        self.ProjectId = None
        self.WithRs = None
        self.VpcId = None
        self.SecurityGroup = None
        self.MasterZone = None
        self.Filters = None

    def _deserialize(self, params):
        self.LoadBalancerIds = params.get("LoadBalancerIds")
        self.LoadBalancerType = params.get("LoadBalancerType")
        self.Forward = params.get("Forward")
        self.LoadBalancerName = params.get("LoadBalancerName")
        self.Domain = params.get("Domain")
        self.LoadBalancerVips = params.get("LoadBalancerVips")
        self.BackendPublicIps = params.get("BackendPublicIps")
        self.BackendPrivateIps = params.get("BackendPrivateIps")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.OrderBy = params.get("OrderBy")
        self.OrderType = params.get("OrderType")
        self.SearchKey = params.get("SearchKey")
        self.ProjectId = params.get("ProjectId")
        self.WithRs = params.get("WithRs")
        self.VpcId = params.get("VpcId")
        self.SecurityGroup = params.get("SecurityGroup")
        self.MasterZone = params.get("MasterZone")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeLoadBalancersResponse(AbstractModel):
    """DescribeLoadBalancers response structure."""

    def __init__(self):
        r"""
        :param TotalCount: Total number of load balancer instances matching
            the filter conditions; unrelated to the Limit in the request.
        :type TotalCount: int
        :param LoadBalancerSet: Array of returned load balancer instances.
        :type LoadBalancerSet: list of LoadBalancer
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.LoadBalancerSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")

        def build(entry):
            lb = LoadBalancer()
            lb._deserialize(entry)
            return lb

        raw = params.get("LoadBalancerSet")
        if raw is not None:
            self.LoadBalancerSet = [build(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeQuotaRequest(AbstractModel):
    """DescribeQuota request structure.

    No request parameters are declared on this class.
    """
class DescribeQuotaResponse(AbstractModel):
    """DescribeQuota response structure."""

    def __init__(self):
        r"""
        :param QuotaSet: List of quotas.
        :type QuotaSet: list of Quota
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.QuotaSet = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_quotas = params.get("QuotaSet")
        if raw_quotas is not None:
            quotas = []
            for entry in raw_quotas:
                quota = Quota()
                quota._deserialize(entry)
                quotas.append(quota)
            self.QuotaSet = quotas
        self.RequestId = params.get("RequestId")
class DescribeRewriteRequest(AbstractModel):
    """DescribeRewrite request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param SourceListenerIds: Array of load balancer listener IDs.
        :type SourceListenerIds: list of str
        :param SourceLocationIds: Array of forwarding rule IDs.
        :type SourceLocationIds: list of str
        """
        self.LoadBalancerId = None
        self.SourceListenerIds = None
        self.SourceLocationIds = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.SourceListenerIds = params.get("SourceListenerIds")
        self.SourceLocationIds = params.get("SourceLocationIds")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeRewriteResponse(AbstractModel):
    """DescribeRewrite response structure."""

    def __init__(self):
        r"""
        :param RewriteSet: Array of redirection forwarding rules; an empty
            array is returned if there are no redirection rules.
        :type RewriteSet: list of RuleOutput
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.RewriteSet = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_rules = params.get("RewriteSet")
        if raw_rules is not None:
            rules = []
            for entry in raw_rules:
                rule = RuleOutput()
                rule._deserialize(entry)
                rules.append(rule)
            self.RewriteSet = rules
        self.RequestId = params.get("RequestId")
class DescribeTargetGroupInstancesRequest(AbstractModel):
    """DescribeTargetGroupInstances request structure."""

    def __init__(self):
        r"""
        :param Filters: Filter conditions; currently only TargetGroupId,
            BindIP and InstanceId are supported.
        :type Filters: list of Filter
        :param Limit: Limit on the number of displayed results; default 20.
        :type Limit: int
        :param Offset: Display offset; default 0.
        :type Offset: int
        """
        self.Filters = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeTargetGroupInstancesResponse(AbstractModel):
    """DescribeTargetGroupInstances response structure."""

    def __init__(self):
        r"""
        :param TotalCount: Number of results in this query.
        :type TotalCount: int
        :param TargetGroupInstanceSet: Information about the bound servers.
        :type TargetGroupInstanceSet: list of TargetGroupBackend
        :param RealCount: Actual total count, unaffected by Limit, Offset
            and CAM.
        :type RealCount: int
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TargetGroupInstanceSet = None
        self.RealCount = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")

        def build(entry):
            backend = TargetGroupBackend()
            backend._deserialize(entry)
            return backend

        raw = params.get("TargetGroupInstanceSet")
        if raw is not None:
            self.TargetGroupInstanceSet = [build(entry) for entry in raw]
        self.RealCount = params.get("RealCount")
        self.RequestId = params.get("RequestId")
class DescribeTargetGroupListRequest(AbstractModel):
    """DescribeTargetGroupList request structure."""

    def __init__(self):
        r"""
        :param TargetGroupIds: Array of target group IDs.
        :type TargetGroupIds: list of str
        :param Filters: Array of filter conditions; supports
            TargetGroupVpcId and TargetGroupName. Mutually exclusive with
            TargetGroupIds; target group IDs take precedence.
        :type Filters: list of Filter
        :param Offset: Starting display offset.
        :type Offset: int
        :param Limit: Limit on the number of displayed entries; default 20.
        :type Limit: int
        """
        self.TargetGroupIds = None
        self.Filters = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        self.TargetGroupIds = params.get("TargetGroupIds")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeTargetGroupListResponse(AbstractModel):
    """DescribeTargetGroupList response structure."""

    def __init__(self):
        r"""
        :param TotalCount: Number of displayed results.
        :type TotalCount: int
        :param TargetGroupSet: Collection of displayed target group
            information.
        :type TargetGroupSet: list of TargetGroupInfo
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TargetGroupSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_groups = params.get("TargetGroupSet")
        if raw_groups is not None:
            groups = []
            for entry in raw_groups:
                info = TargetGroupInfo()
                info._deserialize(entry)
                groups.append(info)
            self.TargetGroupSet = groups
        self.RequestId = params.get("RequestId")
class DescribeTargetGroupsRequest(AbstractModel):
    """DescribeTargetGroups request structure."""

    def __init__(self):
        r"""
        :param TargetGroupIds: Target group IDs; mutually exclusive with
            Filters.
        :type TargetGroupIds: list of str
        :param Limit: Limit on the number of displayed entries; default 20.
        :type Limit: int
        :param Offset: Starting display offset.
        :type Offset: int
        :param Filters: Array of filter conditions, mutually exclusive
            with TargetGroupIds; supports TargetGroupVpcId and
            TargetGroupName.
        :type Filters: list of Filter
        """
        self.TargetGroupIds = None
        self.Limit = None
        self.Offset = None
        self.Filters = None

    def _deserialize(self, params):
        self.TargetGroupIds = params.get("TargetGroupIds")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeTargetGroupsResponse(AbstractModel):
    """DescribeTargetGroups response structure."""

    def __init__(self):
        r"""
        :param TotalCount: Number of displayed results.
        :type TotalCount: int
        :param TargetGroupSet: Collection of displayed target group
            information.
        :type TargetGroupSet: list of TargetGroupInfo
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TargetGroupSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")

        def build(entry):
            info = TargetGroupInfo()
            info._deserialize(entry)
            return info

        raw = params.get("TargetGroupSet")
        if raw is not None:
            self.TargetGroupSet = [build(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeTargetHealthRequest(AbstractModel):
    """DescribeTargetHealth request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerIds: List of load balancer instance IDs to
            query.
        :type LoadBalancerIds: list of str
        """
        self.LoadBalancerIds = None

    def _deserialize(self, params):
        self.LoadBalancerIds = params.get("LoadBalancerIds")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeTargetHealthResponse(AbstractModel):
    """DescribeTargetHealth response structure."""

    def __init__(self):
        r"""
        :param LoadBalancers: List of load balancer instances.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type LoadBalancers: list of LoadBalancerHealth
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.LoadBalancers = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_lbs = params.get("LoadBalancers")
        if raw_lbs is not None:
            healths = []
            for entry in raw_lbs:
                health = LoadBalancerHealth()
                health._deserialize(entry)
                healths.append(health)
            self.LoadBalancers = healths
        self.RequestId = params.get("RequestId")
class DescribeTargetsRequest(AbstractModel):
    """DescribeTargets request structure."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param ListenerIds: List of listener IDs; up to 20 entries.
        :type ListenerIds: list of str
        :param Protocol: Listener protocol type.
        :type Protocol: str
        :param Port: Listener port.
        :type Port: int
        """
        self.LoadBalancerId = None
        self.ListenerIds = None
        self.Protocol = None
        self.Port = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerIds = params.get("ListenerIds")
        self.Protocol = params.get("Protocol")
        self.Port = params.get("Port")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeTargetsResponse(AbstractModel):
    """DescribeTargets response structure."""

    def __init__(self):
        r"""
        :param Listeners: Information about the machines bound behind the
            listeners.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type Listeners: list of ListenerBackend
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.Listeners = None
        self.RequestId = None

    def _deserialize(self, params):
        def build(entry):
            backend = ListenerBackend()
            backend._deserialize(entry)
            return backend

        raw = params.get("Listeners")
        if raw is not None:
            self.Listeners = [build(entry) for entry in raw]
        self.RequestId = params.get("RequestId")
class DescribeTaskStatusRequest(AbstractModel):
    """DescribeTaskStatus request structure."""

    def __init__(self):
        r"""
        :param TaskId: Request ID, i.e. the RequestId parameter returned
            by the API.
        :type TaskId: str
        """
        self.TaskId = None

    def _deserialize(self, params):
        self.TaskId = params.get("TaskId")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DescribeTaskStatusResponse(AbstractModel):
    """DescribeTaskStatus response structure."""

    def __init__(self):
        r"""
        :param Status: Current status of the task.
            0: succeeded, 1: failed, 2: in progress.
        :type Status: int
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.Status = None
        self.RequestId = None

    def _deserialize(self, params):
        # Both fields are plain scalars; copy them straight across.
        for field in ("Status", "RequestId"):
            setattr(self, field, params.get(field))
class DisassociateTargetGroupsRequest(AbstractModel):
    """DisassociateTargetGroups request structure."""

    def __init__(self):
        r"""
        :param Associations: Array of rule associations to unbind.
        :type Associations: list of TargetGroupAssociation
        """
        self.Associations = None

    def _deserialize(self, params):
        if params.get("Associations") is not None:
            self.Associations = []
            for item in params.get("Associations"):
                obj = TargetGroupAssociation()
                obj._deserialize(item)
                self.Associations.append(obj)
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class DisassociateTargetGroupsResponse(AbstractModel):
    """DisassociateTargetGroups response structure."""

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, returned for every request;
            required when locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request ID comes back for this call.
        setattr(self, "RequestId", params.get("RequestId"))
class ExclusiveCluster(AbstractModel):
    """Dedicated (exclusive) cluster information."""

    def __init__(self):
        r"""
        :param L4Clusters: List of layer-4 dedicated clusters.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type L4Clusters: list of ClusterItem
        :param L7Clusters: List of layer-7 dedicated clusters.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type L7Clusters: list of ClusterItem
        :param ClassicalCluster: vpcgw cluster.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type ClassicalCluster: :class:`tencentcloud.clb.v20180317.models.ClusterItem`
        """
        self.L4Clusters = None
        self.L7Clusters = None
        self.ClassicalCluster = None

    def _deserialize(self, params):
        if params.get("L4Clusters") is not None:
            self.L4Clusters = []
            for item in params.get("L4Clusters"):
                obj = ClusterItem()
                obj._deserialize(item)
                self.L4Clusters.append(obj)
        if params.get("L7Clusters") is not None:
            self.L7Clusters = []
            for item in params.get("L7Clusters"):
                obj = ClusterItem()
                obj._deserialize(item)
                self.L7Clusters.append(obj)
        if params.get("ClassicalCluster") is not None:
            self.ClassicalCluster = ClusterItem()
            self.ClassicalCluster._deserialize(params.get("ClassicalCluster"))
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class ExtraInfo(AbstractModel):
    """Reserved field; ordinary users generally do not need it."""

    def __init__(self):
        r"""
        :param ZhiTong: Whether VIP pass-through is enabled.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type ZhiTong: bool
        :param TgwGroupName: TgwGroup name.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type TgwGroupName: str
        """
        self.ZhiTong = None
        self.TgwGroupName = None

    def _deserialize(self, params):
        self.ZhiTong = params.get("ZhiTong")
        self.TgwGroupName = params.get("TgwGroupName")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class Filter(AbstractModel):
    """Filter condition."""

    def __init__(self):
        r"""
        :param Name: Name of the filter.
        :type Name: str
        :param Values: Array of filter values.
        :type Values: list of str
        """
        self.Name = None
        self.Values = None

    def _deserialize(self, params):
        self.Name = params.get("Name")
        self.Values = params.get("Values")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class HealthCheck(AbstractModel):
    """Health check information.

    Note: the custom-probe parameters are currently only supported in a
    small number of regions (gray release).
    """

    def __init__(self):
        r"""
        :param HealthSwitch: Whether health check is enabled:
            1 (enabled), 0 (disabled).
        :type HealthSwitch: int
        :param TimeOut: Health check response timeout (layer-4 listeners
            only); range 2-60, default 2, unit: second. Must be smaller
            than the check interval.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type TimeOut: int
        :param IntervalTime: Health check probe interval; default 5,
            range 5-300, unit: second.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type IntervalTime: int
        :param HealthNum: Health threshold; default 3, meaning three
            consecutive healthy probes mark the forwarding as normal;
            range 2-10, unit: times.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type HealthNum: int
        :param UnHealthNum: Unhealthy threshold; default 3, meaning three
            consecutive unhealthy probes mark the forwarding as abnormal;
            range 2-10, unit: times.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type UnHealthNum: int
        :param HttpCode: Health check status codes (only for HTTP/HTTPS
            forwarding rules and the HTTP health check mode of TCP
            listeners). Range 1-31, default 31.
            1 means a 1xx response is healthy, 2 means 2xx, 4 means 3xx,
            8 means 4xx, 16 means 5xx; sum the values to accept several
            code classes. Note: the HTTP health check mode of TCP
            listeners supports only one status-code class.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type HttpCode: int
        :param HttpCheckPath: Health check path (only for HTTP/HTTPS
            forwarding rules and the HTTP health check mode of TCP
            listeners).
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type HttpCheckPath: str
        :param HttpCheckDomain: Health check domain (only for HTTP/HTTPS
            forwarding rules and the HTTP health check mode of TCP
            listeners).
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type HttpCheckDomain: str
        :param HttpCheckMethod: Health check method (only for HTTP/HTTPS
            forwarding rules and the HTTP health check mode of TCP
            listeners); default HEAD, allowed values HEAD or GET.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type HttpCheckMethod: str
        :param CheckPort: Custom probe parameter. Health check port;
            defaults to the backend service port. Leave empty unless a
            specific port is required. (TCP/UDP listeners only.)
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type CheckPort: int
        :param ContextType: Custom probe parameter. Required when
            CheckType is CUSTOM; the input format of the health check,
            HEX or TEXT. With HEX, SendContext and RecvContext may only
            use characters from 0123456789ABCDEF and must have even
            length. (TCP/UDP listeners only.)
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type ContextType: str
        :param SendContext: Custom probe parameter. Required when
            CheckType is CUSTOM; the request content sent by the health
            check. Only visible ASCII characters, max length 500.
            (TCP/UDP listeners only.)
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type SendContext: str
        :param RecvContext: Custom probe parameter. Required when
            CheckType is CUSTOM; the expected result returned by the
            health check. Only visible ASCII characters, max length 500.
            (TCP/UDP listeners only.)
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type RecvContext: str
        :param CheckType: Custom probe parameter. Protocol used by the
            health check: TCP | HTTP | CUSTOM (TCP/UDP listeners only;
            UDP listeners support only CUSTOM; required when the custom
            health check feature is used).
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type CheckType: str
        :param HttpVersion: Custom probe parameter. Required when
            CheckType is HTTP; the backend service HTTP version:
            HTTP/1.0 or HTTP/1.1. (TCP listeners only.)
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type HttpVersion: str
        :param SourceIpType: Custom probe parameter. Source IP type of the
            health check: 0 (use the LB VIP as source IP), 1 (use an IP
            from the 100.64 range as source IP); default 0.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type SourceIpType: int
        """
        self.HealthSwitch = None
        self.TimeOut = None
        self.IntervalTime = None
        self.HealthNum = None
        self.UnHealthNum = None
        self.HttpCode = None
        self.HttpCheckPath = None
        self.HttpCheckDomain = None
        self.HttpCheckMethod = None
        self.CheckPort = None
        self.ContextType = None
        self.SendContext = None
        self.RecvContext = None
        self.CheckType = None
        self.HttpVersion = None
        self.SourceIpType = None

    def _deserialize(self, params):
        self.HealthSwitch = params.get("HealthSwitch")
        self.TimeOut = params.get("TimeOut")
        self.IntervalTime = params.get("IntervalTime")
        self.HealthNum = params.get("HealthNum")
        self.UnHealthNum = params.get("UnHealthNum")
        self.HttpCode = params.get("HttpCode")
        self.HttpCheckPath = params.get("HttpCheckPath")
        self.HttpCheckDomain = params.get("HttpCheckDomain")
        self.HttpCheckMethod = params.get("HttpCheckMethod")
        self.CheckPort = params.get("CheckPort")
        self.ContextType = params.get("ContextType")
        self.SendContext = params.get("SendContext")
        self.RecvContext = params.get("RecvContext")
        self.CheckType = params.get("CheckType")
        self.HttpVersion = params.get("HttpVersion")
        self.SourceIpType = params.get("SourceIpType")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class InternetAccessible(AbstractModel):
    """Network billing mode and maximum outbound bandwidth."""

    def __init__(self):
        r"""
        :param InternetChargeType: TRAFFIC_POSTPAID_BY_HOUR: postpaid
            hourly by traffic; BANDWIDTH_POSTPAID_BY_HOUR: postpaid
            hourly by bandwidth; BANDWIDTH_PACKAGE: billed by bandwidth
            package.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type InternetChargeType: str
        :param InternetMaxBandwidthOut: Maximum outbound bandwidth in
            Mbps, range 0-2048; only effective for public-network load
            balancers; default 10.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type InternetMaxBandwidthOut: int
        :param BandwidthpkgSubType: Type of the bandwidth package, e.g.
            SINGLEISP.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type BandwidthpkgSubType: str
        """
        self.InternetChargeType = None
        self.InternetMaxBandwidthOut = None
        self.BandwidthpkgSubType = None

    def _deserialize(self, params):
        self.InternetChargeType = params.get("InternetChargeType")
        self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
        self.BandwidthpkgSubType = params.get("BandwidthpkgSubType")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class LBChargePrepaid(AbstractModel):
    """Monthly-subscription (prepaid) configuration of a load balancer
    instance."""

    def __init__(self):
        r"""
        :param RenewFlag: Renewal type: AUTO_RENEW (auto renewal),
            MANUAL_RENEW (manual renewal).
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type RenewFlag: str
        :param Period: Purchase duration, unit: month.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type Period: int
        """
        self.RenewFlag = None
        self.Period = None

    def _deserialize(self, params):
        self.RenewFlag = params.get("RenewFlag")
        self.Period = params.get("Period")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class LBItem(AbstractModel):
    """Reverse lookup of load balancer binding relationships."""

    def __init__(self):
        r"""
        :param LoadBalancerId: String ID of the load balancer.
        :type LoadBalancerId: str
        :param Vip: VIP of the load balancer.
        :type Vip: str
        :param Listeners: Listener rules.
        :type Listeners: list of ListenerItem
        :param Region: Region where the load balancer resides.
        :type Region: str
        """
        self.LoadBalancerId = None
        self.Vip = None
        self.Listeners = None
        self.Region = None

    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.Vip = params.get("Vip")
        if params.get("Listeners") is not None:
            self.Listeners = []
            for item in params.get("Listeners"):
                obj = ListenerItem()
                obj._deserialize(item)
                self.Listeners.append(obj)
        self.Region = params.get("Region")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class LbRsItem(AbstractModel):
    """Query item for backend reverse lookups."""

    def __init__(self):
        r"""
        :param VpcId: String ID of the VPC; only string IDs are supported.
        :type VpcId: str
        :param PrivateIp: Backend private IP to query; may belong to a CVM
            instance or an elastic network interface.
        :type PrivateIp: str
        """
        self.VpcId = None
        self.PrivateIp = None

    def _deserialize(self, params):
        self.VpcId = params.get("VpcId")
        self.PrivateIp = params.get("PrivateIp")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class LbRsTargets(AbstractModel):
    """Result entry of a backend reverse lookup."""

    def __init__(self):
        r"""
        :param Type: Private IP type: "cvm" or "eni".
        :type Type: str
        :param PrivateIp: Private IP of the backend instance.
        :type PrivateIp: str
        :param Port: Port the backend instance is bound on.
        :type Port: int
        :param VpcId: VPC ID of the backend instance.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type VpcId: int
        :param Weight: Weight of the backend instance.
            Note: this field may return null, indicating that no valid
            value can be obtained.
        :type Weight: int
        """
        self.Type = None
        self.PrivateIp = None
        self.Port = None
        self.VpcId = None
        self.Weight = None

    def _deserialize(self, params):
        self.Type = params.get("Type")
        self.PrivateIp = params.get("PrivateIp")
        self.Port = params.get("Port")
        self.VpcId = params.get("VpcId")
        self.Weight = params.get("Weight")
        # Warn about request keys that do not map to a declared attribute.
        # (Fixes the misspelled "fileds" message and the manual removal loop.)
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused))
class Listener(AbstractModel):
    """Information about a load balancer listener.
    """
    def __init__(self):
        r"""
        :param ListenerId: Load balancer listener ID.
        :type ListenerId: str
        :param Protocol: Listener protocol.
        :type Protocol: str
        :param Port: Listener port.
        :type Port: int
        :param Certificate: Certificate information bound to the listener. May be null.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateOutput`
        :param HealthCheck: Health-check information of the listener. May be null.
        :type HealthCheck: :class:`tencentcloud.clb.v20180317.models.HealthCheck`
        :param Scheduler: Request scheduling method. May be null.
        :type Scheduler: str
        :param SessionExpireTime: Session persistence time. May be null.
        :type SessionExpireTime: int
        :param SniSwitch: Whether SNI is enabled (meaningful for HTTPS listeners only). May be null.
        :type SniSwitch: int
        :param Rules: All forwarding rules under the listener (HTTP/HTTPS listeners only). May be null.
        :type Rules: list of RuleOutput
        :param ListenerName: Listener name. May be null.
        :type ListenerName: str
        :param CreateTime: Listener creation time. May be null.
        :type CreateTime: str
        :param EndPort: End port of the port range. May be null.
        :type EndPort: int
        :param TargetType: Backend server type. May be null.
        :type TargetType: str
        :param TargetGroup: Basic information of the bound target group; returned when the listener is bound to one. May be null.
        :type TargetGroup: :class:`tencentcloud.clb.v20180317.models.BasicTargetGroupInfo`
        :param SessionType: Session persistence type. NORMAL is the default; QUIC_CID keys on the QUIC Connection ID. May be null.
        :type SessionType: str
        :param KeepaliveEnable: Whether keep-alive is enabled: 1 on, 0 off (HTTP/HTTPS listeners only). May be null.
        :type KeepaliveEnable: int
        :param Toa: Only supported by Nat64 CLB TCP listeners. May be null.
        :type Toa: bool
        :param DeregisterTargetRst: Whether to send RST to the client when a backend is unbound (TCP listeners only). May be null.
        :type DeregisterTargetRst: bool
        """
        self.ListenerId = None
        self.Protocol = None
        self.Port = None
        self.Certificate = None
        self.HealthCheck = None
        self.Scheduler = None
        self.SessionExpireTime = None
        self.SniSwitch = None
        self.Rules = None
        self.ListenerName = None
        self.CreateTime = None
        self.EndPort = None
        self.TargetType = None
        self.TargetGroup = None
        self.SessionType = None
        self.KeepaliveEnable = None
        self.Toa = None
        self.DeregisterTargetRst = None
    def _deserialize(self, params):
        self.ListenerId = params.get("ListenerId")
        self.Protocol = params.get("Protocol")
        self.Port = params.get("Port")
        # Nested sub-models are constructed empty and filled recursively.
        if params.get("Certificate") is not None:
            self.Certificate = CertificateOutput()
            self.Certificate._deserialize(params.get("Certificate"))
        if params.get("HealthCheck") is not None:
            self.HealthCheck = HealthCheck()
            self.HealthCheck._deserialize(params.get("HealthCheck"))
        self.Scheduler = params.get("Scheduler")
        self.SessionExpireTime = params.get("SessionExpireTime")
        self.SniSwitch = params.get("SniSwitch")
        if params.get("Rules") is not None:
            self.Rules = []
            for item in params.get("Rules"):
                obj = RuleOutput()
                obj._deserialize(item)
                self.Rules.append(obj)
        self.ListenerName = params.get("ListenerName")
        self.CreateTime = params.get("CreateTime")
        self.EndPort = params.get("EndPort")
        self.TargetType = params.get("TargetType")
        if params.get("TargetGroup") is not None:
            self.TargetGroup = BasicTargetGroupInfo()
            self.TargetGroup._deserialize(params.get("TargetGroup"))
        self.SessionType = params.get("SessionType")
        self.KeepaliveEnable = params.get("KeepaliveEnable")
        self.Toa = params.get("Toa")
        self.DeregisterTargetRst = params.get("DeregisterTargetRst")
        # Warn about response keys this model does not define.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ListenerBackend(AbstractModel):
    """Backend services bound under a single listener.
    """
    def __init__(self):
        r"""
        :param ListenerId: Listener ID.
        :type ListenerId: str
        :param Protocol: Listener protocol.
        :type Protocol: str
        :param Port: Listener port.
        :type Port: int
        :param Rules: Rule information under the listener (HTTP/HTTPS listeners only). May be null.
        :type Rules: list of RuleTargets
        :param Targets: Backends bound to the listener (TCP/UDP/TCP_SSL listeners only). May be null.
        :type Targets: list of Backend
        :param EndPort: End port of the port range, or 0 if port ranges are unsupported. May be null.
        :type EndPort: int
        """
        self.ListenerId = None
        self.Protocol = None
        self.Port = None
        self.Rules = None
        self.Targets = None
        self.EndPort = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        self.ListenerId = params.get("ListenerId")
        self.Protocol = params.get("Protocol")
        self.Port = params.get("Port")
        if params.get("Rules") is not None:
            self.Rules = [build(RuleTargets, item) for item in params.get("Rules")]
        if params.get("Targets") is not None:
            self.Targets = [build(Backend, item) for item in params.get("Targets")]
        self.EndPort = params.get("EndPort")
        # Warn about response keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ListenerHealth(AbstractModel):
    """Health-check information of a listener.
    """
    def __init__(self):
        r"""
        :param ListenerId: Listener ID.
        :type ListenerId: str
        :param ListenerName: Listener name. May be null.
        :type ListenerName: str
        :param Protocol: Listener protocol.
        :type Protocol: str
        :param Port: Listener port.
        :type Port: int
        :param Rules: Forwarding rules of the listener. May be null.
        :type Rules: list of RuleHealth
        """
        self.ListenerId = None
        self.ListenerName = None
        self.Protocol = None
        self.Port = None
        self.Rules = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        for field in ("ListenerId", "ListenerName", "Protocol", "Port"):
            setattr(self, field, params.get(field))
        if params.get("Rules") is not None:
            self.Rules = [build(RuleHealth, item) for item in params.get("Rules")]
        # Warn about response keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ListenerItem(AbstractModel):
    """Listener entry returned by a backend reverse lookup.
    """
    def __init__(self):
        r"""
        :param ListenerId: Listener ID.
        :type ListenerId: str
        :param Protocol: Listener protocol.
        :type Protocol: str
        :param Port: Listener port.
        :type Port: int
        :param Rules: Bound rules. May be null.
        :type Rules: list of RulesItems
        :param Targets: Layer-4 bound targets. May be null.
        :type Targets: list of LbRsTargets
        :param EndPort: End port of a port-range listener. May be null.
        :type EndPort: int
        """
        self.ListenerId = None
        self.Protocol = None
        self.Port = None
        self.Rules = None
        self.Targets = None
        self.EndPort = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        self.ListenerId = params.get("ListenerId")
        self.Protocol = params.get("Protocol")
        self.Port = params.get("Port")
        if params.get("Rules") is not None:
            self.Rules = [build(RulesItems, item) for item in params.get("Rules")]
        if params.get("Targets") is not None:
            self.Targets = [build(LbRsTargets, item) for item in params.get("Targets")]
        self.EndPort = params.get("EndPort")
        # Warn about response keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class LoadBalancer(AbstractModel):
    """Information about a load balancer instance.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param LoadBalancerName: Load balancer instance name.
        :type LoadBalancerName: str
        :param LoadBalancerType: Network type of the instance: OPEN (public), INTERNAL (private).
        :type LoadBalancerType: str
        :param Forward: Load balancer type flag. 1: CLB, 0: classic CLB.
        :type Forward: int
        :param Domain: Domain of the instance; only provided for public classic CLBs. May be null.
        :type Domain: str
        :param LoadBalancerVips: VIP list of the instance. May be null.
        :type LoadBalancerVips: list of str
        :param Status: Instance status: 0 creating, 1 running. May be null.
        :type Status: int
        :param CreateTime: Creation time of the instance. May be null.
        :type CreateTime: str
        :param StatusTime: Time of the last status transition. May be null.
        :type StatusTime: str
        :param ProjectId: Project ID of the instance; 0 means the default project.
        :type ProjectId: int
        :param VpcId: VPC ID. May be null.
        :type VpcId: str
        :param OpenBgp: Anti-DDoS CLB flag: 1 anti-DDoS, 0 not. May be null.
        :type OpenBgp: int
        :param Snat: Classic private CLBs created before December 2016 all have SNAT enabled. May be null.
        :type Snat: bool
        :param Isolation: 0: not isolated, 1: isolated. May be null.
        :type Isolation: int
        :param Log: Log information; only public CLBs with HTTP/HTTPS listeners have logs. May be null.
        :type Log: str
        :param SubnetId: Subnet of the instance (meaningful for private VPC CLBs only). May be null.
        :type SubnetId: str
        :param Tags: Tag information of the instance. May be null.
        :type Tags: list of TagInfo
        :param SecureGroups: Security groups of the instance. May be null.
        :type SecureGroups: list of str
        :param TargetRegionInfo: Basic information of the backends bound to the instance. May be null.
        :type TargetRegionInfo: :class:`tencentcloud.clb.v20180317.models.TargetRegionInfo`
        :param AnycastZone: Anycast publishing zone; empty string for non-anycast CLBs. May be null.
        :type AnycastZone: str
        :param AddressIPVersion: IP version, ipv4 | ipv6. May be null.
        :type AddressIPVersion: str
        :param NumericalVpcId: Numeric VPC ID. May be null.
        :type NumericalVpcId: int
        :param VipIsp: ISP of the CLB IP address. May be null.
        :type VipIsp: str
        :param MasterZone: Primary availability zone. May be null.
        :type MasterZone: :class:`tencentcloud.clb.v20180317.models.ZoneInfo`
        :param BackupZoneSet: Backup availability zones. May be null.
        :type BackupZoneSet: list of ZoneInfo
        :param IsolatedTime: Time the instance was isolated. May be null.
        :type IsolatedTime: str
        :param ExpireTime: Expiry time; only meaningful for prepaid instances. May be null.
        :type ExpireTime: str
        :param ChargeType: Billing type: PREPAID (monthly subscription) or POSTPAID_BY_HOUR (pay-as-you-go). May be null.
        :type ChargeType: str
        :param NetworkAttributes: Network attributes of the instance. May be null.
        :type NetworkAttributes: :class:`tencentcloud.clb.v20180317.models.InternetAccessible`
        :param PrepaidAttributes: Prepaid attributes of the instance. May be null.
        :type PrepaidAttributes: :class:`tencentcloud.clb.v20180317.models.LBChargePrepaid`
        :param LogSetId: CLS logset ID. May be null.
        :type LogSetId: str
        :param LogTopicId: CLS log topic ID. May be null.
        :type LogTopicId: str
        :param AddressIPv6: IPv6 address of the instance. May be null.
        :type AddressIPv6: str
        :param ExtraInfo: Reserved field; ordinary users need not care. May be null.
        :type ExtraInfo: :class:`tencentcloud.clb.v20180317.models.ExtraInfo`
        :param IsDDos: Whether an anti-DDoS package can be bound. May be null.
        :type IsDDos: bool
        :param ConfigId: Custom configuration ID at the CLB level. May be null.
        :type ConfigId: str
        :param LoadBalancerPassToTarget: Whether backends allow traffic coming from the CLB. May be null.
        :type LoadBalancerPassToTarget: bool
        :param ExclusiveCluster: Private dedicated cluster. May be null.
        :type ExclusiveCluster: :class:`tencentcloud.clb.v20180317.models.ExclusiveCluster`
        :param IPv6Mode: Meaningful when the IP version is ipv6: IPv6Nat64 | IPv6FullChain. May be null.
        :type IPv6Mode: str
        :param SnatPro: Whether SnatPro is enabled. May be null.
        :type SnatPro: bool
        :param SnatIps: SnatIp list when SnatPro is enabled. May be null.
        :type SnatIps: list of SnatIp
        :param SlaType: Guaranteed-performance specification. May be null.
        :type SlaType: str
        :param IsBlock: Whether the VIP is blocked. May be null.
        :type IsBlock: bool
        :param IsBlockTime: Block / unblock time. May be null.
        :type IsBlockTime: str
        :param LocalBgp: Whether the IP type is local BGP. May be null.
        :type LocalBgp: bool
        :param ClusterTag: Layer-7 dedicated tag. May be null.
        :type ClusterTag: str
        :param MixIpTarget: Whether IPv6FullChain layer-7 listeners support mixed IPv4/IPv6 targets. May be null.
        :type MixIpTarget: bool
        :param Zones: Availability zones the rules land in for nearby-access private CLBs. May be null.
        :type Zones: list of str
        :param NfvInfo: Whether the CLB is NFV: empty means no, l7nfv means layer 7 is NFV. May be null.
        :type NfvInfo: str
        :param HealthLogSetId: CLS logset ID for health-check logs. May be null.
        :type HealthLogSetId: str
        :param HealthLogTopicId: CLS log topic ID for health-check logs. May be null.
        :type HealthLogTopicId: str
        """
        self.LoadBalancerId = None
        self.LoadBalancerName = None
        self.LoadBalancerType = None
        self.Forward = None
        self.Domain = None
        self.LoadBalancerVips = None
        self.Status = None
        self.CreateTime = None
        self.StatusTime = None
        self.ProjectId = None
        self.VpcId = None
        self.OpenBgp = None
        self.Snat = None
        self.Isolation = None
        self.Log = None
        self.SubnetId = None
        self.Tags = None
        self.SecureGroups = None
        self.TargetRegionInfo = None
        self.AnycastZone = None
        self.AddressIPVersion = None
        self.NumericalVpcId = None
        self.VipIsp = None
        self.MasterZone = None
        self.BackupZoneSet = None
        self.IsolatedTime = None
        self.ExpireTime = None
        self.ChargeType = None
        self.NetworkAttributes = None
        self.PrepaidAttributes = None
        self.LogSetId = None
        self.LogTopicId = None
        self.AddressIPv6 = None
        self.ExtraInfo = None
        self.IsDDos = None
        self.ConfigId = None
        self.LoadBalancerPassToTarget = None
        self.ExclusiveCluster = None
        self.IPv6Mode = None
        self.SnatPro = None
        self.SnatIps = None
        self.SlaType = None
        self.IsBlock = None
        self.IsBlockTime = None
        self.LocalBgp = None
        self.ClusterTag = None
        self.MixIpTarget = None
        self.Zones = None
        self.NfvInfo = None
        self.HealthLogSetId = None
        self.HealthLogTopicId = None
    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.LoadBalancerName = params.get("LoadBalancerName")
        self.LoadBalancerType = params.get("LoadBalancerType")
        self.Forward = params.get("Forward")
        self.Domain = params.get("Domain")
        self.LoadBalancerVips = params.get("LoadBalancerVips")
        self.Status = params.get("Status")
        self.CreateTime = params.get("CreateTime")
        self.StatusTime = params.get("StatusTime")
        self.ProjectId = params.get("ProjectId")
        self.VpcId = params.get("VpcId")
        self.OpenBgp = params.get("OpenBgp")
        self.Snat = params.get("Snat")
        self.Isolation = params.get("Isolation")
        self.Log = params.get("Log")
        self.SubnetId = params.get("SubnetId")
        # Nested sub-models are constructed empty and filled recursively.
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = TagInfo()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.SecureGroups = params.get("SecureGroups")
        if params.get("TargetRegionInfo") is not None:
            self.TargetRegionInfo = TargetRegionInfo()
            self.TargetRegionInfo._deserialize(params.get("TargetRegionInfo"))
        self.AnycastZone = params.get("AnycastZone")
        self.AddressIPVersion = params.get("AddressIPVersion")
        self.NumericalVpcId = params.get("NumericalVpcId")
        self.VipIsp = params.get("VipIsp")
        if params.get("MasterZone") is not None:
            self.MasterZone = ZoneInfo()
            self.MasterZone._deserialize(params.get("MasterZone"))
        if params.get("BackupZoneSet") is not None:
            self.BackupZoneSet = []
            for item in params.get("BackupZoneSet"):
                obj = ZoneInfo()
                obj._deserialize(item)
                self.BackupZoneSet.append(obj)
        self.IsolatedTime = params.get("IsolatedTime")
        self.ExpireTime = params.get("ExpireTime")
        self.ChargeType = params.get("ChargeType")
        if params.get("NetworkAttributes") is not None:
            self.NetworkAttributes = InternetAccessible()
            self.NetworkAttributes._deserialize(params.get("NetworkAttributes"))
        if params.get("PrepaidAttributes") is not None:
            self.PrepaidAttributes = LBChargePrepaid()
            self.PrepaidAttributes._deserialize(params.get("PrepaidAttributes"))
        self.LogSetId = params.get("LogSetId")
        self.LogTopicId = params.get("LogTopicId")
        self.AddressIPv6 = params.get("AddressIPv6")
        if params.get("ExtraInfo") is not None:
            self.ExtraInfo = ExtraInfo()
            self.ExtraInfo._deserialize(params.get("ExtraInfo"))
        self.IsDDos = params.get("IsDDos")
        self.ConfigId = params.get("ConfigId")
        self.LoadBalancerPassToTarget = params.get("LoadBalancerPassToTarget")
        if params.get("ExclusiveCluster") is not None:
            self.ExclusiveCluster = ExclusiveCluster()
            self.ExclusiveCluster._deserialize(params.get("ExclusiveCluster"))
        self.IPv6Mode = params.get("IPv6Mode")
        self.SnatPro = params.get("SnatPro")
        if params.get("SnatIps") is not None:
            self.SnatIps = []
            for item in params.get("SnatIps"):
                obj = SnatIp()
                obj._deserialize(item)
                self.SnatIps.append(obj)
        self.SlaType = params.get("SlaType")
        self.IsBlock = params.get("IsBlock")
        self.IsBlockTime = params.get("IsBlockTime")
        self.LocalBgp = params.get("LocalBgp")
        self.ClusterTag = params.get("ClusterTag")
        self.MixIpTarget = params.get("MixIpTarget")
        self.Zones = params.get("Zones")
        self.NfvInfo = params.get("NfvInfo")
        self.HealthLogSetId = params.get("HealthLogSetId")
        self.HealthLogTopicId = params.get("HealthLogTopicId")
        # Warn about response keys this model does not define.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class LoadBalancerDetail(AbstractModel):
    """Detailed information about a load balancer.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param LoadBalancerName: Load balancer instance name.
        :type LoadBalancerName: str
        :param LoadBalancerType: Network type of the instance: Public or Private. May be null.
        :type LoadBalancerType: str
        :param Status: Instance status: 0 creating, 1 running. May be null.
        :type Status: int
        :param Address: VIP of the instance. May be null.
        :type Address: str
        :param AddressIPv6: IPv6 address of the instance VIP. May be null.
        :type AddressIPv6: str
        :param AddressIPVersion: IP version of the instance, IPv4 | IPv6. May be null.
        :type AddressIPVersion: str
        :param IPv6Mode: IPv6 address type, IPv6Nat64 | IPv6FullChain. May be null.
        :type IPv6Mode: str
        :param Zone: Availability zone of the instance. May be null.
        :type Zone: str
        :param AddressIsp: ISP of the instance IP address. May be null.
        :type AddressIsp: str
        :param VpcId: VPC ID of the instance. May be null.
        :type VpcId: str
        :param ProjectId: Project ID of the instance; 0 means the default project. May be null.
        :type ProjectId: int
        :param CreateTime: Creation time of the instance. May be null.
        :type CreateTime: str
        :param ChargeType: Billing type of the instance. May be null.
        :type ChargeType: str
        :param NetworkAttributes: Network attributes of the instance. May be null.
        :type NetworkAttributes: :class:`tencentcloud.clb.v20180317.models.InternetAccessible`
        :param PrepaidAttributes: Prepaid attributes of the instance. May be null.
        :type PrepaidAttributes: :class:`tencentcloud.clb.v20180317.models.LBChargePrepaid`
        :param ExtraInfo: Reserved field; ordinary users need not care. May be null.
        :type ExtraInfo: :class:`tencentcloud.clb.v20180317.models.ExtraInfo`
        :param ConfigId: Custom configuration ID at the CLB level. May be null.
        :type ConfigId: str
        :param Tags: Tag information of the instance. May be null.
        :type Tags: list of TagInfo
        :param ListenerId: Load balancer listener ID. May be null.
        :type ListenerId: str
        :param Protocol: Listener protocol. May be null.
        :type Protocol: str
        :param Port: Listener port. May be null.
        :type Port: int
        :param LocationId: Forwarding rule ID. May be null.
        :type LocationId: str
        :param Domain: Forwarding rule domain. May be null.
        :type Domain: str
        :param Url: Forwarding rule path. May be null.
        :type Url: str
        :param TargetId: Backend target ID. May be null.
        :type TargetId: str
        :param TargetAddress: IP address of the backend target. May be null.
        :type TargetAddress: str
        :param TargetPort: Listening port of the backend target. May be null.
        :type TargetPort: int
        :param TargetWeight: Forwarding weight of the backend target. May be null.
        :type TargetWeight: int
        :param Isolation: 0: not isolated, 1: isolated. May be null.
        :type Isolation: int
        :param SecurityGroup: Security groups bound to the instance. May be null.
        :type SecurityGroup: list of str
        :param LoadBalancerPassToTarget: Whether the security-group pass-through feature is enabled. May be null.
        :type LoadBalancerPassToTarget: int
        :param TargetHealth: Health status of the backend target. May be null.
        :type TargetHealth: str
        """
        self.LoadBalancerId = None
        self.LoadBalancerName = None
        self.LoadBalancerType = None
        self.Status = None
        self.Address = None
        self.AddressIPv6 = None
        self.AddressIPVersion = None
        self.IPv6Mode = None
        self.Zone = None
        self.AddressIsp = None
        self.VpcId = None
        self.ProjectId = None
        self.CreateTime = None
        self.ChargeType = None
        self.NetworkAttributes = None
        self.PrepaidAttributes = None
        self.ExtraInfo = None
        self.ConfigId = None
        self.Tags = None
        self.ListenerId = None
        self.Protocol = None
        self.Port = None
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.TargetId = None
        self.TargetAddress = None
        self.TargetPort = None
        self.TargetWeight = None
        self.Isolation = None
        self.SecurityGroup = None
        self.LoadBalancerPassToTarget = None
        self.TargetHealth = None
    def _deserialize(self, params):
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.LoadBalancerName = params.get("LoadBalancerName")
        self.LoadBalancerType = params.get("LoadBalancerType")
        self.Status = params.get("Status")
        self.Address = params.get("Address")
        self.AddressIPv6 = params.get("AddressIPv6")
        self.AddressIPVersion = params.get("AddressIPVersion")
        self.IPv6Mode = params.get("IPv6Mode")
        self.Zone = params.get("Zone")
        self.AddressIsp = params.get("AddressIsp")
        self.VpcId = params.get("VpcId")
        self.ProjectId = params.get("ProjectId")
        self.CreateTime = params.get("CreateTime")
        self.ChargeType = params.get("ChargeType")
        # Nested sub-models are constructed empty and filled recursively.
        if params.get("NetworkAttributes") is not None:
            self.NetworkAttributes = InternetAccessible()
            self.NetworkAttributes._deserialize(params.get("NetworkAttributes"))
        if params.get("PrepaidAttributes") is not None:
            self.PrepaidAttributes = LBChargePrepaid()
            self.PrepaidAttributes._deserialize(params.get("PrepaidAttributes"))
        if params.get("ExtraInfo") is not None:
            self.ExtraInfo = ExtraInfo()
            self.ExtraInfo._deserialize(params.get("ExtraInfo"))
        self.ConfigId = params.get("ConfigId")
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = TagInfo()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.ListenerId = params.get("ListenerId")
        self.Protocol = params.get("Protocol")
        self.Port = params.get("Port")
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        self.TargetId = params.get("TargetId")
        self.TargetAddress = params.get("TargetAddress")
        self.TargetPort = params.get("TargetPort")
        self.TargetWeight = params.get("TargetWeight")
        self.Isolation = params.get("Isolation")
        self.SecurityGroup = params.get("SecurityGroup")
        self.LoadBalancerPassToTarget = params.get("LoadBalancerPassToTarget")
        self.TargetHealth = params.get("TargetHealth")
        # Warn about response keys this model does not define.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class LoadBalancerHealth(AbstractModel):
    """Health-check status of a load balancer instance.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param LoadBalancerName: Load balancer instance name. May be null.
        :type LoadBalancerName: str
        :param Listeners: Listener list. May be null.
        :type Listeners: list of ListenerHealth
        """
        self.LoadBalancerId = None
        self.LoadBalancerName = None
        self.Listeners = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        self.LoadBalancerId = params.get("LoadBalancerId")
        self.LoadBalancerName = params.get("LoadBalancerName")
        if params.get("Listeners") is not None:
            self.Listeners = [build(ListenerHealth, item) for item in params.get("Listeners")]
        # Warn about response keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class LoadBalancerTraffic(AbstractModel):
    """Traffic data of a load balancer.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer ID.
        :type LoadBalancerId: str
        :param LoadBalancerName: Load balancer name.
        :type LoadBalancerName: str
        :param Region: Region where the load balancer resides.
        :type Region: str
        :param Vip: VIP of the load balancer.
        :type Vip: str
        :param OutBandwidth: Maximum outbound bandwidth, in Mbps.
        :type OutBandwidth: float
        """
        self.LoadBalancerId = None
        self.LoadBalancerName = None
        self.Region = None
        self.Vip = None
        self.OutBandwidth = None

    def _deserialize(self, params):
        # All fields are scalars; copy them straight across.
        for field in ("LoadBalancerId", "LoadBalancerName", "Region",
                      "Vip", "OutBandwidth"):
            setattr(self, field, params.get(field))
        # Warn about response keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ManualRewriteRequest(AbstractModel):
    """ManualRewrite request structure.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param SourceListenerId: Source listener ID.
        :type SourceListenerId: str
        :param TargetListenerId: Target listener ID.
        :type TargetListenerId: str
        :param RewriteInfos: Redirection mappings between forwarding rules.
        :type RewriteInfos: list of RewriteLocationMap
        """
        self.LoadBalancerId = None
        self.SourceListenerId = None
        self.TargetListenerId = None
        self.RewriteInfos = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        for field in ("LoadBalancerId", "SourceListenerId", "TargetListenerId"):
            setattr(self, field, params.get(field))
        if params.get("RewriteInfos") is not None:
            self.RewriteInfos = [build(RewriteLocationMap, item) for item in params.get("RewriteInfos")]
        # Warn about request keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ManualRewriteResponse(AbstractModel):
    """ManualRewrite response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Responses only carry the request identifier; no unknown-key check here.
        self.RequestId = params.get("RequestId")
class ModifyBlockIPListRequest(AbstractModel):
    """ModifyBlockIPList request structure.
    """
    def __init__(self):
        r"""
        :param LoadBalancerIds: Load balancer instance IDs.
        :type LoadBalancerIds: list of str
        :param Type: Operation type, one of:
<li> add_customized_field (set the header for the first time and enable the blocklist)</li>
<li> set_customized_field (change the header)</li>
<li> del_customized_field (delete the header)</li>
<li> add_blocked (add to the blocklist)</li>
<li> del_blocked (remove from the blocklist)</li>
<li> flush_blocked (clear the blocklist)</li>
        :type Type: str
        :param ClientIPField: Name of the header field carrying the real client IP.
        :type ClientIPField: str
        :param BlockIPList: IPs to block; at most 200000 entries per call.
        :type BlockIPList: list of str
        :param ExpireTime: Expiration time in seconds; defaults to 3600.
        :type ExpireTime: int
        :param AddStrategy: Strategy for adding IPs: fifo (when the blocklist is full, evict oldest first).
        :type AddStrategy: str
        """
        self.LoadBalancerIds = None
        self.Type = None
        self.ClientIPField = None
        self.BlockIPList = None
        self.ExpireTime = None
        self.AddStrategy = None

    def _deserialize(self, params):
        # All fields are scalars or plain lists; copy them straight across.
        for field in ("LoadBalancerIds", "Type", "ClientIPField",
                      "BlockIPList", "ExpireTime", "AddStrategy"):
            setattr(self, field, params.get(field))
        # Warn about request keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ModifyBlockIPListResponse(AbstractModel):
    """ModifyBlockIPList response structure.
    """
    def __init__(self):
        r"""
        :param JodId: ID of the async task.
        :type JodId: str
        :param RequestId: Unique request ID, returned with every request; supply it when reporting a problem.
        :type RequestId: str
        """
        self.JodId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Scalar copy only; responses skip the unknown-key check.
        for field in ("JodId", "RequestId"):
            setattr(self, field, params.get(field))
class ModifyDomainAttributesRequest(AbstractModel):
    """ModifyDomainAttributes request structure.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param ListenerId: Load balancer listener ID.
        :type ListenerId: str
        :param Domain: Domain (must belong to an existing forwarding rule).
        :type Domain: str
        :param NewDomain: New domain to change to.
        :type NewDomain: str
        :param Certificate: Certificate information for the domain; only applies to SNI-enabled listeners.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateInput`
        :param Http2: Whether to enable HTTP/2; only HTTPS domains can enable it.
        :type Http2: bool
        :param DefaultServer: Whether to make this the default domain; only one default domain per listener.
        :type DefaultServer: bool
        :param NewDefaultServerDomain: A listener must have a default domain; to demote the current one, name another domain as the new default here.
        :type NewDefaultServerDomain: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Domain = None
        self.NewDomain = None
        self.Certificate = None
        self.Http2 = None
        self.DefaultServer = None
        self.NewDefaultServerDomain = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        for field in ("LoadBalancerId", "ListenerId", "Domain", "NewDomain",
                      "Http2", "DefaultServer", "NewDefaultServerDomain"):
            setattr(self, field, params.get(field))
        if params.get("Certificate") is not None:
            self.Certificate = build(CertificateInput, params.get("Certificate"))
        # Warn about request keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ModifyDomainAttributesResponse(AbstractModel):
    """ModifyDomainAttributes response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Responses only carry the request identifier; no unknown-key check here.
        self.RequestId = params.get("RequestId")
class ModifyDomainRequest(AbstractModel):
    """ModifyDomain request structure.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param ListenerId: Load balancer listener ID.
        :type ListenerId: str
        :param Domain: An existing domain under the listener.
        :type Domain: str
        :param NewDomain: New domain, length 1-120. Three formats are accepted: non-regex (letters, digits, '-', '.'), wildcard ('*' only at the start or end), and regex (starting with '~').
        :type NewDomain: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Domain = None
        self.NewDomain = None

    def _deserialize(self, params):
        # All fields are scalars; copy them straight across.
        for field in ("LoadBalancerId", "ListenerId", "Domain", "NewDomain"):
            setattr(self, field, params.get(field))
        # Warn about request keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ModifyDomainResponse(AbstractModel):
    """ModifyDomain response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Responses only carry the request identifier; no unknown-key check here.
        self.RequestId = params.get("RequestId")
class ModifyListenerRequest(AbstractModel):
    """ModifyListener request structure.
    """
    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance ID.
        :type LoadBalancerId: str
        :param ListenerId: Load balancer listener ID.
        :type ListenerId: str
        :param ListenerName: New listener name.
        :type ListenerName: str
        :param SessionExpireTime: Session persistence time in seconds, 30-3600; default 0 (disabled). TCP/UDP listeners only.
        :type SessionExpireTime: int
        :param HealthCheck: Health-check parameters; TCP/UDP/TCP_SSL listeners only.
        :type HealthCheck: :class:`tencentcloud.clb.v20180317.models.HealthCheck`
        :param Certificate: Certificate information; HTTPS/TCP_SSL listeners only.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateInput`
        :param Scheduler: Forwarding method: WRR (weighted round robin, default) or LEAST_CONN (least connections).
        :type Scheduler: str
        :param SniSwitch: Whether to enable SNI; HTTPS listeners only. SNI can be turned on but not back off.
        :type SniSwitch: int
        :param KeepaliveEnable: Whether to enable keep-alive; HTTP/HTTPS listeners only.
        :type KeepaliveEnable: int
        :param DeregisterTargetRst: Whether to send RST to the client when unbinding a backend; TCP listeners only.
        :type DeregisterTargetRst: bool
        :param SessionType: Session persistence type: NORMAL (default) or QUIC_CID (keyed on QUIC Connection ID; UDP only).
        :type SessionType: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.ListenerName = None
        self.SessionExpireTime = None
        self.HealthCheck = None
        self.Certificate = None
        self.Scheduler = None
        self.SniSwitch = None
        self.KeepaliveEnable = None
        self.DeregisterTargetRst = None
        self.SessionType = None

    def _deserialize(self, params):
        def build(model_cls, raw):
            # Instantiate a nested model and fill it from its raw dict.
            inst = model_cls()
            inst._deserialize(raw)
            return inst

        for field in ("LoadBalancerId", "ListenerId", "ListenerName",
                      "SessionExpireTime", "Scheduler", "SniSwitch",
                      "KeepaliveEnable", "DeregisterTargetRst", "SessionType"):
            setattr(self, field, params.get(field))
        if params.get("HealthCheck") is not None:
            self.HealthCheck = build(HealthCheck, params.get("HealthCheck"))
        if params.get("Certificate") is not None:
            self.Certificate = build(CertificateInput, params.get("Certificate"))
        # Warn about request keys this model does not define.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ModifyListenerResponse(AbstractModel):
    """Response structure for the ModifyListener API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyLoadBalancerAttributesRequest(AbstractModel):
    """Request structure for the ModifyLoadBalancerAttributes API call."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Unique CLB instance ID.
        :type LoadBalancerId: str
        :param LoadBalancerName: CLB instance name.
        :type LoadBalancerName: str
        :param TargetRegionInfo: Region information of the backend services bound to the CLB.
        :type TargetRegionInfo: :class:`tencentcloud.clb.v20180317.models.TargetRegionInfo`
        :param InternetChargeInfo: Network billing parameters.
        :type InternetChargeInfo: :class:`tencentcloud.clb.v20180317.models.InternetAccessible`
        :param LoadBalancerPassToTarget: Whether targets accept traffic from CLB directly.
            True: only the CLB security groups are checked; False: both the CLB and the
            backend instance security groups are checked.
        :type LoadBalancerPassToTarget: bool
        :param SnatPro: Whether to enable SnatPro.
        :type SnatPro: bool
        :param DeleteProtect: Whether to enable deletion protection.
        :type DeleteProtect: bool
        """
        self.LoadBalancerId = None
        self.LoadBalancerName = None
        self.TargetRegionInfo = None
        self.InternetChargeInfo = None
        self.LoadBalancerPassToTarget = None
        self.SnatPro = None
        self.DeleteProtect = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.LoadBalancerName = params.get("LoadBalancerName")
        if params.get("TargetRegionInfo") is not None:
            self.TargetRegionInfo = TargetRegionInfo()
            self.TargetRegionInfo._deserialize(params.get("TargetRegionInfo"))
        if params.get("InternetChargeInfo") is not None:
            self.InternetChargeInfo = InternetAccessible()
            self.InternetChargeInfo._deserialize(params.get("InternetChargeInfo"))
        self.LoadBalancerPassToTarget = params.get("LoadBalancerPassToTarget")
        self.SnatPro = params.get("SnatPro")
        self.DeleteProtect = params.get("DeleteProtect")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyLoadBalancerAttributesResponse(AbstractModel):
    """Response structure for the ModifyLoadBalancerAttributes API call."""

    def __init__(self):
        r"""
        :param DealName: When switching the CLB billing mode, this value can be used to
            query whether the switch task succeeded. Note: may be None when not available.
        :type DealName: str
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.DealName = None
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the deal name and request ID from the raw response dict."""
        self.DealName = params.get("DealName")
        self.RequestId = params.get("RequestId")
class ModifyLoadBalancerSlaRequest(AbstractModel):
    """Request structure for the ModifyLoadBalancerSla API call."""

    def __init__(self):
        r"""
        :param LoadBalancerSla: IDs of guaranteed-performance CLB instances and the
            target specification to change each one to.
        :type LoadBalancerSla: list of SlaUpdateParam
        """
        self.LoadBalancerSla = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        if params.get("LoadBalancerSla") is not None:
            self.LoadBalancerSla = []
            for item in params.get("LoadBalancerSla"):
                obj = SlaUpdateParam()
                obj._deserialize(item)
                self.LoadBalancerSla.append(obj)
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyLoadBalancerSlaResponse(AbstractModel):
    """Response structure for the ModifyLoadBalancerSla API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyRuleRequest(AbstractModel):
    """Request structure for the ModifyRule API call."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param LocationId: ID of the forwarding rule to modify.
        :type LocationId: str
        :param Url: New forwarding path of the rule; omit if the URL does not change.
        :type Url: str
        :param HealthCheck: Health-check information.
        :type HealthCheck: :class:`tencentcloud.clb.v20180317.models.HealthCheck`
        :param Scheduler: Request forwarding mode of the rule: WRR (weighted round robin),
            LEAST_CONN (least connections) or IP_HASH. Defaults to WRR.
        :type Scheduler: str
        :param SessionExpireTime: Session persistence time.
        :type SessionExpireTime: int
        :param ForwardType: Forwarding protocol between the CLB and the backend service;
            defaults to HTTP. Valid values: HTTP, HTTPS, TRPC.
        :type ForwardType: str
        :param TrpcCallee: TRPC callee server route; required when ForwardType is TRPC.
        :type TrpcCallee: str
        :param TrpcFunc: TRPC service interface to call; required when ForwardType is TRPC.
        :type TrpcFunc: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.LocationId = None
        self.Url = None
        self.HealthCheck = None
        self.Scheduler = None
        self.SessionExpireTime = None
        self.ForwardType = None
        self.TrpcCallee = None
        self.TrpcFunc = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        self.LocationId = params.get("LocationId")
        self.Url = params.get("Url")
        if params.get("HealthCheck") is not None:
            self.HealthCheck = HealthCheck()
            self.HealthCheck._deserialize(params.get("HealthCheck"))
        self.Scheduler = params.get("Scheduler")
        self.SessionExpireTime = params.get("SessionExpireTime")
        self.ForwardType = params.get("ForwardType")
        self.TrpcCallee = params.get("TrpcCallee")
        self.TrpcFunc = params.get("TrpcFunc")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyRuleResponse(AbstractModel):
    """Response structure for the ModifyRule API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyTargetGroupAttributeRequest(AbstractModel):
    """Request structure for the ModifyTargetGroupAttribute API call."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target group ID.
        :type TargetGroupId: str
        :param TargetGroupName: New name of the target group.
        :type TargetGroupName: str
        :param Port: New default port of the target group.
        :type Port: int
        """
        self.TargetGroupId = None
        self.TargetGroupName = None
        self.Port = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.TargetGroupId = params.get("TargetGroupId")
        self.TargetGroupName = params.get("TargetGroupName")
        self.Port = params.get("Port")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyTargetGroupAttributeResponse(AbstractModel):
    """Response structure for the ModifyTargetGroupAttribute API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyTargetGroupInstancesPortRequest(AbstractModel):
    """Request structure for the ModifyTargetGroupInstancesPort API call."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target group ID.
        :type TargetGroupId: str
        :param TargetGroupInstances: Array of servers whose port is to be modified.
        :type TargetGroupInstances: list of TargetGroupInstance
        """
        self.TargetGroupId = None
        self.TargetGroupInstances = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.TargetGroupId = params.get("TargetGroupId")
        if params.get("TargetGroupInstances") is not None:
            self.TargetGroupInstances = []
            for item in params.get("TargetGroupInstances"):
                obj = TargetGroupInstance()
                obj._deserialize(item)
                self.TargetGroupInstances.append(obj)
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyTargetGroupInstancesPortResponse(AbstractModel):
    """Response structure for the ModifyTargetGroupInstancesPort API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyTargetGroupInstancesWeightRequest(AbstractModel):
    """Request structure for the ModifyTargetGroupInstancesWeight API call."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target group ID.
        :type TargetGroupId: str
        :param TargetGroupInstances: Array of servers whose weight is to be modified.
        :type TargetGroupInstances: list of TargetGroupInstance
        """
        self.TargetGroupId = None
        self.TargetGroupInstances = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.TargetGroupId = params.get("TargetGroupId")
        if params.get("TargetGroupInstances") is not None:
            self.TargetGroupInstances = []
            for item in params.get("TargetGroupInstances"):
                obj = TargetGroupInstance()
                obj._deserialize(item)
                self.TargetGroupInstances.append(obj)
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyTargetGroupInstancesWeightResponse(AbstractModel):
    """Response structure for the ModifyTargetGroupInstancesWeight API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyTargetPortRequest(AbstractModel):
    """Request structure for the ModifyTargetPort API call."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param Targets: List of backend services whose port is to be modified.
        :type Targets: list of Target
        :param NewPort: New port for binding the backend services to the listener
            or forwarding rule.
        :type NewPort: int
        :param LocationId: Forwarding rule ID; when backend services are bound to a
            layer-7 rule, either this parameter or Domain+Url must be provided.
        :type LocationId: str
        :param Domain: Domain name of the target rule; ignored when LocationId is provided.
        :type Domain: str
        :param Url: URL of the target rule; ignored when LocationId is provided.
        :type Url: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Targets = None
        self.NewPort = None
        self.LocationId = None
        self.Domain = None
        self.Url = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = Target()
                obj._deserialize(item)
                self.Targets.append(obj)
        self.NewPort = params.get("NewPort")
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyTargetPortResponse(AbstractModel):
    """Response structure for the ModifyTargetPort API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ModifyTargetWeightRequest(AbstractModel):
    """Request structure for the ModifyTargetWeight API call."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param LocationId: Forwarding rule ID; when binding machines to a layer-7 rule,
            either this parameter or Domain+Url must be provided.
        :type LocationId: str
        :param Domain: Domain name of the target rule; ignored when LocationId is provided.
        :type Domain: str
        :param Url: URL of the target rule; ignored when LocationId is provided.
        :type Url: str
        :param Targets: List of backend services whose weight is to be modified.
        :type Targets: list of Target
        :param Weight: New forwarding weight of the backend services; range 0-100,
            default 10. Ignored when Targets.Weight is set.
        :type Weight: int
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.Targets = None
        self.Weight = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = Target()
                obj._deserialize(item)
                self.Targets.append(obj)
        self.Weight = params.get("Weight")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ModifyTargetWeightResponse(AbstractModel):
    """Response structure for the ModifyTargetWeight API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class Quota(AbstractModel):
    """Quota information; all quotas refer to the current region."""

    def __init__(self):
        r"""
        :param QuotaId: Quota name. Valid values:
            <li> TOTAL_OPEN_CLB_QUOTA: public-network CLB quota in the current region </li>
            <li> TOTAL_INTERNAL_CLB_QUOTA: private-network CLB quota in the current region </li>
            <li> TOTAL_LISTENER_QUOTA: listener quota per CLB </li>
            <li> TOTAL_LISTENER_RULE_QUOTA: forwarding-rule quota per listener </li>
            <li> TOTAL_TARGET_BIND_QUOTA: bound-target quota per forwarding rule </li>
        :type QuotaId: str
        :param QuotaCurrent: Current usage; None means not meaningful.
            Note: this field may be None when the value cannot be obtained.
        :type QuotaCurrent: int
        :param QuotaLimit: Quota limit.
        :type QuotaLimit: int
        """
        self.QuotaId = None
        self.QuotaCurrent = None
        self.QuotaLimit = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.QuotaId = params.get("QuotaId")
        self.QuotaCurrent = params.get("QuotaCurrent")
        self.QuotaLimit = params.get("QuotaLimit")
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RegisterTargetGroupInstancesRequest(AbstractModel):
    """Request structure for the RegisterTargetGroupInstances API call."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target group ID.
        :type TargetGroupId: str
        :param TargetGroupInstances: Array of server instances.
        :type TargetGroupInstances: list of TargetGroupInstance
        """
        self.TargetGroupId = None
        self.TargetGroupInstances = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.TargetGroupId = params.get("TargetGroupId")
        if params.get("TargetGroupInstances") is not None:
            self.TargetGroupInstances = []
            for item in params.get("TargetGroupInstances"):
                obj = TargetGroupInstance()
                obj._deserialize(item)
                self.TargetGroupInstances.append(obj)
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RegisterTargetGroupInstancesResponse(AbstractModel):
    """Response structure for the RegisterTargetGroupInstances API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class RegisterTargetsRequest(AbstractModel):
    """Request structure for the RegisterTargets API call."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param Targets: List of backend services to bind; the array supports at most 20 items.
        :type Targets: list of Target
        :param LocationId: Forwarding rule ID; when binding backend services to a layer-7
            rule, either this parameter or Domain+Url must be provided.
        :type LocationId: str
        :param Domain: Domain name of the target forwarding rule; ignored when LocationId is provided.
        :type Domain: str
        :param Url: URL of the target forwarding rule; ignored when LocationId is provided.
        :type Url: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.Targets = None
        self.LocationId = None
        self.Domain = None
        self.Url = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = Target()
                obj._deserialize(item)
                self.Targets.append(obj)
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RegisterTargetsResponse(AbstractModel):
    """Response structure for the RegisterTargets API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class RegisterTargetsWithClassicalLBRequest(AbstractModel):
    """Request structure for the RegisterTargetsWithClassicalLB API call."""

    def __init__(self):
        r"""
        :param LoadBalancerId: CLB instance ID.
        :type LoadBalancerId: str
        :param Targets: Backend service information.
        :type Targets: list of ClassicalTargetInfo
        """
        self.LoadBalancerId = None
        self.Targets = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = ClassicalTargetInfo()
                obj._deserialize(item)
                self.Targets.append(obj)
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RegisterTargetsWithClassicalLBResponse(AbstractModel):
    """Response structure for the RegisterTargetsWithClassicalLB API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class ReplaceCertForLoadBalancersRequest(AbstractModel):
    """Request structure for the ReplaceCertForLoadBalancers API call."""

    def __init__(self):
        r"""
        :param OldCertificateId: ID of the certificate to be replaced; may be a server
            certificate or a client certificate.
        :type OldCertificateId: str
        :param Certificate: Content and related information of the new certificate.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateInput`
        """
        self.OldCertificateId = None
        self.Certificate = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.OldCertificateId = params.get("OldCertificateId")
        if params.get("Certificate") is not None:
            self.Certificate = CertificateInput()
            self.Certificate._deserialize(params.get("Certificate"))
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class ReplaceCertForLoadBalancersResponse(AbstractModel):
    """Response structure for the ReplaceCertForLoadBalancers API call."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Extract the request ID from the raw response dict."""
        self.RequestId = params.get("RequestId")
class RewriteLocationMap(AbstractModel):
    """Redirect relationship between forwarding rules."""

    def __init__(self):
        r"""
        :param SourceLocationId: ID of the source forwarding rule.
        :type SourceLocationId: str
        :param TargetLocationId: ID of the target forwarding rule redirected to.
        :type TargetLocationId: str
        :param RewriteCode: Redirect status code; valid values 301, 302, 307.
        :type RewriteCode: int
        :param TakeUrl: Whether the redirect carries the matched URL; required when
            RewriteCode is configured.
        :type TakeUrl: bool
        :param SourceDomain: Domain of the source rule; must be the domain that
            SourceLocationId belongs to; required when RewriteCode is configured.
        :type SourceDomain: str
        """
        self.SourceLocationId = None
        self.TargetLocationId = None
        self.RewriteCode = None
        self.TakeUrl = None
        self.SourceDomain = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.SourceLocationId = params.get("SourceLocationId")
        self.TargetLocationId = params.get("TargetLocationId")
        self.RewriteCode = params.get("RewriteCode")
        self.TakeUrl = params.get("TakeUrl")
        self.SourceDomain = params.get("SourceDomain")
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RewriteTarget(AbstractModel):
    """Information about a redirect target."""

    def __init__(self):
        r"""
        :param TargetListenerId: Listener ID of the redirect target.
            Note: may be None, meaning no redirect / value unavailable.
        :type TargetListenerId: str
        :param TargetLocationId: Forwarding-rule ID of the redirect target.
            Note: may be None, meaning no redirect / value unavailable.
        :type TargetLocationId: str
        :param RewriteCode: Redirect status code.
            Note: may be None when the value cannot be obtained.
        :type RewriteCode: int
        :param TakeUrl: Whether the redirect carries the matched URL.
            Note: may be None when the value cannot be obtained.
        :type TakeUrl: bool
        :param RewriteType: Redirect type; Manual: manual redirect, Auto: automatic redirect.
            Note: may be None when the value cannot be obtained.
        :type RewriteType: str
        """
        self.TargetListenerId = None
        self.TargetLocationId = None
        self.RewriteCode = None
        self.TakeUrl = None
        self.RewriteType = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.TargetListenerId = params.get("TargetListenerId")
        self.TargetLocationId = params.get("TargetLocationId")
        self.RewriteCode = params.get("RewriteCode")
        self.TakeUrl = params.get("TakeUrl")
        self.RewriteType = params.get("RewriteType")
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RsWeightRule(AbstractModel):
    """Data type used to modify the weight of backend nodes."""

    def __init__(self):
        r"""
        :param ListenerId: CLB listener ID.
        :type ListenerId: str
        :param Targets: List of backend machines whose weight is to be modified.
        :type Targets: list of Target
        :param LocationId: Forwarding rule ID; required for layer-7 rules, not needed
            for layer-4 rules.
        :type LocationId: str
        :param Domain: Domain name of the target rule; ignored when LocationId is provided.
        :type Domain: str
        :param Url: URL of the target rule; ignored when LocationId is provided.
        :type Url: str
        :param Weight: New forwarding weight of the backend services; range [0, 100].
            This parameter has lower priority than the Weight in Target
            (https://cloud.tencent.com/document/api/214/30694#Target): the final weight
            is the Target.Weight value, and this one is only used when Target.Weight
            is empty.
        :type Weight: int
        """
        self.ListenerId = None
        self.Targets = None
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.Weight = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.ListenerId = params.get("ListenerId")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = Target()
                obj._deserialize(item)
                self.Targets.append(obj)
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        self.Weight = params.get("Weight")
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RuleHealth(AbstractModel):
    """Health-check status of one forwarding rule."""

    def __init__(self):
        r"""
        :param LocationId: Forwarding rule ID.
        :type LocationId: str
        :param Domain: Domain name of the forwarding rule.
            Note: may be None when the value cannot be obtained.
        :type Domain: str
        :param Url: URL of the forwarding rule.
            Note: may be None when the value cannot be obtained.
        :type Url: str
        :param Targets: Health-check status of the backend services bound to this rule.
            Note: may be None when the value cannot be obtained.
        :type Targets: list of TargetHealth
        """
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.Targets = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = TargetHealth()
                obj._deserialize(item)
                self.Targets.append(obj)
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RuleInput(AbstractModel):
    """HTTP/HTTPS forwarding rule (input)."""

    def __init__(self):
        r"""
        :param Domain: Domain name of the forwarding rule; length 1-80.
        :type Domain: str
        :param Url: Path of the forwarding rule; length 1-200.
        :type Url: str
        :param SessionExpireTime: Session persistence time. 0 disables persistence;
            valid range when enabled is 30-3600 seconds.
        :type SessionExpireTime: int
        :param HealthCheck: Health-check information. See the health-check docs:
            https://cloud.tencent.com/document/product/214/6097
        :type HealthCheck: :class:`tencentcloud.clb.v20180317.models.HealthCheck`
        :param Certificate: Certificate information.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateInput`
        :param Scheduler: Request forwarding mode of the rule: WRR (weighted round robin),
            LEAST_CONN (least connections) or IP_HASH. Defaults to WRR.
        :type Scheduler: str
        :param ForwardType: Forwarding protocol between the CLB and the backend service;
            currently HTTP/HTTPS/TRPC are supported.
        :type ForwardType: str
        :param DefaultServer: Whether to set this domain as the default domain. Note that
            only one default domain can be set per listener.
        :type DefaultServer: bool
        :param Http2: Whether to enable HTTP/2; only HTTPS domains can enable it.
        :type Http2: bool
        :param TargetType: Backend target type; NODE binds ordinary nodes, TARGETGROUP
            binds a target group.
        :type TargetType: str
        :param TrpcCallee: TRPC callee server route; required when ForwardType is TRPC.
        :type TrpcCallee: str
        :param TrpcFunc: TRPC service interface to call; required when ForwardType is TRPC.
        :type TrpcFunc: str
        :param Quic: Whether to enable QUIC; only HTTPS domains can enable it.
        :type Quic: bool
        """
        self.Domain = None
        self.Url = None
        self.SessionExpireTime = None
        self.HealthCheck = None
        self.Certificate = None
        self.Scheduler = None
        self.ForwardType = None
        self.DefaultServer = None
        self.Http2 = None
        self.TargetType = None
        self.TrpcCallee = None
        self.TrpcFunc = None
        self.Quic = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        self.SessionExpireTime = params.get("SessionExpireTime")
        if params.get("HealthCheck") is not None:
            self.HealthCheck = HealthCheck()
            self.HealthCheck._deserialize(params.get("HealthCheck"))
        if params.get("Certificate") is not None:
            self.Certificate = CertificateInput()
            self.Certificate._deserialize(params.get("Certificate"))
        self.Scheduler = params.get("Scheduler")
        self.ForwardType = params.get("ForwardType")
        self.DefaultServer = params.get("DefaultServer")
        self.Http2 = params.get("Http2")
        self.TargetType = params.get("TargetType")
        self.TrpcCallee = params.get("TrpcCallee")
        self.TrpcFunc = params.get("TrpcFunc")
        self.Quic = params.get("Quic")
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RuleOutput(AbstractModel):
    """Forwarding rule of an HTTP/HTTPS listener (output)."""

    def __init__(self):
        r"""
        :param LocationId: Forwarding rule ID.
        :type LocationId: str
        :param Domain: Domain name of the forwarding rule.
            Note: may be None when the value cannot be obtained.
        :type Domain: str
        :param Url: Path of the forwarding rule.
            Note: may be None when the value cannot be obtained.
        :type Url: str
        :param SessionExpireTime: Session persistence time.
        :type SessionExpireTime: int
        :param HealthCheck: Health-check information.
            Note: may be None when the value cannot be obtained.
        :type HealthCheck: :class:`tencentcloud.clb.v20180317.models.HealthCheck`
        :param Certificate: Certificate information.
            Note: may be None when the value cannot be obtained.
        :type Certificate: :class:`tencentcloud.clb.v20180317.models.CertificateOutput`
        :param Scheduler: Request forwarding mode of the rule.
        :type Scheduler: str
        :param ListenerId: ID of the listener the rule belongs to.
        :type ListenerId: str
        :param RewriteTarget: Redirect-target information of the rule.
            Note: may be None when the value cannot be obtained.
        :type RewriteTarget: :class:`tencentcloud.clb.v20180317.models.RewriteTarget`
        :param HttpGzip: Whether gzip is enabled.
        :type HttpGzip: bool
        :param BeAutoCreated: Whether the rule was created automatically.
        :type BeAutoCreated: bool
        :param DefaultServer: Whether this is the default domain.
        :type DefaultServer: bool
        :param Http2: Whether HTTP/2 is enabled.
        :type Http2: bool
        :param ForwardType: Forwarding protocol between the CLB and the backend service.
        :type ForwardType: str
        :param CreateTime: Creation time of the forwarding rule.
        :type CreateTime: str
        :param TargetType: Backend server type.
        :type TargetType: str
        :param TargetGroup: Basic information of the bound target group; returned when
            the rule is bound to a target group.
            Note: may be None when the value cannot be obtained.
        :type TargetGroup: :class:`tencentcloud.clb.v20180317.models.BasicTargetGroupInfo`
        :param WafDomainId: WAF instance ID.
            Note: may be None when the value cannot be obtained.
        :type WafDomainId: str
        :param TrpcCallee: TRPC callee server route; effective when ForwardType is TRPC.
            Note: may be None when the value cannot be obtained.
        :type TrpcCallee: str
        :param TrpcFunc: TRPC service interface; effective when ForwardType is TRPC.
            Note: may be None when the value cannot be obtained.
        :type TrpcFunc: str
        :param QuicStatus: QUIC status.
            Note: may be None when the value cannot be obtained.
        :type QuicStatus: str
        """
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.SessionExpireTime = None
        self.HealthCheck = None
        self.Certificate = None
        self.Scheduler = None
        self.ListenerId = None
        self.RewriteTarget = None
        self.HttpGzip = None
        self.BeAutoCreated = None
        self.DefaultServer = None
        self.Http2 = None
        self.ForwardType = None
        self.CreateTime = None
        self.TargetType = None
        self.TargetGroup = None
        self.WafDomainId = None
        self.TrpcCallee = None
        self.TrpcFunc = None
        self.QuicStatus = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        self.SessionExpireTime = params.get("SessionExpireTime")
        if params.get("HealthCheck") is not None:
            self.HealthCheck = HealthCheck()
            self.HealthCheck._deserialize(params.get("HealthCheck"))
        if params.get("Certificate") is not None:
            self.Certificate = CertificateOutput()
            self.Certificate._deserialize(params.get("Certificate"))
        self.Scheduler = params.get("Scheduler")
        self.ListenerId = params.get("ListenerId")
        if params.get("RewriteTarget") is not None:
            self.RewriteTarget = RewriteTarget()
            self.RewriteTarget._deserialize(params.get("RewriteTarget"))
        self.HttpGzip = params.get("HttpGzip")
        self.BeAutoCreated = params.get("BeAutoCreated")
        self.DefaultServer = params.get("DefaultServer")
        self.Http2 = params.get("Http2")
        self.ForwardType = params.get("ForwardType")
        self.CreateTime = params.get("CreateTime")
        self.TargetType = params.get("TargetType")
        if params.get("TargetGroup") is not None:
            self.TargetGroup = BasicTargetGroupInfo()
            self.TargetGroup._deserialize(params.get("TargetGroup"))
        self.WafDomainId = params.get("WafDomainId")
        self.TrpcCallee = params.get("TrpcCallee")
        self.TrpcFunc = params.get("TrpcFunc")
        self.QuicStatus = params.get("QuicStatus")
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RuleTargets(AbstractModel):
    """Backend services bound to a forwarding rule of an HTTP/HTTPS listener."""

    def __init__(self):
        r"""
        :param LocationId: Forwarding rule ID.
        :type LocationId: str
        :param Domain: Domain name of the forwarding rule.
        :type Domain: str
        :param Url: Path of the forwarding rule.
        :type Url: str
        :param Targets: Information about the backend services.
            Note: may be None when the value cannot be obtained.
        :type Targets: list of Backend
        """
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.Targets = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = Backend()
                obj._deserialize(item)
                self.Targets.append(obj)
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class RulesItems(AbstractModel):
    """Layer-7 rule object."""

    def __init__(self):
        r"""
        :param LocationId: Rule ID.
        :type LocationId: str
        :param Domain: Domain name.
        :type Domain: str
        :param Url: URI.
        :type Url: str
        :param Targets: Bound backend objects.
        :type Targets: list of LbRsTargets
        """
        self.LocationId = None
        self.Domain = None
        self.Url = None
        self.Targets = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.LocationId = params.get("LocationId")
        self.Domain = params.get("Domain")
        self.Url = params.get("Url")
        if params.get("Targets") is not None:
            self.Targets = []
            for item in params.get("Targets"):
                obj = LbRsTargets()
                obj._deserialize(item)
                self.Targets.append(obj)
        # Report keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class SetCustomizedConfigForLoadBalancerRequest(AbstractModel):
    """Request structure for the SetCustomizedConfigForLoadBalancer API call."""

    def __init__(self):
        r"""
        :param OperationType: Operation type: 'ADD', 'DELETE', 'UPDATE', 'BIND', 'UNBIND'.
        :type OperationType: str
        :param UconfigId: Required for every operation except creating a customized
            configuration, e.g. pz-1234abcd.
        :type UconfigId: str
        :param ConfigContent: Required when creating a customized configuration or
            modifying its content.
        :type ConfigContent: str
        :param ConfigName: Required when creating a customized configuration or
            modifying its name.
        :type ConfigName: str
        :param LoadBalancerIds: Required when binding or unbinding.
        :type LoadBalancerIds: list of str
        """
        self.OperationType = None
        self.UconfigId = None
        self.ConfigContent = None
        self.ConfigName = None
        self.LoadBalancerIds = None

    def _deserialize(self, params):
        """Populate attributes from ``params`` and warn about unrecognized keys."""
        self.OperationType = params.get("OperationType")
        self.UconfigId = params.get("UconfigId")
        self.ConfigContent = params.get("ConfigContent")
        self.ConfigName = params.get("ConfigName")
        self.LoadBalancerIds = params.get("LoadBalancerIds")
        # Report request keys that do not map to any known attribute.
        # (Also fixes the "fileds" typo in the generated warning message.)
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys))
class SetCustomizedConfigForLoadBalancerResponse(AbstractModel):
    """SetCustomizedConfigForLoadBalancer response body."""

    def __init__(self):
        r"""
        :param ConfigId: Customized configuration id, e.g. pz-1234abcd.
        :type ConfigId: str
        :param RequestId: Unique request id, returned with every request;
            needed when reporting an issue with this request.
        :type RequestId: str
        """
        self.ConfigId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Responses carry no unknown-field check; copy declared keys only.
        for field in ("ConfigId", "RequestId"):
            setattr(self, field, params.get(field))
class SetLoadBalancerClsLogRequest(AbstractModel):
    """SetLoadBalancerClsLog request body."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance id.
        :type LoadBalancerId: str
        :param LogSetId: Cloud Log Service (CLS) log-set id.
        :type LogSetId: str
        :param LogTopicId: Cloud Log Service (CLS) log-topic id.
        :type LogTopicId: str
        :param LogType: Log type: ACCESS (access log) or HEALTH (health-check
            log). Defaults to ACCESS.
        :type LogType: str
        """
        self.LoadBalancerId = None
        self.LogSetId = None
        self.LogTopicId = None
        self.LogType = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.LogSetId = params.get("LogSetId")
        self.LogTopicId = params.get("LogTopicId")
        self.LogType = params.get("LogType")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class SetLoadBalancerClsLogResponse(AbstractModel):
    """SetLoadBalancerClsLog response body."""

    def __init__(self):
        r"""
        :param RequestId: Unique request id, returned with every request;
            needed when reporting an issue with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request id comes back; no unknown-field check on responses.
        self.RequestId = params.get("RequestId")
class SetLoadBalancerSecurityGroupsRequest(AbstractModel):
    """SetLoadBalancerSecurityGroups request body."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer instance id.
        :type LoadBalancerId: str
        :param SecurityGroups: Security-group ids; at most 50 per instance.
            Omit the field or pass an empty list to unbind all groups.
        :type SecurityGroups: list of str
        """
        self.LoadBalancerId = None
        self.SecurityGroups = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.SecurityGroups = params.get("SecurityGroups")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class SetLoadBalancerSecurityGroupsResponse(AbstractModel):
    """SetLoadBalancerSecurityGroups response body."""

    def __init__(self):
        r"""
        :param RequestId: Unique request id, returned with every request;
            needed when reporting an issue with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request id comes back; no unknown-field check on responses.
        self.RequestId = params.get("RequestId")
class SetSecurityGroupForLoadbalancersRequest(AbstractModel):
    """SetSecurityGroupForLoadbalancers request body."""

    def __init__(self):
        r"""
        :param SecurityGroup: Security-group id, e.g. sg-12345678.
        :type SecurityGroup: str
        :param OperationType: ADD to bind the security group; DEL to unbind it.
        :type OperationType: str
        :param LoadBalancerIds: Load balancer instance ids.
        :type LoadBalancerIds: list of str
        """
        self.SecurityGroup = None
        self.OperationType = None
        self.LoadBalancerIds = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.SecurityGroup = params.get("SecurityGroup")
        self.OperationType = params.get("OperationType")
        self.LoadBalancerIds = params.get("LoadBalancerIds")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class SetSecurityGroupForLoadbalancersResponse(AbstractModel):
    """SetSecurityGroupForLoadbalancers response body."""

    def __init__(self):
        r"""
        :param RequestId: Unique request id, returned with every request;
            needed when reporting an issue with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request id comes back; no unknown-field check on responses.
        self.RequestId = params.get("RequestId")
class SlaUpdateParam(AbstractModel):
    """Parameters for changing an instance's guaranteed-performance (SLA) tier."""

    def __init__(self):
        r"""
        :param LoadBalancerId: String id of the load balancer.
        :type LoadBalancerId: str
        :param SlaType: Target guaranteed-performance tier.
        :type SlaType: str
        """
        self.LoadBalancerId = None
        self.SlaType = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.SlaType = params.get("SlaType")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class SnatIp(AbstractModel):
    """A SNAT IP entry: a VPC subnet plus an IP address inside it."""

    def __init__(self):
        r"""
        :param SubnetId: Unique id of the VPC subnet, e.g. subnet-12345678.
        :type SubnetId: str
        :param Ip: IP address, e.g. 192.168.0.1.
        :type Ip: str
        """
        self.SubnetId = None
        self.Ip = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.SubnetId = params.get("SubnetId")
        self.Ip = params.get("Ip")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TagInfo(AbstractModel):
    """A key/value tag attached to a load balancer."""

    def __init__(self):
        r"""
        :param TagKey: Tag key.
        :type TagKey: str
        :param TagValue: Tag value.
        :type TagValue: str
        """
        self.TagKey = None
        self.TagValue = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.TagKey = params.get("TagKey")
        self.TagValue = params.get("TagValue")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class Target(AbstractModel):
    """A forwarding target, i.e. a backend service bound to a load balancer."""

    def __init__(self):
        r"""
        :param Port: Listening port of the backend service. Required when
            binding a CVM or ENI. May be null in responses.
        :type Port: int
        :param Type: Backend type: CVM (cloud server) or ENI (elastic network
            interface). Currently ignored when used as an input. May be null.
        :type Type: str
        :param InstanceId: Unique CVM instance id (from DescribeInstances);
            binds the primary IP of the primary NIC. Exactly one of
            InstanceId / EniIp must be supplied. May be null.
        :type InstanceId: str
        :param Weight: Forwarding weight in [0, 100], default 10. Takes
            precedence over RsWeightRule.Weight; the latter is used only when
            this field is empty.
        :type Weight: int
        :param EniIp: IP to bind (ENI IP or other private IP). An ENI must be
            bound to a CVM before it can be bound here. Exactly one of
            InstanceId / EniIp must be supplied; required for dual-stack IPv6
            instances. May be null.
        :type EniIp: str
        """
        self.Port = None
        self.Type = None
        self.InstanceId = None
        self.Weight = None
        self.EniIp = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.Port = params.get("Port")
        self.Type = params.get("Type")
        self.InstanceId = params.get("InstanceId")
        self.Weight = params.get("Weight")
        self.EniIp = params.get("EniIp")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TargetGroupAssociation(AbstractModel):
    """Association between a forwarding rule and a target group."""

    def __init__(self):
        r"""
        :param LoadBalancerId: Load balancer id.
        :type LoadBalancerId: str
        :param ListenerId: Listener id.
        :type ListenerId: str
        :param TargetGroupId: Target-group id.
        :type TargetGroupId: str
        :param LocationId: Forwarding-rule id.
        :type LocationId: str
        """
        self.LoadBalancerId = None
        self.ListenerId = None
        self.TargetGroupId = None
        self.LocationId = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.LoadBalancerId = params.get("LoadBalancerId")
        self.ListenerId = params.get("ListenerId")
        self.TargetGroupId = params.get("TargetGroupId")
        self.LocationId = params.get("LocationId")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TargetGroupBackend(AbstractModel):
    """A backend server bound to a target group."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target-group id.
        :type TargetGroupId: str
        :param Type: Backend type: CVM, or ENI (support upcoming).
        :type Type: str
        :param InstanceId: Unique id of the backend service.
        :type InstanceId: str
        :param Port: Listening port of the backend service.
        :type Port: int
        :param Weight: Forwarding weight in [0, 100], default 10.
        :type Weight: int
        :param PublicIpAddresses: Public IPs of the backend. May be null.
        :type PublicIpAddresses: list of str
        :param PrivateIpAddresses: Private IPs of the backend. May be null.
        :type PrivateIpAddresses: list of str
        :param InstanceName: Instance name of the backend. May be null.
        :type InstanceName: str
        :param RegisteredTime: Time the backend was bound. May be null.
        :type RegisteredTime: str
        :param EniId: Unique ENI id. May be null.
        :type EniId: str
        :param ZoneId: Availability-zone id of the backend. May be null.
        :type ZoneId: int
        """
        self.TargetGroupId = None
        self.Type = None
        self.InstanceId = None
        self.Port = None
        self.Weight = None
        self.PublicIpAddresses = None
        self.PrivateIpAddresses = None
        self.InstanceName = None
        self.RegisteredTime = None
        self.EniId = None
        self.ZoneId = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.TargetGroupId = params.get("TargetGroupId")
        self.Type = params.get("Type")
        self.InstanceId = params.get("InstanceId")
        self.Port = params.get("Port")
        self.Weight = params.get("Weight")
        self.PublicIpAddresses = params.get("PublicIpAddresses")
        self.PrivateIpAddresses = params.get("PrivateIpAddresses")
        self.InstanceName = params.get("InstanceName")
        self.RegisteredTime = params.get("RegisteredTime")
        self.EniId = params.get("EniId")
        self.ZoneId = params.get("ZoneId")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TargetGroupInfo(AbstractModel):
    """Descriptive information about a target group."""

    def __init__(self):
        r"""
        :param TargetGroupId: Target-group id.
        :type TargetGroupId: str
        :param VpcId: VPC id of the target group.
        :type VpcId: str
        :param TargetGroupName: Name of the target group.
        :type TargetGroupName: str
        :param Port: Default port of the target group. May be null.
        :type Port: int
        :param CreatedTime: Creation time of the target group.
        :type CreatedTime: str
        :param UpdatedTime: Last-modification time of the target group.
        :type UpdatedTime: str
        :param AssociatedRule: Rules associated with the group. May be null.
        :type AssociatedRule: list of AssociationItem
        """
        self.TargetGroupId = None
        self.VpcId = None
        self.TargetGroupName = None
        self.Port = None
        self.CreatedTime = None
        self.UpdatedTime = None
        self.AssociatedRule = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.TargetGroupId = params.get("TargetGroupId")
        self.VpcId = params.get("VpcId")
        self.TargetGroupName = params.get("TargetGroupName")
        self.Port = params.get("Port")
        self.CreatedTime = params.get("CreatedTime")
        self.UpdatedTime = params.get("UpdatedTime")
        rules = params.get("AssociatedRule")
        if rules is not None:
            # Each association is a nested model and must be deserialized.
            self.AssociatedRule = []
            for item in rules:
                rule = AssociationItem()
                rule._deserialize(item)
                self.AssociatedRule.append(rule)
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TargetGroupInstance(AbstractModel):
    """An instance entry inside a target group."""

    def __init__(self):
        r"""
        :param BindIP: Private IP of the target-group instance.
        :type BindIP: str
        :param Port: Port of the target-group instance.
        :type Port: int
        :param Weight: Weight of the target-group instance.
        :type Weight: int
        :param NewPort: New port of the target-group instance.
        :type NewPort: int
        """
        self.BindIP = None
        self.Port = None
        self.Weight = None
        self.NewPort = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.BindIP = params.get("BindIP")
        self.Port = params.get("Port")
        self.Weight = params.get("Weight")
        self.NewPort = params.get("NewPort")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TargetHealth(AbstractModel):
    """Health information for a single forwarding target."""

    def __init__(self):
        r"""
        :param IP: Private IP of the target.
        :type IP: str
        :param Port: Port the target is bound on.
        :type Port: int
        :param HealthStatus: Current health: true = healthy, false = unhealthy
            (covers not-yet-probed, probing and abnormal states). Traffic is
            only forwarded to healthy targets with weight > 0.
        :type HealthStatus: bool
        :param TargetId: Instance id of the target, e.g. ins-12345678.
        :type TargetId: str
        :param HealthStatusDetial: Detailed health state: Alive (healthy),
            Dead (abnormal) or Unknown (not yet probed / probing / unknown).
            Note: the field name's spelling is part of the wire API.
        :type HealthStatusDetial: str
        """
        self.IP = None
        self.Port = None
        self.HealthStatus = None
        self.TargetId = None
        self.HealthStatusDetial = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.IP = params.get("IP")
        self.Port = params.get("Port")
        self.HealthStatus = params.get("HealthStatus")
        self.TargetId = params.get("TargetId")
        self.HealthStatusDetial = params.get("HealthStatusDetial")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class TargetRegionInfo(AbstractModel):
    """Region and network of the backend services bound to a load balancer."""

    def __init__(self):
        r"""
        :param Region: Region of the target, e.g. ap-guangzhou.
        :type Region: str
        :param VpcId: Network of the target: a VPC id such as vpc-abcd1234,
            or "0" for the basic (classic) network.
        :type VpcId: str
        """
        self.Region = None
        self.VpcId = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.Region = params.get("Region")
        self.VpcId = params.get("VpcId")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ZoneInfo(AbstractModel):
    """Availability-zone information."""

    def __init__(self):
        r"""
        :param ZoneId: Numeric unique zone id, e.g. 100001. May be null.
        :type ZoneId: int
        :param Zone: String unique zone id, e.g. ap-guangzhou-1. May be null.
        :type Zone: str
        :param ZoneName: Human-readable zone name. May be null.
        :type ZoneName: str
        :param ZoneRegion: Region the zone belongs to, e.g. ap-guangzhou.
            May be null.
        :type ZoneRegion: str
        :param LocalZone: Whether the zone is a LocalZone, e.g. false.
            May be null.
        :type LocalZone: bool
        """
        self.ZoneId = None
        self.Zone = None
        self.ZoneName = None
        self.ZoneRegion = None
        self.LocalZone = None

    def _deserialize(self, params):
        """Populate this model in place from a raw API response dict."""
        self.ZoneId = params.get("ZoneId")
        self.Zone = params.get("Zone")
        self.ZoneName = params.get("ZoneName")
        self.ZoneRegion = params.get("ZoneRegion")
        self.LocalZone = params.get("LocalZone")
        # Flag undeclared payload keys (typo "fileds" fixed; loop -> set diff).
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
| [
"colorguitar@hotmail.com"
] | colorguitar@hotmail.com |
74a74180166787702105afc2c0bd3c1d3a64a0e0 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnn1278.py | cd837450315f269d0bbdf6c1157463c79cd396d0 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 101 | py | ii = [('RoscTTI3.py', 1), ('DaltJMA.py', 1), ('WadeJEB.py', 1), ('MereHHB.py', 7), ('StorJCC.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
da2f3447df25e8672a8c89d31eb740322ae1790f | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/xiaojian/forth_phase/Django./day08/code/note/mynote/admin.py | c52aded9e2cc4e58c61eeed3be026b54015fcc6a | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 188 | py | from django.contrib import admin
from . import models
from user import models as u_models

# Register your models here.
# Expose the user app's User model and this app's Note model in the
# Django admin interface (default ModelAdmin, no customization).
admin.site.register(u_models.User)
admin.site.register(models.Note)
| [
"1149158963@qq.com"
] | 1149158963@qq.com |
b0a622e6c7ab5d8810032327c012c41f075dbbbc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/348/92907/submittedfiles/principal.py | 8674f107067ffc4d8d05766f3d3d2689e85555f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
# START HERE BELOW (original comment: "COMECE AQUI ABAIXO")
# Read 50 grades from the user, then print the list and their mean.
notas = []
for i in range(0,50,1):
    # Prompts are 1-based: "digite a nota1", "digite a nota2", ...
    notas.append(float(input('digite a nota%d: ' % ( i+1))))
media = 0
for i in range(0,50,1):
    # Accumulate each grade divided by 50 -> arithmetic mean of 50 grades.
    media += notas[i]/50.0
print(notas)
print(media)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ede12433d9359fd4d0bd72ba7be09ba5b9ad0180 | 71e539273a80f943b0e2164228fe9e5288c90e62 | /Python/KidsWithTheGreatestNumberOfCandies.py | 253eb8e1802aacb6926340c9ff5929c5b0c98a8c | [] | no_license | abhi10010/LeetCode-Solutions | f67206052674585b57b93dae4cd9d68282b39bd6 | 01993de9f431dff487787709af8556f476e6b20b | refs/heads/master | 2022-11-09T20:07:27.689429 | 2020-06-25T13:37:19 | 2020-06-25T13:37:19 | 271,465,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
res = list()
m = max(candies)
for i in candies:
if i + extraCandies >= m:
res.append(True)
else:
res.append(False)
return res
| [
"noreply@github.com"
] | abhi10010.noreply@github.com |
4a954641dd795b67e6f8550afe0a9869e0cae803 | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /deriva-annotations/catalog99/catalog-configs/PDB/ihm_multi_state_model_group_link.py | ceb7363dc7dfb2ec3595499a221b0c33f271df76 | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 10,423 | py | import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
# Globus Auth group UUIDs referenced by the ACL and ACL-binding
# definitions later in this module.
groups = {
    'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
    'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
    'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
    'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
    'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b',
    'pdb-submitter': 'https://auth.globus.org/99da042e-64a6-11ea-ad5f-0ef992ed7ca1'
}

# Schema-qualified name of the table this module defines.
table_name = 'ihm_multi_state_model_group_link'
schema_name = 'PDB'

# Per-column annotation payloads (none needed for this table) and
# human-readable column comments, keyed by column name.
column_annotations = {'structure_id': {}, 'model_group_id': {}, 'state_id': {}, 'Owner': {}}
column_comment = {
    'structure_id': 'A reference to table entry.id.',
    'model_group_id': 'A reference to table ihm_model_group.id.',
    'state_id': 'A reference to table ihm_multi_state_modeling.state_id.',
    'Owner': 'Group that can update the record.'
}

# No column-level ACLs or ACL bindings are applied to this table.
column_acls = {}
column_acl_bindings = {}
em.Column.define(
'structure_id',
em.builtin_types['text'],
nullok=False,
comment=column_comment['structure_id'],
),
em.Column.define(
'model_group_id',
em.builtin_types['int4'],
nullok=False,
comment=column_comment['model_group_id'],
),
em.Column.define(
'state_id', em.builtin_types['int4'], nullok=False, comment=column_comment['state_id'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
display = {'name': 'Model Groups Belonging to Multiple States'}
visible_columns = {
'*': [
'RID', {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_structure_id_fkey']
}, 'RID'
],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_state_id_fkey']
}, 'RID'
],
'comment': 'A reference to table ihm_multi_state_modeling.state_id.',
'markdown_name': 'state id'
}, {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_model_group_id_fkey']
}, 'RID'
],
'comment': 'A reference to table ihm_model_group.id.',
'markdown_name': 'model group id'
}
],
'entry': [
{
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_structure_id_fkey']
}, 'RID'
],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_state_id_fkey']
}, 'RID'
],
'comment': 'A reference to table ihm_multi_state_modeling.state_id.',
'markdown_name': 'state id'
}, {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_model_group_id_fkey']
}, 'RID'
],
'comment': 'A reference to table ihm_model_group.id.',
'markdown_name': 'model group id'
}
],
'detailed': [
'RID', {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_structure_id_fkey']
}, 'RID'
],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_state_id_fkey']
}, 'RID'
],
'comment': 'A reference to table ihm_multi_state_modeling.state_id.',
'markdown_name': 'state id'
}, {
'source': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_model_group_id_fkey']
}, 'RID'
],
'comment': 'A reference to table ihm_model_group.id.',
'markdown_name': 'model group id'
}, ['PDB', 'ihm_multi_state_model_group_link_RCB_fkey'],
['PDB', 'ihm_multi_state_model_group_link_RMB_fkey'], 'RCT', 'RMT',
['PDB', 'ihm_multi_state_model_group_link_Owner_fkey']
]
}
table_annotations = {chaise_tags.display: display, chaise_tags.visible_columns: visible_columns, }
table_comment = 'List of model groups belonging to a particular state'
table_acls = {
'owner': [groups['pdb-admin'], groups['isrd-staff']],
'write': [],
'delete': [groups['pdb-curator']],
'insert': [groups['pdb-curator'], groups['pdb-writer'], groups['pdb-submitter']],
'select': [groups['pdb-writer'], groups['pdb-reader']],
'update': [groups['pdb-curator']],
'enumerate': ['*']
}
table_acl_bindings = {
'released_reader': {
'types': ['select'],
'scope_acl': [groups['pdb-submitter']],
'projection': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_structure_id_fkey']
}, 'RCB'
],
'projection_type': 'acl'
},
'self_service_group': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['Owner'],
'projection_type': 'acl'
},
'self_service_creator': {
'types': ['update', 'delete'],
'scope_acl': [groups['pdb-submitter']],
'projection': [
{
'outbound': ['PDB', 'ihm_multi_state_model_group_link_structure_id_fkey']
}, {
'or': [
{
'filter': 'Workflow_Status',
'operand': 'DRAFT',
'operator': '='
}, {
'filter': 'Workflow_Status',
'operand': 'DEPO',
'operator': '='
}, {
'filter': 'Workflow_Status',
'operand': 'RECORD READY',
'operator': '='
}, {
'filter': 'Workflow_Status',
'operand': 'ERROR',
'operator': '='
}
]
}, 'RCB'
],
'projection_type': 'acl'
}
}
key_defs = [
em.Key.define(
['model_group_id', 'structure_id'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_primary_key']],
),
em.Key.define(
['RID'], constraint_names=[['PDB', 'ihm_multi_state_model_group_link_RIDkey1']],
),
]
fkey_defs = [
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_RCB_fkey']],
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_RMB_fkey']],
),
em.ForeignKey.define(
['structure_id'],
'PDB',
'entry', ['id'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_structure_id_fkey']],
acls={
'insert': ['*'],
'update': ['*']
},
on_update='CASCADE',
on_delete='CASCADE',
),
em.ForeignKey.define(
['structure_id', 'model_group_id'],
'PDB',
'ihm_model_group', ['structure_id', 'id'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_model_group_id_fkey']],
annotations={
chaise_tags.foreign_key: {
'domain_filter_pattern': 'structure_id={{structure_id}}'
}
},
acls={
'insert': ['*'],
'update': ['*']
},
on_update='CASCADE',
on_delete='SET NULL',
),
em.ForeignKey.define(
['Owner'],
'public',
'Catalog_Group', ['ID'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_Owner_fkey']],
acls={
'insert': [groups['pdb-curator']],
'update': [groups['pdb-curator']]
},
acl_bindings={
'set_owner': {
'types': ['update', 'insert'],
'scope_acl': ['*'],
'projection': ['ID'],
'projection_type': 'acl'
}
},
),
em.ForeignKey.define(
['state_id', 'structure_id'],
'PDB',
'ihm_multi_state_modeling', ['state_id', 'structure_id'],
constraint_names=[['PDB', 'ihm_multi_state_model_group_link_state_id_fkey']],
annotations={
chaise_tags.foreign_key: {
'domain_filter_pattern': 'structure_id={{structure_id}}'
}
},
acls={
'insert': ['*'],
'update': ['*']
},
on_update='CASCADE',
on_delete='SET NULL',
),
]
# Assemble the complete ERMrest table definition from the pieces defined
# above (columns, keys, foreign keys, annotations, ACLs).
# provide_system=True: include deriva's system columns (RID, RCB, RMB,
# RCT, RMT), which the keys/ACL bindings above reference.
table_def = em.Table.define(
    table_name,
    column_defs=column_defs,
    key_defs=key_defs,
    fkey_defs=fkey_defs,
    annotations=table_annotations,
    acls=table_acls,
    acl_bindings=table_acl_bindings,
    comment=table_comment,
    provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    """Merge the column documentation into table_def, then apply the table
    definition to the given ERMrest catalog via CatalogUpdater."""
    table_def.update(column_annotations=column_annotations,
                     column_comment=column_comment)
    CatalogUpdater(catalog).update_table(
        mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
    # Default deployment target; parse_args may override host/catalog_id
    # from the command line.
    host = 'pdb.isrd.isi.edu'
    catalog_id = 99
    mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
    # Connect using locally cached credentials, then apply the table definition.
    catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
    main(catalog, mode, replace)
| [
"brinda.vallat@rcsb.org"
] | brinda.vallat@rcsb.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.