Dataset schema (column: type, range; ⌀ = nullable):

- hexsha: string, length 40 to 40
- size: int64, 3 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 972
- max_stars_repo_name: string, length 6 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 to 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 to 24 ⌀
- max_issues_repo_path: string, length 3 to 972
- max_issues_repo_name: string, length 6 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 116k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 to 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 to 24 ⌀
- max_forks_repo_path: string, length 3 to 972
- max_forks_repo_name: string, length 6 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 to 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 to 24 ⌀
- content: string, length 3 to 1.03M
- avg_line_length: float64, 1.13 to 941k
- max_line_length: int64, 2 to 941k
- alphanum_fraction: float64, 0 to 1
--- record ---
hexsha: e7282d517f520af2d0d52900f706afdb00f27340 | size: 1,459 | ext: py | lang: Python
repo: chfc-cmi/cmr-seg-tl @ 0a2293a6cfec12e0e13c51ca87c9bb4648a0e880 | path: code/7T/con2png.py | licenses: ["MIT"] (same for stars/issues/forks)
stars: 1 (2021-05-27T20:58:51.000Z to 2021-05-27T20:58:51.000Z) | issues: null | forks: null
content:
import os
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import numpy as np
from tqdm import tqdm
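# Expected inputs, inferred from the reads below: `resolution.tsv` with columns
# `id`, `rows` and `columns`, plus one `<id>.tsv` per volunteer holding
# space-separated contour points (x, y, slice z, frame t, contour class c).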
volunteers = pd.read_csv("resolution.tsv",sep="\t",index_col="id")
os.makedirs('../masks', exist_ok=True)
Image.new('I', (472,512), 0).save('../masks/empty_472x512.png')
Image.new('I', (512,472), 0).save('../masks/empty_512x472.png')
for volunteerId,volunteer in tqdm(volunteers.iterrows()):
contour = pd.read_csv("{}.tsv".format(volunteerId),sep=" ",names=["x","y","z","t","c"],usecols=range(5))
iters = contour.iloc[:,2:4].drop_duplicates().to_numpy()
for i in tqdm(iters, leave=False):
z = i[0]
t = i[1]
        # select points for this slice/frame/contour with a single combined boolean mask
        poly = [(p[0], p[1]) for p in contour[(contour.z == z) & (contour.t == t) & (contour.c == 1)].to_numpy()[:, 0:2]]
        img = Image.new('L', (volunteer["columns"], volunteer["rows"]), 0)
        if len(poly) > 1:
            ImageDraw.Draw(img).polygon(poly, outline=1, fill=1)
        mask = np.array(img)
        poly2 = [(p[0], p[1]) for p in contour[(contour.z == z) & (contour.t == t) & (contour.c == 0)].to_numpy()[:, 0:2]]
        img = Image.new('L', (volunteer["columns"], volunteer["rows"]), 0)
        if len(poly2) > 1:
            ImageDraw.Draw(img).polygon(poly2, outline=1, fill=1)
mask2 = np.array(img)
im_array = 2*mask.astype(np.int32)-mask2
im = Image.fromarray(im_array, 'I')
im.save('../masks/{}_slice{:03d}_frame{:03d}-mask.png'.format(volunteerId,z,t))
avg_line_length: 44.212121 | max_line_length: 108 | alphanum_fraction: 0.612063

--- record ---
hexsha: 62fed9b3f099834b943432bfb0a7706679d4eaa9 | size: 15,555 | ext: py | lang: Python
repo: MinliangLin/lightDSFD @ 5f04ab89ac08eaf69d16c96f6c9e237701f80281 | path: DSFDv2_r18/operations.py | licenses: ["MIT"] (same for stars/issues/forks)
stars: 87 (2019-11-15T13:09:22.000Z to 2022-03-02T14:44:56.000Z) | issues: 6 (2020-01-16T06:49:09.000Z to 2021-06-06T14:16:11.000Z) | forks: 31 (2019-11-18T07:55:46.000Z to 2022-01-31T15:53:07.000Z)
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset.config import widerface_640 as cfg
# from layers.DCNv2 import DCN
RELU_FIRST = True
OPS = {
"none": lambda C, stride, affine: Zero(stride),
"avg_pool_3x3": lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
"max_pool_3x3": lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
"skip_connect": lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
"sep_conv_3x3": lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
"sep_conv_5x5": lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
"sep_conv_7x7": lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
"dil_conv_3x3": lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
"dil_conv_3x3_3": lambda C, stride, affine: DilConv(C, C, 3, stride, 3, 3, affine=affine),
"dil_conv_5x5": lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
"conv_1x3_3x1": lambda C, stride, affine: RfeConv(C, C, 3, stride, 1, affine=affine),
"conv_1x5_5x1": lambda C, stride, affine: RfeConv(C, C, 5, stride, 2, affine=affine),
# "dconv_3x3": lambda C, stride, affine: D_Conv(C, C, 3, 1, affine=affine, bn=False),
"conv_1x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,3), stride=stride, padding=(0,1), bn=False),
"conv_3x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(3,1), stride=stride, padding=(1,0), bn=False),
"conv_1x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,5), stride=stride, padding=(0,2), bn=False),
"conv_5x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(5,1), stride=stride, padding=(2,0), bn=False),
"conv_1x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=1, stride=1, padding=0, bn=False),
} # black: disable
BN_OPS = {
"none": lambda C, stride, affine: Zero(stride),
"avg_pool_3x3": lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
"max_pool_3x3": lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
"skip_connect": lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
"sep_conv_3x3": lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine, bn=True),
"sep_conv_5x5": lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine, bn=True),
"sep_conv_7x7": lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine, bn=True),
"dil_conv_3x3": lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine, bn=True),
"dil_conv_3x3_3": lambda C, stride, affine: DilConv(C, C, 3, stride, 3, 3, affine=affine, bn=True),
"dil_conv_5x5": lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine, bn=True),
"conv_1x3_3x1": lambda C, stride, affine: RfeConv(C, C, 3, stride, 1, affine=affine, bn=True),
"conv_1x5_5x1": lambda C, stride, affine: RfeConv(C, C, 5, stride, 2, affine=affine, bn=True),
# "dconv_3x3": lambda C, stride, affine: D_Conv(C, C, 3, 1, affine=affine, bn=True),
"conv_1x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,3), stride=stride, padding=(0,1), bn=True),
"conv_3x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(3,1), stride=stride, padding=(1,0), bn=True),
"conv_1x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,5), stride=stride, padding=(0,2), bn=True),
"conv_5x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(5,1), stride=stride, padding=(2,0), bn=True),
"conv_1x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=1, stride=1, padding=0, bn=True),
}
NORMAL_OPS = {
"none": lambda C, stride, affine: Zero(stride),
"avg_pool_3x3": lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
"max_pool_3x3": lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
"skip_connect": lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
"sep_conv_3x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=3, stride=stride, padding=1, bn=True),
"sep_conv_5x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=5, stride=stride, padding=1, bn=True),
"sep_conv_7x7": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=7, stride=stride, padding=1, bn=True),
"dil_conv_3x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=3, stride=stride, padding=2, bn=True, dilation=2),
"dil_conv_3x3_3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=3, stride=stride, padding=3, bn=True, dilation=3),
"dil_conv_5x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=5, stride=stride, padding=4, bn=True, dilation=2),
"conv_1x3_3x1": lambda C, stride, affine: RfeConv(C, C, 3, stride, 1, affine=affine, bn=True),
"conv_1x5_5x1": lambda C, stride, affine: RfeConv(C, C, 5, stride, 2, affine=affine, bn=True),
# "dconv_3x3": lambda C, stride, affine: D_Conv(C, C, 3, 1, affine=affine, bn=True),
"conv_1x3": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,3), stride=stride, padding=(0,1), bn=True),
"conv_3x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(3,1), stride=stride, padding=(1,0), bn=True),
"conv_1x5": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(1,5), stride=stride, padding=(0,2), bn=True),
"conv_5x1": lambda C, stride, affine: Normal_Relu_Conv(C, C, kernel_size=(5,1), stride=stride, padding=(2,0), bn=True),
}
class Normal_Relu_Conv(nn.Module):
def __init__(self, C_in, C_out, affine=True, bn=False, **kwargs):
super(Normal_Relu_Conv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(C_in, C_in, bias=True, **kwargs),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(C_in, C_in, bias=False, **kwargs),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential()
self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class _GumbelSoftMax(torch.autograd.Function):
"""
implementing the MixedOp, but carried out in a different way as DARTS
DARTS adds all operations together, then select the maximal one to construct the final network,
however, during the late process, more weights are assigned to the None, this is unreasonable under the
circumstance that per operation has the unsure number of inputs.
Thus, we modifies the original DARTS by applying way in GDAS to test.
This class aims to compute the gradients by ourself.
"""
@staticmethod
def forward(ctx, weights):
weights_norm = F.softmax(weights, dim=-1)
ctx.saved_for_backward = weights_norm
# select the max one
mask = torch.zeros_like(weights_norm).to(weights.device)
_, idx = weights_norm.topk(dim=-1, k=1, largest=True)
mask[idx] = 1.0
return mask
@staticmethod
def backward(ctx, grad_output):
gumbel_norm = ctx.saved_for_backward
return gumbel_norm * (1 - gumbel_norm) * grad_output * gumbel_norm.shape[0]
class GumbelSoftMax(nn.Module):
def __init__(self):
super(GumbelSoftMax, self).__init__()
def forward(self, weights, temp_coeff=1.0):
gumbel = -1e-3 * torch.log(-torch.log(torch.rand_like(weights))).to(weights.device)
weights = _GumbelSoftMax.apply((weights + gumbel) / temp_coeff)
return weights
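# A minimal usage sketch for the two classes above, assuming a hypothetical
# `candidate_ops` list built from OPS and a feature map `x`:
#
#   arch_weights = torch.randn(len(candidate_ops), requires_grad=True)
#   mask = GumbelSoftMax()(arch_weights, temp_coeff=1.0)  # one-hot in the forward pass
#   out = sum(m * op(x) for m, op in zip(mask, candidate_ops))
#
# Only the argmax operation contributes to the output, while the backward pass
# still propagates soft gradients to every architecture weight.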
# class D_Conv(nn.Module):
# """ Deformable Conv V2 """
# def __init__(self, C_in, C_out, kernel_size, padding, affine=True, bn=False):
# super(D_Conv, self).__init__()
# if bn:
# if cfg["syncBN"]:
# bn_layer = nn.SyncBatchNorm(C_out)
# else:
# bn_layer = nn.BatchNorm2d(C_out)
# self.op = nn.Sequential(
# nn.ReLU(inplace=False),
# DCN(
# C_in, C_in, kernel_size=kernel_size, padding=padding, stride=1, deformable_groups=C_in, groups=C_in
# ),
# nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
# bn_layer,
# )
# else:
# self.op = nn.Sequential(
# nn.ReLU(inplace=False),
# DCN(
# C_in, C_in, kernel_size=kernel_size, padding=padding, stride=1, deformable_groups=C_in, groups=C_in
# ),
# nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
# )
# def forward(self, x):
# return self.op(x)
class RfeConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True, bn=False):
super(RfeConv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=(1, kernel_size),
stride=(1, stride),
padding=(0, padding),
groups=C_in,
bias=True,
),
# nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=True),
nn.Conv2d(
C_in,
C_in,
kernel_size=(kernel_size, 1),
stride=(stride, 1),
padding=(padding, 0),
groups=C_in,
bias=True,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=(1, kernel_size),
stride=(1, stride),
padding=(0, padding),
groups=C_in,
bias=True,
),
nn.Conv2d(
C_in,
C_in,
kernel_size=(kernel_size, 1),
stride=(stride, 1),
padding=(padding, 0),
groups=C_in,
bias=True,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential()
self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class DilConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True, bn=False):
super(DilConv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=C_in,
bias=True,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in,
C_in,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=C_in,
bias=False,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential()
self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True, bn=False):
super(SepConv, self).__init__()
if not bn:
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=True,),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
)
else:
if cfg['GN']:
bn_layer = nn.GroupNorm(32, C_out)
elif cfg["syncBN"]:
bn_layer = nn.SyncBatchNorm(C_out)
else:
bn_layer = nn.BatchNorm2d(C_out)
op = nn.Sequential(
# nn.ReLU(),
nn.Conv2d(
C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False,
),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
bn_layer,
)
if RELU_FIRST:
self.op = nn.Sequential(nn.ReLU())
# self.op.add_module('0', nn.ReLU())
for i in range(1, len(op)+1):
self.op.add_module(str(i), op[i-1])
else:
self.op = op
self.op.add_module(str(len(op)), nn.ReLU())
# self.op = op
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, stride):
super(Zero, self).__init__()
self.stride = stride
def forward(self, x):
if self.stride == 1:
return x.mul(0.0)
return x[:, :, :: self.stride, :: self.stride].mul(0.0)
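# A minimal sketch of how the OPS registry above is typically consumed, assuming
# a hypothetical channel count C=64 and a random input:
#
#   op = OPS["sep_conv_3x3"](64, 1, True)   # arguments: (C, stride, affine)
#   y = op(torch.randn(2, 64, 32, 32))      # shape preserved: (2, 64, 32, 32)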
avg_line_length: 42.154472 | max_line_length: 133 | alphanum_fraction: 0.555963

--- record ---
hexsha: da6929de8c67d458d4310b9d67374c32b719175b | size: 2,753 | ext: py | lang: Python
repo: nokiam9/forester @ 117dc5c60f5cba633f0f760c488775f30510149c | path: flask/app/views.py | licenses: ["MIT"] (same for stars/issues/forks)
stars: 1 (2020-11-22T16:17:09.000Z to 2020-11-22T16:17:09.000Z) | issues: 2 (2019-10-20T17:22:10.000Z to 2022-03-02T14:56:35.000Z) | forks: null
content:
# -*- coding: utf-8 -*-
from flask import request, render_template, abort
from mongoengine.errors import NotUniqueError
from models import BidNotice
import json, datetime
NOTICE_TYPE_CONFIG = {
'0': '全部招标公告',
'1': '单一来源采购公告',
'2': '采购公告',
'7': '中标结果公示',
'3': '资格预审公告',
'8': '供应商信息收集',
'99': '供应商公告',
}
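# Translations of the notice types above: 0 = all tender notices,
# 1 = single-source procurement notice, 2 = procurement notice,
# 7 = award result announcement, 3 = prequalification notice,
# 8 = supplier information collection, 99 = supplier notice.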
PAGE_SIZE = 10
# pylint: disable=no-member
# All routes are registered via add_url_rule() instead of decorators, so the application logic stays inside views.py
def index():
return render_template('index.html')
def content_view(nid):
    notice = BidNotice.objects(nid=nid).first()
    if notice is None or not notice.notice_content:
        abort(status=404)
    return notice.notice_content
def hello():
return "Hello World from Flask in a uWSGI Nginx Docker container with \
Python 3.6 (from the example template)"
def notice_page_view(type_id):
""" View of /notice/pagination/[012378]/?page_id=1 """
try:
title = NOTICE_TYPE_CONFIG[type_id]
except KeyError:
abort(status=406) # Unacceptable url para
page_id=request.args.get('page_id', default=1, type=int)
    # An index was added to the document's meta definition to avoid running out of memory when sorting with order_by
if type_id == '0' or type_id is None:
todos_page = BidNotice.objects(). \
order_by("-published_date", "-timestamp"). \
paginate(page=page_id, per_page=PAGE_SIZE)
else:
todos_page = BidNotice.objects(type_id=type_id). \
order_by("-published_date", "-timestamp"). \
paginate(page=page_id, per_page=PAGE_SIZE)
return render_template('pagination.html',
todos_page=todos_page,
type_id=type_id,
title=title)
'''
Func: try to insert a single Notice
'''
def api_post_notice():
json_data = json.loads(request.get_data().decode("utf-8"))
try: # try to insert new record
BidNotice(
title = json_data['title'],
nid = json_data['nid'],
notice_type = json_data['notice_type'],
type_id = json_data['type_id'],
spider = json_data['spider'],
source_ch = json_data['source_ch'],
notice_url = json_data['notice_url'],
notice_content = json_data['notice_content'],
            published_date = datetime.datetime.strptime(json_data['published_date'], '%Y-%m-%d'),  # convert the date string
            # use the API gateway's current time (UTC+8)
            timestamp = datetime.datetime.utcnow() + datetime.timedelta(hours=8),
).save()
    except NotUniqueError:  # duplicate key
        print('Dup rec! nid=' + json_data['nid'])
        return 'dup rec', 200
    except ValueError as e:
        print('Unknown error:', e)
        return 'error', 200
    return 'ok', 200
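# A hypothetical request body for api_post_notice, using the fields read above:
#   {"title": "...", "nid": "...", "notice_type": "...", "type_id": "2",
#    "spider": "...", "source_ch": "...", "notice_url": "...",
#    "notice_content": "...", "published_date": "2020-11-22"}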
avg_line_length: 30.588889 | max_line_length: 106 | alphanum_fraction: 0.601526

--- record ---
hexsha: 4e4a74e2ad0642d6c581d3cc9a7d55d2e374924b | size: 5,765 | ext: py | lang: Python
path: coders/extended_grid.py | head: a09ea03e8b0f96ffe235755a1120569a61a0dd79 | licenses: ["Apache-2.0"] (same for stars/issues/forks)
stars: repo susannahsoon/oldperth, 302 (2015-03-19T20:40:47.000Z to 2022-03-27T23:31:21.000Z)
issues: repo gegilligan/oldnyc, 88 (2015-02-23T18:29:23.000Z to 2020-08-23T18:32:18.000Z)
forks: repo gegilligan/oldnyc, 186 (2015-02-12T11:16:39.000Z to 2022-02-12T16:21:08.000Z)
content:
#!/usr/bin/env python3
'''Geocode intersections by extending the existing NYC grid.
This lets us cover intersections which no longer exist, but may have in the
past, e.g.
15th Street and Avenue A
20th Street and 4th Avenue
This requires grid/intersections.json, which is generated by
grid/extrapolate.py.
'''
import json
import re
import fileinput
import sys
import coders.registration
import record
from grid import coder
ORDINALS = {
'First': 1,
'Second': 2,
'Third': 3,
'Fourth': 4,
'Fifth': 5,
'Sixth': 6,
'Seventh': 7,
'Eighth': 8,
'Ninth': 9,
'Tenth': 10,
'Eleventh': 11,
'Twelfth': 12,
# Some NYC-specific stuff
'Amsterdam': 10,
r'\bPark\b': 4, # the \b's prevent this from matching, e.g., 'Parkway'
'Columbus': 9,
'West End': 11,
'Lenox': 6 # Now Malcolm X
}
def parse_street_ave(street1, street2):
# try to get the avenue in street1
if re.search(r'str|st\.', street1, flags=re.I):
street2, street1 = street1, street2
if not re.search(r'ave', street1, flags=re.I):
raise ValueError('%s is not an avenue' % street1)
if not re.search(r'str|st\.', street2, flags=re.I):
raise ValueError('%s is not a street' % street2)
street1 = remove_parens(street1)
street2 = remove_parens(street2)
street2 = re.sub(r'West|East', '', street2, flags=re.I)
# pull the number from the street string
num = extract_ordinal(street2)
    if num is None:
raise ValueError('Unable to find a number in %s' % street2)
street2 = num
# Try the same for the avenue
num = extract_ordinal(street1)
    if num is not None:
street1 = str(num)
else:
# Look for something like 'Avenue A'
m = re.search(r'[aA]venue (A|B|C|D)', street1)
if m:
street1 = m.group(1)
else:
# How about 'Fourth', 'Fifth'?
num = multisearch(ORDINALS, street1)
if num is not None:
street1 = str(num)
else:
raise ValueError('Did not find an avenue in %s' % street1)
return street1, street2
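# Example, derived from the rules above:
#   parse_street_ave('Fourth Avenue', 'West 20th Street') -> ('4', 20)
# Note that the street comes back as an int while the avenue stays a string.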
def remove_parens(txt):
return re.sub(r'\([^)]+\)', '', txt)
def extract_ordinal(txt):
m = re.search(r'(\d+)(?:st|nd|rd|th) ', txt)
return int(m.group(1)) if m else None
def multisearch(re_dict, txt):
'''Search for any of the keys. Given a match, return the value.'''
    for k, v in re_dict.items():
if re.search(k, txt, flags=re.I):
return v
return None
class ExtendedGridCoder:
def __init__(self):
# This is done here to avoid the milstein registering itself.
from coders.milstein import cross_patterns
self._cross_patterns = cross_patterns
def _extractLocationStringFromRecord(self, r):
raw_loc = r.location().strip()
loc = re.sub(r'^[ ?\t"\[]+|[ ?\t"\]]+$', '', raw_loc)
return loc
def codeRecord(self, r):
if r.source() != 'Milstein Division': return None
loc = self._extractLocationStringFromRecord(r)
m = None
for pattern in self._cross_patterns:
m = re.match(pattern, loc)
if m: break
if not m: return None
street1, street2, boro = m.groups()
if not boro.startswith('Manhattan'):
return None
try:
avenue, street = parse_street_ave(street1, street2)
except ValueError as e:
            sys.stderr.write('%s: %s\n' % (loc, e))
return None
# Special cases
photo_id = r.photo_id()
if photo_id.startswith('723557f'):
# These are mislabeled as 93rd and B.
avenue, street = 'B', '8'
elif photo_id.startswith('711789') or photo_id.startswith('713187'):
# Mislabeled as 25th & D. Unclear about the second one.
avenue, street = 'A', '25'
elif photo_id.startswith('715535f'):
# Mislabeled as 103rd & 7th instead of 130th & 7th.
# This incorrectly puts it in the middle of Central Park!
avenue, street = '7', '130'
latlon = coder.code(avenue, street)
if not latlon: return None
# sys.stderr.write('coded (%s, %s) --> (%s, %s)\n' % (street1, street2, avenue, street))
return {
'address': '@%s,%s' % latlon,
'source': loc,
'grid': '(%s, %s)' % (avenue, street),
'type': 'intersection'
}
def getLatLonFromGeocode(self, geocode, data, r):
for result in geocode['results']:
# data['type'] is something like 'address' or 'intersection'.
if 'point_of_interest' in result['types']:
loc = result['geometry']['location']
return (loc['lat'], loc['lng'])
def finalize(self):
sys.stderr.write(' num_exact: %d\n' % coder.num_exact)
sys.stderr.write('num_extrapolated: %d\n' % coder.num_extrapolated)
sys.stderr.write(' num_unclaimed: %d\n' % coder.num_unclaimed)
def name(self):
return 'extended-grid'
coders.registration.registerCoderClass(ExtendedGridCoder)
# For fast iteration
if __name__ == '__main__':
grid_coder = ExtendedGridCoder()
r = record.Record()
num_ok, num_bad = 0, 0
for line in fileinput.input():
addr = line.strip()
if not addr: continue
r.tabular = {
'i': ['PHOTO_ID'],
'l': [addr],
'a': ['Milstein Division']
}
result = grid_coder.codeRecord(r)
print '"%s" -> %s' % (addr, result)
if result:
num_ok += 1
else:
num_bad += 1
sys.stderr.write('Parsed %d / %d = %.4f records\n' % (
num_ok, num_ok + num_bad, 1. * num_ok / (num_ok + num_bad)))
avg_line_length: 28.399015 | max_line_length: 96 | alphanum_fraction: 0.581266

--- record ---
hexsha: 7293f7ede2821fe189a4b94f501aa6829d340e3b | size: 5,034 | ext: py | lang: Python
repo: andrew-blake/evohome-client @ 5ca24cccad2a49223222880bc059ab0396efd7f0 | path: evohomeclient2/__init__.py | licenses: ["Apache-2.0"] (same for stars/issues/forks)
stars: null | issues: null | forks: null
content:
from __future__ import print_function
import requests
from .location import Location
from .base import EvohomeBase
class EvohomeClient(EvohomeBase):
def __init__(self, username, password, debug=False):
super(EvohomeClient, self).__init__(debug)
self.username = username
self.password = password
self.access_token = None
self.locations = []
self._login()
def _get_location(self, location):
if location is None:
return self.installation_info[0]['locationInfo']['locationId']
else:
return location
def _get_single_heating_system(self):
# This allows a shortcut for some systems
location = None
gateway = None
control_system = None
if len(self.locations)==1:
location = self.locations[0]
else:
raise Exception("More than one location available")
if len(location._gateways)==1:
gateway = location._gateways[0]
else:
raise Exception("More than one gateway available")
if len(gateway._control_systems)==1:
control_system = gateway._control_systems[0]
else:
raise Exception("More than one control system available")
return control_system
def _login(self):
url = 'https://rs.alarmnet.com:443/TotalConnectComfort/Auth/OAuth/Token'
headers = {
'Authorization': 'Basic YjAxM2FhMjYtOTcyNC00ZGJkLTg4OTctMDQ4YjlhYWRhMjQ5OnRlc3Q=',
'Accept': 'application/json, application/xml, text/json, text/x-json, text/javascript, text/xml'
}
data = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Host': 'rs.alarmnet.com/',
'Cache-Control':'no-store no-cache',
'Pragma': 'no-cache',
'grant_type': 'password',
'scope': 'EMEA-V1-Basic EMEA-V1-Anonymous EMEA-V1-Get-Current-User-Account',
'Username': self.username,
'Password': self.password,
'Connection': 'Keep-Alive'
}
r = requests.post(url, data=data, headers=headers)
self.access_token = self._convert(r.text)['access_token']
self.headers = {
'Authorization': 'bearer ' + self.access_token,
'applicationId': 'b013aa26-9724-4dbd-8897-048b9aada249',
'Accept': 'application/json, application/xml, text/json, text/x-json, text/javascript, text/xml'
}
self.user_account()
self.installation()
def user_account(self):
r = requests.get('https://rs.alarmnet.com:443/TotalConnectComfort/WebAPI/emea/api/v1/userAccount', headers=self.headers)
self.account_info = self._convert(r.text)
return self.account_info
def installation(self):
r = requests.get('https://rs.alarmnet.com:443/TotalConnectComfort/WebAPI/emea/api/v1/location/installationInfo?userId=%s&includeTemperatureControlSystems=True' % self.account_info['userId'], headers=self.headers)
self.installation_info = self._convert(r.text)
self.system_id = self.installation_info[0]['gateways'][0]['temperatureControlSystems'][0]['systemId']
for loc_data in self.installation_info:
self.locations.append(Location(self, loc_data))
return self.installation_info
def full_installation(self, location=None):
location = self._get_location(location)
r = requests.get('https://rs.alarmnet.com:443/TotalConnectComfort/WebAPI/emea/api/v1/location/%s/installationInfo?includeTemperatureControlSystems=True' % location, headers=self.headers)
return self._convert(r.text)
def gateway(self):
r = requests.get('https://rs.alarmnet.com:443/TotalConnectComfort/WebAPI/emea/api/v1/gateway', headers=self.headers)
return self._convert(r.text)
def set_status_normal(self):
return self._get_single_heating_system().set_status_normal()
def set_status_custom(self, until=None):
return self._get_single_heating_system().set_status_custom(until)
def set_status_eco(self, until=None):
return self._get_single_heating_system().set_status_eco(until)
def set_status_away(self, until=None):
return self._get_single_heating_system().set_status_away(until)
def set_status_dayoff(self, until=None):
return self._get_single_heating_system().set_status_dayoff(until)
def set_status_heatingoff(self, until=None):
return self._get_single_heating_system().set_status_heatingoff(until)
def temperatures(self):
return self._get_single_heating_system().temperatures()
def zone_schedules_backup(self, filename):
return self._get_single_heating_system().zone_schedules_backup(filename)
def zone_schedules_restore(self, filename):
return self._get_single_heating_system().zone_schedules_restore(filename)
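# A minimal usage sketch, assuming valid Total Connect Comfort credentials
# (hypothetical values):
#
#   client = EvohomeClient('user@example.com', 'secret')
#   print(client.temperatures())
#   client.set_status_away()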
avg_line_length: 38.427481 | max_line_length: 220 | alphanum_fraction: 0.665276

--- record ---
hexsha: c8eae13c99af7e9a4258761ea47b53799c651dcf | size: 875 | ext: py | lang: Python
repo: xgid/vpo @ 5d13a28d36d87cad55dd08b82a47b706b8a3b8aa | path: vpo/__init__.py | licenses: ["MIT"] (same for stars/issues/forks)
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
"""
vpo
~~~~~~~~~~~~~~~~~~~
Prueba de paquete Python con cookiecutter plantilla pyvanguard
:copyright: (c) 2015 by xgid
:licence: MIT, see LICENCE for more details
"""
from __future__ import absolute_import, unicode_literals
import logging
# Generate your own AsciiArt at:
# patorjk.com/software/taag/#f=Calvin%20S&t=Vanguard Package One
__banner__ = r"""
╦ ╦┌─┐┌┐┌┌─┐┬ ┬┌─┐┬─┐┌┬┐
╚╗╔╝├─┤││││ ┬│ │├─┤├┬┘ ││ by xgid
╚╝ ┴ ┴┘└┘└─┘└─┘┴ ┴┴└──┴┘
"""
__title__ = 'vpo'
__summary__ = 'Prueba de paquete Python con cookiecutter plantilla pyvanguard'
__uri__ = 'https://github.com/xgid/vpo'
__version__ = '0.0.1'
__author__ = 'xgid'
__email__ = 'xgid03@zoho.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 xgid'
# the user should dictate what happens when a logging event occurs
logging.getLogger(__name__).addHandler(logging.NullHandler())
avg_line_length: 24.305556 | max_line_length: 78 | alphanum_fraction: 0.668571

--- record ---
hexsha: 95e23fe83346d0f11155f62a0eb29f11889f6408 | size: 63 | ext: py | lang: Python
repo: ashim95/wordsalad @ 77dc1cbc77d50fa2546a0ef6eee22b983c56fa61 | path: src/transformers/additions/__init__.py | licenses: ["Apache-2.0"] (same for stars/issues/forks)
stars: 13 (2021-06-18T08:18:32.000Z to 2022-02-18T22:17:11.000Z) | issues: 1 (2021-06-18T01:38:40.000Z to 2021-06-18T01:38:40.000Z) | forks: 4 (2021-06-18T06:22:29.000Z to 2022-03-25T22:37:25.000Z)
content:
from .additional_utils import replace_tokens, freeze_full_bert
avg_line_length: 31.5 | max_line_length: 62 | alphanum_fraction: 0.888889

--- record ---
hexsha: 55b446d88e67ca14e2099fbad0ac6f9fb7b277a7 | size: 568 | ext: py | lang: Python
path: tests/developer_tools/designer/components/test_title_component.py | licenses: ["CC-BY-4.0"] (same for stars/issues/forks)
stars: repo nickderobertis/awesome-panel-extensions @ 07a4898b5539b9b6f2f55c5fedfda4a7b0b240e9, 3 (2020-07-16T07:28:45.000Z to 2020-07-17T12:53:56.000Z)
issues: repo MarcSkovMadsen/panel-extensions-template @ f41ad8d8fb8502f87de3a4992917cbffb6299012, null
forks: repo MarcSkovMadsen/panel-extensions-template @ f41ad8d8fb8502f87de3a4992917cbffb6299012, null
content:
# pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
import pytest
from awesome_panel_extensions.developer_tools.designer.components import TitleComponent
@pytest.fixture
def title_component():
return TitleComponent()
def test_constructor(title_component):
assert isinstance(title_component, TitleComponent)
assert "logo_url" in title_component.param
assert "logo_spinning_url" in title_component.param
assert "spinning" in title_component.param
avg_line_length: 31.555556 | max_line_length: 93 | alphanum_fraction: 0.820423

--- record ---
hexsha: 3690133cec5f1c6906f91745189f33d6093c3457 | size: 1,953 | ext: py | lang: Python
repo: zachberger/terraform-google-event-function @ b2a4306c7c774aa287b0f5166ae8b82b544f0041 | path: helpers/combine_docfiles.py | licenses: ["Apache-2.0"] (same for stars/issues/forks)
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Please note that this file was generated from
# [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template).
# Please make sure to contribute relevant changes upstream!
''' Combine the file given as script argument 1 with the content of the
file given as script argument 2, using the beginning-of-line separators
hardcoded as regexes in this file. Any text matching the separate
exclusion regex below is dropped from the replacement content.
'''
import os
import re
import sys
insert_separator_regex = r'(.*?\[\^\]\:\ \(autogen_docs_start\))(.*?)(\n\[\^\]\:\ \(autogen_docs_end\).*?$)' # noqa: E501
exclude_separator_regex = r'(.*?)Copyright 20\d\d Google LLC.*?limitations under the License.(.*?)$' # noqa: E501
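# The regexes above expect marker lines of this shape in the input file:
#   [^]: (autogen_docs_start)
#   ...generated documentation...
#   [^]: (autogen_docs_end)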
if len(sys.argv) != 3:
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
sys.exit(0)
input_content = open(sys.argv[1], "r").read()
replace_content = open(sys.argv[2], "r").read()

# Exclude the specified content from the replacement content
groups = re.match(
    exclude_separator_regex,
    replace_content,
    re.DOTALL
).groups(0)
replace_content = groups[0] + groups[1]

# Find where to put the replacement content, overwrite the input file
groups = re.match(insert_separator_regex, input_content, re.DOTALL).groups(0)
output = groups[0] + replace_content + groups[2] + "\n"
open(sys.argv[1], "w").write(output)
avg_line_length: 32.55 | max_line_length: 122 | alphanum_fraction: 0.72299

--- record ---
hexsha: 0837df2bc943bec22411b08401a10f2d9460c418 | size: 2,696 | ext: py | lang: Python
repo: reithmeier/Mask_RCNN @ 4e7d93adf8c244dc541c7fcc959d5e994c8dd9b1 | path: samples/result_visualisation/plot_precision_recall_curve.py | licenses: ["MIT"] (same for stars/issues/forks)
stars: null | issues: null | forks: null
content:
# **********************************************************************************************************************
#
# brief: simple script to plot precision-recall curves
#
# author: Lukas Reithmeier
# date: 29.09.2020
#
# **********************************************************************************************************************
from matplotlib import pyplot as plt
import pandas as pd
from tikzplotlib import save as tikz_save
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.style.use('ggplot')
def plot_precision_recall_curve(precisions, recalls):
    fig, ax = plt.subplots()
    ax.plot(recalls, precisions, label="RGB")
    ax.set_xlabel("Recall")
    ax.set_ylabel("Precision")
    ax.set_xlim(0., 1.1)
    ax.set_ylim(0., 1.1)
    plt.show()
def plot_precision_recall_curves(precisions_rgb, recalls_rgb, precisions_d3, recalls_d3, precisions_rgbd, recalls_rgbd,
                                 precisions_rgbdf, recalls_rgbdf):
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.plot(recalls_rgb, precisions_rgb, label="RGB")
    ax.plot(recalls_d3, precisions_d3, label="D3")
    ax.plot(recalls_rgbd, precisions_rgbd, label="RGBD")
    ax.plot(recalls_rgbdf, precisions_rgbdf, label="RGBD-F")
    ax.set_xlabel("Recall")
    ax.set_ylabel("Precision")
    ax.set_ylim(0., 1.05)
    ax.legend()
    tikz_save("precision_recall_elevator.tex")
    plt.show()
data_rgb = pd.read_csv("./precision_recalls_ELEVATOR_RGB.csv")
plot_precision_recall_curve(data_rgb["precision"], data_rgb["recall"])
data_d3 = pd.read_csv("./precision_recalls_ELEVATOR_D3.csv")
plot_precision_recall_curve(data_d3["precision"], data_d3["recall"])
data_rgbd = pd.read_csv("./precision_recalls_ELEVATOR_RGBD.csv")
plot_precision_recall_curve(data_rgbd["precision"], data_rgbd["recall"])
data_rgbdf = pd.read_csv("./precision_recalls_ELEVATOR_RGBDFusenet.csv")
plot_precision_recall_curve(data_rgbdf["precision"], data_rgbdf["recall"])
plot_precision_recall_curves(data_rgb["precision"], data_rgb["recall"], data_d3["precision"], data_d3["recall"],
data_rgbd["precision"], data_rgbd["recall"],
data_rgbdf["precision"], data_rgbdf["recall"])
avg_line_length: 39.072464 | max_line_length: 120 | alphanum_fraction: 0.655415

--- record ---
hexsha: 4ef2e1d75a340f7e464bc74538f9aa9342dbac32 | size: 62,690 | ext: py | lang: Python
repo: johann-petrak/python-sdk @ e8a2f1883822f0ad2d655d31a595dbd93f5c81c9 | path: ibm_watson/personality_insights_v3.py | licenses: ["Apache-2.0"] (same for stars/issues/forks)
stars: 1,579 (2015-10-08T14:02:17.000Z to 2022-02-28T10:49:21.000Z) | issues: 749 (2015-10-08T20:00:24.000Z to 2022-03-21T21:33:17.000Z) | forks: 1,006 (2015-10-24T06:30:58.000Z to 2022-03-23T07:10:04.000Z)
content:
# coding: utf-8
# (C) Copyright IBM Corp. 2016, 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025
"""
IBM Watson™ Personality Insights is discontinued. Existing instances are supported
until 1 December 2021, but as of 1 December 2020, you cannot create new instances. Any
instance that exists on 1 December 2021 will be deleted.<br/><br/>No direct replacement
exists for Personality Insights. However, you can consider using [IBM Watson™
Natural Language
Understanding](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-about)
on IBM Cloud® as part of a replacement analytic workflow for your Personality Insights
use cases. You can use Natural Language Understanding to extract data and insights from
text, such as keywords, categories, sentiment, emotion, and syntax. For more information
about the personality models in Personality Insights, see [The science behind the
service](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-science).
{: deprecated}
The IBM Watson Personality Insights service enables applications to derive insights from
social media, enterprise data, or other digital communications. The service uses
linguistic analytics to infer individuals' intrinsic personality characteristics,
including Big Five, Needs, and Values, from digital communications such as email, text
messages, tweets, and forum posts.
The service can automatically infer, from potentially noisy social media, portraits of
individuals that reflect their personality characteristics. The service can infer
consumption preferences based on the results of its analysis and, for JSON content that is
timestamped, can report temporal behavior.
* For information about the meaning of the models that the service uses to describe
personality characteristics, see [Personality
models](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-models#models).
* For information about the meaning of the consumption preferences, see [Consumption
preferences](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-preferences#preferences).
**Note:** Request logging is disabled for the Personality Insights service. Regardless of
whether you set the `X-Watson-Learning-Opt-Out` request header, the service does not log
or retain data from requests and responses.
API Version: 3.4.4
See: https://cloud.ibm.com/docs/personality-insights
"""
from enum import Enum
from typing import Dict, List, TextIO, Union
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class PersonalityInsightsV3(BaseService):
"""The Personality Insights V3 service."""
DEFAULT_SERVICE_URL = 'https://api.us-south.personality-insights.watson.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'personality_insights'
def __init__(
self,
version: str,
authenticator: Authenticator = None,
service_name: str = DEFAULT_SERVICE_NAME,
) -> None:
"""
Construct a new client for the Personality Insights service.
:param str version: Release date of the version of the API you want to use.
Specify dates in YYYY-MM-DD format. The current version is `2017-10-13`.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
print(
'warning: On 1 December 2021, Personality Insights will no longer be available. For more information, see https://github.com/watson-developer-cloud/python-sdk/tree/master#personality-insights-deprecation.'
)
if version is None:
raise ValueError('version must be provided')
if not authenticator:
authenticator = get_authenticator_from_environment(service_name)
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
self.version = version
self.configure_service(service_name)
#########################
# Methods
#########################
def profile(self,
content: Union['Content', str, TextIO],
accept: str,
*,
content_type: str = None,
content_language: str = None,
accept_language: str = None,
raw_scores: bool = None,
csv_headers: bool = None,
consumption_preferences: bool = None,
**kwargs) -> DetailedResponse:
"""
Get profile.
Generates a personality profile for the author of the input text. The service
accepts a maximum of 20 MB of input content, but it requires much less text to
produce an accurate profile. The service can analyze text in Arabic, English,
Japanese, Korean, or Spanish. It can return its results in a variety of languages.
**See also:**
* [Requesting a
profile](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-input#input)
* [Providing sufficient
input](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-input#sufficient)
### Content types
You can provide input content as plain text (`text/plain`), HTML (`text/html`),
or JSON (`application/json`) by specifying the **Content-Type** parameter. The
default is `text/plain`.
* Per the JSON specification, the default character encoding for JSON content is
effectively always UTF-8.
* Per the HTTP specification, the default encoding for plain text and HTML is
ISO-8859-1 (effectively, the ASCII character set).
When specifying a content type of plain text or HTML, include the `charset`
parameter to indicate the character encoding of the input text; for example,
`Content-Type: text/plain;charset=utf-8`.
**See also:** [Specifying request and response
formats](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-input#formats)
### Accept types
You must request a response as JSON (`application/json`) or comma-separated
values (`text/csv`) by specifying the **Accept** parameter. CSV output includes a
fixed number of columns. Set the **csv_headers** parameter to `true` to request
optional column headers for CSV output.
**See also:**
* [Understanding a JSON
profile](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-output#output)
* [Understanding a CSV
profile](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-outputCSV#outputCSV).
:param Content content: A maximum of 20 MB of content to analyze, though
the service requires much less text; for more information, see [Providing
sufficient
input](https://cloud.ibm.com/docs/personality-insights?topic=personality-insights-input#sufficient).
For JSON input, provide an object of type `Content`.
:param str accept: The type of the response. For more information, see
**Accept types** in the method description.
:param str content_type: (optional) The type of the input. For more
information, see **Content types** in the method description.
:param str content_language: (optional) The language of the input text for
the request: Arabic, English, Japanese, Korean, or Spanish. Regional
variants are treated as their parent language; for example, `en-US` is
interpreted as `en`.
The effect of the **Content-Language** parameter depends on the
**Content-Type** parameter. When **Content-Type** is `text/plain` or
`text/html`, **Content-Language** is the only way to specify the language.
When **Content-Type** is `application/json`, **Content-Language** overrides
a language specified with the `language` parameter of a `ContentItem`
object, and content items that specify a different language are ignored;
omit this parameter to base the language on the specification of the
content items. You can specify any combination of languages for
**Content-Language** and **Accept-Language**.
:param str accept_language: (optional) The desired language of the
response. For two-character arguments, regional variants are treated as
their parent language; for example, `en-US` is interpreted as `en`. You can
specify any combination of languages for the input and response content.
:param bool raw_scores: (optional) Indicates whether a raw score in
addition to a normalized percentile is returned for each characteristic;
raw scores are not compared with a sample population. By default, only
normalized percentiles are returned.
:param bool csv_headers: (optional) Indicates whether column labels are
returned with a CSV response. By default, no column labels are returned.
Applies only when the response type is CSV (`text/csv`).
:param bool consumption_preferences: (optional) Indicates whether
consumption preferences are returned with the results. By default, no
consumption preferences are returned.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `Profile` object
"""
if content is None:
raise ValueError('content must be provided')
if accept is None:
raise ValueError('accept must be provided')
if isinstance(content, Content):
content = convert_model(content)
content_type = content_type or 'application/json'
headers = {
'Accept': accept,
'Content-Type': content_type,
'Content-Language': content_language,
'Accept-Language': accept_language
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='profile')
headers.update(sdk_headers)
params = {
'version': self.version,
'raw_scores': raw_scores,
'csv_headers': csv_headers,
'consumption_preferences': consumption_preferences
}
if isinstance(content, dict):
data = json.dumps(content)
if content_type is None:
headers['Content-Type'] = 'application/json'
else:
data = content
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v3/profile'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request, **kwargs)
return response
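# A minimal usage sketch, assuming an IAM API key (hypothetical value) and
# plain-text input:
#
#   from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
#   service = PersonalityInsightsV3(version='2017-10-13',
#                                   authenticator=IAMAuthenticator('my-apikey'))
#   profile = service.profile('...at least a few hundred words of text...',
#                             accept='application/json',
#                             content_type='text/plain').get_result()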
class ProfileEnums:
"""
Enums for profile parameters.
"""
class Accept(str, Enum):
"""
The type of the response. For more information, see **Accept types** in the method
description.
"""
APPLICATION_JSON = 'application/json'
TEXT_CSV = 'text/csv'
class ContentType(str, Enum):
"""
The type of the input. For more information, see **Content types** in the method
description.
"""
APPLICATION_JSON = 'application/json'
TEXT_HTML = 'text/html'
TEXT_PLAIN = 'text/plain'
class ContentLanguage(str, Enum):
"""
The language of the input text for the request: Arabic, English, Japanese, Korean,
or Spanish. Regional variants are treated as their parent language; for example,
`en-US` is interpreted as `en`.
The effect of the **Content-Language** parameter depends on the **Content-Type**
parameter. When **Content-Type** is `text/plain` or `text/html`,
**Content-Language** is the only way to specify the language. When
**Content-Type** is `application/json`, **Content-Language** overrides a language
specified with the `language` parameter of a `ContentItem` object, and content
items that specify a different language are ignored; omit this parameter to base
the language on the specification of the content items. You can specify any
combination of languages for **Content-Language** and **Accept-Language**.
"""
AR = 'ar'
EN = 'en'
ES = 'es'
JA = 'ja'
KO = 'ko'
class AcceptLanguage(str, Enum):
"""
The desired language of the response. For two-character arguments, regional
variants are treated as their parent language; for example, `en-US` is interpreted
as `en`. You can specify any combination of languages for the input and response
content.
"""
AR = 'ar'
DE = 'de'
EN = 'en'
ES = 'es'
FR = 'fr'
IT = 'it'
JA = 'ja'
KO = 'ko'
PT_BR = 'pt-br'
ZH_CN = 'zh-cn'
ZH_TW = 'zh-tw'
##############################################################################
# Models
##############################################################################
class Behavior():
"""
The temporal behavior for the input content.
:attr str trait_id: The unique, non-localized identifier of the characteristic
to which the results pertain. IDs have the form `behavior_{value}`.
:attr str name: The user-visible, localized name of the characteristic.
:attr str category: The category of the characteristic: `behavior` for temporal
data.
:attr float percentage: For JSON content that is timestamped, the percentage of
timestamped input data that occurred during that day of the week or hour of the
day. The range is 0 to 1.
"""
def __init__(self, trait_id: str, name: str, category: str,
percentage: float) -> None:
"""
Initialize a Behavior object.
:param str trait_id: The unique, non-localized identifier of the
characteristic to which the results pertain. IDs have the form
`behavior_{value}`.
:param str name: The user-visible, localized name of the characteristic.
:param str category: The category of the characteristic: `behavior` for
temporal data.
:param float percentage: For JSON content that is timestamped, the
percentage of timestamped input data that occurred during that day of the
week or hour of the day. The range is 0 to 1.
"""
self.trait_id = trait_id
self.name = name
self.category = category
self.percentage = percentage
@classmethod
def from_dict(cls, _dict: Dict) -> 'Behavior':
"""Initialize a Behavior object from a json dictionary."""
args = {}
if 'trait_id' in _dict:
args['trait_id'] = _dict.get('trait_id')
else:
raise ValueError(
'Required property \'trait_id\' not present in Behavior JSON')
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in Behavior JSON')
if 'category' in _dict:
args['category'] = _dict.get('category')
else:
raise ValueError(
'Required property \'category\' not present in Behavior JSON')
if 'percentage' in _dict:
args['percentage'] = _dict.get('percentage')
else:
raise ValueError(
'Required property \'percentage\' not present in Behavior JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Behavior object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'trait_id') and self.trait_id is not None:
_dict['trait_id'] = self.trait_id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'category') and self.category is not None:
_dict['category'] = self.category
if hasattr(self, 'percentage') and self.percentage is not None:
_dict['percentage'] = self.percentage
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Behavior object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Behavior') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Behavior') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConsumptionPreferences():
"""
A consumption preference that the service inferred from the input content.
:attr str consumption_preference_id: The unique, non-localized identifier of the
consumption preference to which the results pertain. IDs have the form
`consumption_preferences_{preference}`.
:attr str name: The user-visible, localized name of the consumption preference.
:attr float score: The score for the consumption preference:
* `0.0`: Unlikely
* `0.5`: Neutral
* `1.0`: Likely
The scores for some preferences are binary and do not allow a neutral value. The
score is an indication of preference based on the results inferred from the
input text, not a normalized percentile.
"""
def __init__(self, consumption_preference_id: str, name: str,
score: float) -> None:
"""
Initialize a ConsumptionPreferences object.
:param str consumption_preference_id: The unique, non-localized identifier
of the consumption preference to which the results pertain. IDs have the
form `consumption_preferences_{preference}`.
:param str name: The user-visible, localized name of the consumption
preference.
:param float score: The score for the consumption preference:
* `0.0`: Unlikely
* `0.5`: Neutral
* `1.0`: Likely
The scores for some preferences are binary and do not allow a neutral
value. The score is an indication of preference based on the results
inferred from the input text, not a normalized percentile.
"""
self.consumption_preference_id = consumption_preference_id
self.name = name
self.score = score
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConsumptionPreferences':
"""Initialize a ConsumptionPreferences object from a json dictionary."""
args = {}
if 'consumption_preference_id' in _dict:
args['consumption_preference_id'] = _dict.get(
'consumption_preference_id')
else:
raise ValueError(
'Required property \'consumption_preference_id\' not present in ConsumptionPreferences JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in ConsumptionPreferences JSON'
)
if 'score' in _dict:
args['score'] = _dict.get('score')
else:
raise ValueError(
'Required property \'score\' not present in ConsumptionPreferences JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ConsumptionPreferences object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'consumption_preference_id'
) and self.consumption_preference_id is not None:
_dict['consumption_preference_id'] = self.consumption_preference_id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConsumptionPreferences object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConsumptionPreferences') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConsumptionPreferences') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
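# Illustrative usage sketch (not part of the generated SDK): building a
# ConsumptionPreferences object from an assumed sample payload and mapping
# its three-valued score to a label, per the score semantics documented
# above. The id, name, and score below are illustrative assumptions.
def _example_consumption_preference_label() -> str:
    payload = {
        'consumption_preference_id': 'consumption_preferences_example',
        'name': 'Example preference',
        'score': 1.0,
    }
    preference = ConsumptionPreferences.from_dict(payload)
    labels = {0.0: 'unlikely', 0.5: 'neutral', 1.0: 'likely'}
    return '{}: {}'.format(preference.name,
                           labels.get(preference.score, 'unknown'))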
class ConsumptionPreferencesCategory():
"""
The consumption preferences that the service inferred from the input content.
:attr str consumption_preference_category_id: The unique, non-localized
identifier of the consumption preferences category to which the results pertain.
IDs have the form `consumption_preferences_{category}`.
:attr str name: The user-visible name of the consumption preferences category.
:attr List[ConsumptionPreferences] consumption_preferences: Detailed results
inferred from the input text for the individual preferences of the category.
"""
def __init__(
self, consumption_preference_category_id: str, name: str,
consumption_preferences: List['ConsumptionPreferences']) -> None:
"""
Initialize a ConsumptionPreferencesCategory object.
:param str consumption_preference_category_id: The unique, non-localized
identifier of the consumption preferences category to which the results
pertain. IDs have the form `consumption_preferences_{category}`.
:param str name: The user-visible name of the consumption preferences
category.
:param List[ConsumptionPreferences] consumption_preferences: Detailed
results inferred from the input text for the individual preferences of the
category.
"""
self.consumption_preference_category_id = consumption_preference_category_id
self.name = name
self.consumption_preferences = consumption_preferences
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConsumptionPreferencesCategory':
"""Initialize a ConsumptionPreferencesCategory object from a json dictionary."""
args = {}
if 'consumption_preference_category_id' in _dict:
args['consumption_preference_category_id'] = _dict.get(
'consumption_preference_category_id')
else:
raise ValueError(
'Required property \'consumption_preference_category_id\' not present in ConsumptionPreferencesCategory JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in ConsumptionPreferencesCategory JSON'
)
if 'consumption_preferences' in _dict:
args['consumption_preferences'] = [
ConsumptionPreferences.from_dict(x)
for x in _dict.get('consumption_preferences')
]
else:
raise ValueError(
'Required property \'consumption_preferences\' not present in ConsumptionPreferencesCategory JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ConsumptionPreferencesCategory object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'consumption_preference_category_id'
) and self.consumption_preference_category_id is not None:
_dict[
'consumption_preference_category_id'] = self.consumption_preference_category_id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'consumption_preferences'
) and self.consumption_preferences is not None:
_dict['consumption_preferences'] = [
x.to_dict() for x in self.consumption_preferences
]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConsumptionPreferencesCategory object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConsumptionPreferencesCategory') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConsumptionPreferencesCategory') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class Content():
"""
The full input content that the service is to analyze.
:attr List[ContentItem] content_items: An array of `ContentItem` objects that
provides the text that is to be analyzed.
"""
def __init__(self, content_items: List['ContentItem']) -> None:
"""
Initialize a Content object.
:param List[ContentItem] content_items: An array of `ContentItem` objects
that provides the text that is to be analyzed.
"""
self.content_items = content_items
@classmethod
def from_dict(cls, _dict: Dict) -> 'Content':
"""Initialize a Content object from a json dictionary."""
args = {}
if 'contentItems' in _dict:
args['content_items'] = [
ContentItem.from_dict(x) for x in _dict.get('contentItems')
]
else:
raise ValueError(
'Required property \'contentItems\' not present in Content JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Content object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'content_items') and self.content_items is not None:
_dict['contentItems'] = [x.to_dict() for x in self.content_items]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Content object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Content') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Content') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ContentItem():
"""
An input content item that the service is to analyze.
:attr str content: The content that is to be analyzed. The service supports up
to 20 MB of content for all `ContentItem` objects combined.
:attr str id: (optional) A unique identifier for this content item.
:attr int created: (optional) A timestamp that identifies when this content was
created. Specify a value in milliseconds since the UNIX Epoch (January 1, 1970,
at 0:00 UTC). Required only for results that include temporal behavior data.
:attr int updated: (optional) A timestamp that identifies when this content was
last updated. Specify a value in milliseconds since the UNIX Epoch (January 1,
1970, at 0:00 UTC). Required only for results that include temporal behavior
data.
:attr str contenttype: (optional) The MIME type of the content. The default is
plain text. The tags are stripped from HTML content before it is analyzed; plain
text is processed as submitted.
:attr str language: (optional) The language identifier (two-letter ISO 639-1
identifier) for the language of the content item. The default is `en` (English).
Regional variants are treated as their parent language; for example, `en-US` is
interpreted as `en`. A language specified with the **Content-Type** parameter
overrides the value of this parameter; any content items that specify a
different language are ignored. Omit the **Content-Type** parameter to base the
language on the most prevalent specification among the content items; again,
content items that specify a different language are ignored. You can specify any
combination of languages for the input and response content.
:attr str parentid: (optional) The unique ID of the parent content item for this
item. Used to identify hierarchical relationships between posts/replies,
messages/replies, and so on.
:attr bool reply: (optional) Indicates whether this content item is a reply to
another content item.
:attr bool forward: (optional) Indicates whether this content item is a
forwarded/copied version of another content item.
"""
def __init__(self,
content: str,
*,
id: str = None,
created: int = None,
updated: int = None,
contenttype: str = None,
language: str = None,
parentid: str = None,
reply: bool = None,
forward: bool = None) -> None:
"""
Initialize a ContentItem object.
:param str content: The content that is to be analyzed. The service
supports up to 20 MB of content for all `ContentItem` objects combined.
:param str id: (optional) A unique identifier for this content item.
:param int created: (optional) A timestamp that identifies when this
content was created. Specify a value in milliseconds since the UNIX Epoch
(January 1, 1970, at 0:00 UTC). Required only for results that include
temporal behavior data.
:param int updated: (optional) A timestamp that identifies when this
content was last updated. Specify a value in milliseconds since the UNIX
Epoch (January 1, 1970, at 0:00 UTC). Required only for results that
include temporal behavior data.
:param str contenttype: (optional) The MIME type of the content. The
default is plain text. The tags are stripped from HTML content before it is
analyzed; plain text is processed as submitted.
:param str language: (optional) The language identifier (two-letter ISO
639-1 identifier) for the language of the content item. The default is `en`
(English). Regional variants are treated as their parent language; for
example, `en-US` is interpreted as `en`. A language specified with the
**Content-Type** parameter overrides the value of this parameter; any
content items that specify a different language are ignored. Omit the
**Content-Type** parameter to base the language on the most prevalent
specification among the content items; again, content items that specify a
different language are ignored. You can specify any combination of
languages for the input and response content.
:param str parentid: (optional) The unique ID of the parent content item
for this item. Used to identify hierarchical relationships between
posts/replies, messages/replies, and so on.
:param bool reply: (optional) Indicates whether this content item is a
reply to another content item.
:param bool forward: (optional) Indicates whether this content item is a
forwarded/copied version of another content item.
"""
self.content = content
self.id = id
self.created = created
self.updated = updated
self.contenttype = contenttype
self.language = language
self.parentid = parentid
self.reply = reply
self.forward = forward
@classmethod
def from_dict(cls, _dict: Dict) -> 'ContentItem':
"""Initialize a ContentItem object from a json dictionary."""
args = {}
if 'content' in _dict:
args['content'] = _dict.get('content')
else:
raise ValueError(
'Required property \'content\' not present in ContentItem JSON')
if 'id' in _dict:
args['id'] = _dict.get('id')
if 'created' in _dict:
args['created'] = _dict.get('created')
if 'updated' in _dict:
args['updated'] = _dict.get('updated')
if 'contenttype' in _dict:
args['contenttype'] = _dict.get('contenttype')
if 'language' in _dict:
args['language'] = _dict.get('language')
if 'parentid' in _dict:
args['parentid'] = _dict.get('parentid')
if 'reply' in _dict:
args['reply'] = _dict.get('reply')
if 'forward' in _dict:
args['forward'] = _dict.get('forward')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ContentItem object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'content') and self.content is not None:
_dict['content'] = self.content
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = self.created
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = self.updated
if hasattr(self, 'contenttype') and self.contenttype is not None:
_dict['contenttype'] = self.contenttype
if hasattr(self, 'language') and self.language is not None:
_dict['language'] = self.language
if hasattr(self, 'parentid') and self.parentid is not None:
_dict['parentid'] = self.parentid
if hasattr(self, 'reply') and self.reply is not None:
_dict['reply'] = self.reply
if hasattr(self, 'forward') and self.forward is not None:
_dict['forward'] = self.forward
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ContentItem object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ContentItem') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ContentItem') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ContenttypeEnum(str, Enum):
"""
The MIME type of the content. The default is plain text. The tags are stripped
from HTML content before it is analyzed; plain text is processed as submitted.
"""
TEXT_PLAIN = 'text/plain'
TEXT_HTML = 'text/html'
class LanguageEnum(str, Enum):
"""
The language identifier (two-letter ISO 639-1 identifier) for the language of the
content item. The default is `en` (English). Regional variants are treated as
their parent language; for example, `en-US` is interpreted as `en`. A language
specified with the **Content-Type** parameter overrides the value of this
parameter; any content items that specify a different language are ignored. Omit
the **Content-Type** parameter to base the language on the most prevalent
specification among the content items; again, content items that specify a
different language are ignored. You can specify any combination of languages for
the input and response content.
"""
AR = 'ar'
EN = 'en'
ES = 'es'
JA = 'ja'
KO = 'ko'
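# Illustrative usage sketch (not part of the generated SDK): assembling a
# Content payload from timestamped ContentItem objects as described in the
# docstrings above. The texts, ids, and timestamps are assumed sample data.
def _example_build_content() -> dict:
    items = [
        ContentItem(
            content='I enjoy starting the day with a long run.',
            id='item-1',
            created=1389958400000,  # milliseconds since the UNIX Epoch
            contenttype='text/plain',
            language='en'),
        ContentItem(
            content='Weekends are for reading and quiet reflection.',
            id='item-2',
            created=1390044800000,
            contenttype='text/plain',
            language='en'),
    ]
    # to_dict() emits the JSON key 'contentItems', the shape expected by
    # Content.from_dict() above.
    return Content(content_items=items).to_dict()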
class Profile():
"""
The personality profile that the service generated for the input content.
:attr str processed_language: The language model that was used to process the
input.
:attr int word_count: The number of words from the input that were used to
produce the profile.
:attr str word_count_message: (optional) When guidance is appropriate, a string
that provides a message that indicates the number of words found and where that
value falls in the range of required or suggested number of words.
:attr List[Trait] personality: A recursive array of `Trait` objects that
provides detailed results for the Big Five personality characteristics
(dimensions and facets) inferred from the input text.
:attr List[Trait] needs: Detailed results for the Needs characteristics inferred
from the input text.
:attr List[Trait] values: Detailed results for the Values characteristics
inferred from the input text.
:attr List[Behavior] behavior: (optional) For JSON content that is timestamped,
detailed results about the social behavior disclosed by the input in terms of
temporal characteristics. The results include information about the distribution
of the content over the days of the week and the hours of the day.
:attr List[ConsumptionPreferencesCategory] consumption_preferences: (optional)
If the **consumption_preferences** parameter is `true`, detailed results for
each category of consumption preferences. Each element of the array provides
information inferred from the input text for the individual preferences of that
category.
:attr List[Warning] warnings: An array of warning messages that are associated
with the input text for the request. The array is empty if the input generated
no warnings.
"""
def __init__(
self,
processed_language: str,
word_count: int,
personality: List['Trait'],
needs: List['Trait'],
values: List['Trait'],
warnings: List['Warning'],
*,
word_count_message: str = None,
behavior: List['Behavior'] = None,
consumption_preferences: List['ConsumptionPreferencesCategory'] = None
) -> None:
"""
Initialize a Profile object.
:param str processed_language: The language model that was used to process
the input.
:param int word_count: The number of words from the input that were used to
produce the profile.
:param List[Trait] personality: A recursive array of `Trait` objects that
provides detailed results for the Big Five personality characteristics
(dimensions and facets) inferred from the input text.
:param List[Trait] needs: Detailed results for the Needs characteristics
inferred from the input text.
:param List[Trait] values: Detailed results for the Values characteristics
inferred from the input text.
:param List[Warning] warnings: An array of warning messages that are
associated with the input text for the request. The array is empty if the
input generated no warnings.
:param str word_count_message: (optional) When guidance is appropriate, a
string that provides a message that indicates the number of words found and
where that value falls in the range of required or suggested number of
words.
:param List[Behavior] behavior: (optional) For JSON content that is
timestamped, detailed results about the social behavior disclosed by the
input in terms of temporal characteristics. The results include information
about the distribution of the content over the days of the week and the
hours of the day.
:param List[ConsumptionPreferencesCategory] consumption_preferences:
(optional) If the **consumption_preferences** parameter is `true`, detailed
results for each category of consumption preferences. Each element of the
array provides information inferred from the input text for the individual
preferences of that category.
"""
self.processed_language = processed_language
self.word_count = word_count
self.word_count_message = word_count_message
self.personality = personality
self.needs = needs
self.values = values
self.behavior = behavior
self.consumption_preferences = consumption_preferences
self.warnings = warnings
@classmethod
def from_dict(cls, _dict: Dict) -> 'Profile':
"""Initialize a Profile object from a json dictionary."""
args = {}
if 'processed_language' in _dict:
args['processed_language'] = _dict.get('processed_language')
else:
raise ValueError(
'Required property \'processed_language\' not present in Profile JSON'
)
if 'word_count' in _dict:
args['word_count'] = _dict.get('word_count')
else:
raise ValueError(
'Required property \'word_count\' not present in Profile JSON')
if 'word_count_message' in _dict:
args['word_count_message'] = _dict.get('word_count_message')
if 'personality' in _dict:
args['personality'] = [
Trait.from_dict(x) for x in _dict.get('personality')
]
else:
raise ValueError(
'Required property \'personality\' not present in Profile JSON')
if 'needs' in _dict:
args['needs'] = [Trait.from_dict(x) for x in _dict.get('needs')]
else:
raise ValueError(
'Required property \'needs\' not present in Profile JSON')
if 'values' in _dict:
args['values'] = [Trait.from_dict(x) for x in _dict.get('values')]
else:
raise ValueError(
'Required property \'values\' not present in Profile JSON')
if 'behavior' in _dict:
args['behavior'] = [
Behavior.from_dict(x) for x in _dict.get('behavior')
]
if 'consumption_preferences' in _dict:
args['consumption_preferences'] = [
ConsumptionPreferencesCategory.from_dict(x)
for x in _dict.get('consumption_preferences')
]
if 'warnings' in _dict:
args['warnings'] = [
Warning.from_dict(x) for x in _dict.get('warnings')
]
else:
raise ValueError(
'Required property \'warnings\' not present in Profile JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Profile object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(
self,
'processed_language') and self.processed_language is not None:
_dict['processed_language'] = self.processed_language
if hasattr(self, 'word_count') and self.word_count is not None:
_dict['word_count'] = self.word_count
if hasattr(
self,
'word_count_message') and self.word_count_message is not None:
_dict['word_count_message'] = self.word_count_message
if hasattr(self, 'personality') and self.personality is not None:
_dict['personality'] = [x.to_dict() for x in self.personality]
if hasattr(self, 'needs') and self.needs is not None:
_dict['needs'] = [x.to_dict() for x in self.needs]
if hasattr(self, 'values') and self.values is not None:
_dict['values'] = [x.to_dict() for x in self.values]
if hasattr(self, 'behavior') and self.behavior is not None:
_dict['behavior'] = [x.to_dict() for x in self.behavior]
if hasattr(self, 'consumption_preferences'
) and self.consumption_preferences is not None:
_dict['consumption_preferences'] = [
x.to_dict() for x in self.consumption_preferences
]
if hasattr(self, 'warnings') and self.warnings is not None:
_dict['warnings'] = [x.to_dict() for x in self.warnings]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Profile object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Profile') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Profile') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ProcessedLanguageEnum(str, Enum):
"""
The language model that was used to process the input.
"""
AR = 'ar'
EN = 'en'
ES = 'es'
JA = 'ja'
KO = 'ko'
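# Illustrative usage sketch (not part of the generated SDK): deserializing a
# minimal Profile response. Only required fields are populated; the trait id,
# scores, and counts are assumed sample values, not real service output.
def _example_parse_profile() -> 'Profile':
    response = {
        'processed_language': 'en',
        'word_count': 1500,
        'personality': [{
            'trait_id': 'big5_openness',
            'name': 'Openness',
            'category': 'personality',
            'percentile': 0.8,
        }],
        'needs': [],
        'values': [],
        'warnings': [],
    }
    return Profile.from_dict(response)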
class Trait():
"""
The characteristics that the service inferred from the input content.
:attr str trait_id: The unique, non-localized identifier of the characteristic
to which the results pertain. IDs have the form
* `big5_{characteristic}` for Big Five personality dimensions
* `facet_{characteristic}` for Big Five personality facets
* `need_{characteristic}` for Needs
    * `value_{characteristic}` for Values.
:attr str name: The user-visible, localized name of the characteristic.
:attr str category: The category of the characteristic: `personality` for Big
Five personality characteristics, `needs` for Needs, and `values` for Values.
:attr float percentile: The normalized percentile score for the characteristic.
The range is 0 to 1. For example, if the percentage for Openness is 0.60, the
author scored in the 60th percentile; the author is more open than 59 percent of
    the population and less open than 40 percent of the population.
:attr float raw_score: (optional) The raw score for the characteristic. The
range is 0 to 1. A higher score generally indicates a greater likelihood that
the author has that characteristic, but raw scores must be considered in
aggregate: The range of values in practice might be much smaller than 0 to 1, so
an individual score must be considered in the context of the overall scores and
their range.
The raw score is computed based on the input and the service model; it is not
normalized or compared with a sample population. The raw score enables
comparison of the results against a different sampling population and with a
custom normalization approach.
:attr bool significant: (optional) **`2017-10-13`**: Indicates whether the
characteristic is meaningful for the input language. The field is always `true`
for all characteristics of English, Spanish, and Japanese input. The field is
`false` for the subset of characteristics of Arabic and Korean input for which
the service's models are unable to generate meaningful results.
**`2016-10-19`**: Not returned.
:attr List[Trait] children: (optional) For `personality` (Big Five) dimensions,
more detailed results for the facets of each dimension as inferred from the
input text.
"""
def __init__(self,
trait_id: str,
name: str,
category: str,
percentile: float,
*,
raw_score: float = None,
significant: bool = None,
children: List['Trait'] = None) -> None:
"""
Initialize a Trait object.
:param str trait_id: The unique, non-localized identifier of the
characteristic to which the results pertain. IDs have the form
* `big5_{characteristic}` for Big Five personality dimensions
* `facet_{characteristic}` for Big Five personality facets
* `need_{characteristic}` for Needs
        * `value_{characteristic}` for Values.
:param str name: The user-visible, localized name of the characteristic.
:param str category: The category of the characteristic: `personality` for
Big Five personality characteristics, `needs` for Needs, and `values` for
Values.
:param float percentile: The normalized percentile score for the
characteristic. The range is 0 to 1. For example, if the percentage for
Openness is 0.60, the author scored in the 60th percentile; the author is
        more open than 59 percent of the population and less open than 40 percent
of the population.
:param float raw_score: (optional) The raw score for the characteristic.
The range is 0 to 1. A higher score generally indicates a greater
likelihood that the author has that characteristic, but raw scores must be
considered in aggregate: The range of values in practice might be much
smaller than 0 to 1, so an individual score must be considered in the
context of the overall scores and their range.
The raw score is computed based on the input and the service model; it is
not normalized or compared with a sample population. The raw score enables
comparison of the results against a different sampling population and with
a custom normalization approach.
:param bool significant: (optional) **`2017-10-13`**: Indicates whether the
characteristic is meaningful for the input language. The field is always
`true` for all characteristics of English, Spanish, and Japanese input. The
field is `false` for the subset of characteristics of Arabic and Korean
input for which the service's models are unable to generate meaningful
results. **`2016-10-19`**: Not returned.
:param List[Trait] children: (optional) For `personality` (Big Five)
dimensions, more detailed results for the facets of each dimension as
inferred from the input text.
"""
self.trait_id = trait_id
self.name = name
self.category = category
self.percentile = percentile
self.raw_score = raw_score
self.significant = significant
self.children = children
@classmethod
def from_dict(cls, _dict: Dict) -> 'Trait':
"""Initialize a Trait object from a json dictionary."""
args = {}
if 'trait_id' in _dict:
args['trait_id'] = _dict.get('trait_id')
else:
raise ValueError(
'Required property \'trait_id\' not present in Trait JSON')
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in Trait JSON')
if 'category' in _dict:
args['category'] = _dict.get('category')
else:
raise ValueError(
'Required property \'category\' not present in Trait JSON')
if 'percentile' in _dict:
args['percentile'] = _dict.get('percentile')
else:
raise ValueError(
'Required property \'percentile\' not present in Trait JSON')
if 'raw_score' in _dict:
args['raw_score'] = _dict.get('raw_score')
if 'significant' in _dict:
args['significant'] = _dict.get('significant')
if 'children' in _dict:
args['children'] = [
Trait.from_dict(x) for x in _dict.get('children')
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Trait object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'trait_id') and self.trait_id is not None:
_dict['trait_id'] = self.trait_id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'category') and self.category is not None:
_dict['category'] = self.category
if hasattr(self, 'percentile') and self.percentile is not None:
_dict['percentile'] = self.percentile
if hasattr(self, 'raw_score') and self.raw_score is not None:
_dict['raw_score'] = self.raw_score
if hasattr(self, 'significant') and self.significant is not None:
_dict['significant'] = self.significant
if hasattr(self, 'children') and self.children is not None:
_dict['children'] = [x.to_dict() for x in self.children]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Trait object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Trait') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Trait') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CategoryEnum(str, Enum):
"""
The category of the characteristic: `personality` for Big Five personality
characteristics, `needs` for Needs, and `values` for Values.
"""
PERSONALITY = 'personality'
NEEDS = 'needs'
VALUES = 'values'
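# Illustrative usage sketch (not part of the generated SDK): walking the
# recursive Trait tree (e.g. Profile.personality) to flatten Big Five
# dimensions and their facet children into (trait_id, percentile) pairs.
def _example_flatten_traits(traits: List['Trait']) -> List[tuple]:
    flattened = []
    for trait in traits:
        flattened.append((trait.trait_id, trait.percentile))
        if trait.children:
            # facets of a dimension are themselves Trait objects
            flattened.extend(_example_flatten_traits(trait.children))
    return flattened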
class Warning():
"""
A warning message that is associated with the input content.
:attr str warning_id: The identifier of the warning message.
:attr str message: The message associated with the `warning_id`:
* `WORD_COUNT_MESSAGE`: "There were {number} words in the input. We need a
minimum of 600, preferably 1,200 or more, to compute statistically significant
estimates."
* `JSON_AS_TEXT`: "Request input was processed as text/plain as indicated,
however detected a JSON input. Did you mean application/json?"
* `CONTENT_TRUNCATED`: "For maximum accuracy while also optimizing processing
time, only the first 250KB of input text (excluding markup) was analyzed.
Accuracy levels off at approximately 3,000 words so this did not affect the
accuracy of the profile."
    * `PARTIAL_TEXT_USED`: "The text provided to compute the profile was trimmed for
performance reasons. This action does not affect the accuracy of the output, as
not all of the input text was required." Applies only when Arabic input text
exceeds a threshold at which additional words do not contribute to the accuracy
of the profile.
"""
def __init__(self, warning_id: str, message: str) -> None:
"""
Initialize a Warning object.
:param str warning_id: The identifier of the warning message.
:param str message: The message associated with the `warning_id`:
* `WORD_COUNT_MESSAGE`: "There were {number} words in the input. We need a
minimum of 600, preferably 1,200 or more, to compute statistically
significant estimates."
* `JSON_AS_TEXT`: "Request input was processed as text/plain as indicated,
however detected a JSON input. Did you mean application/json?"
* `CONTENT_TRUNCATED`: "For maximum accuracy while also optimizing
processing time, only the first 250KB of input text (excluding markup) was
analyzed. Accuracy levels off at approximately 3,000 words so this did not
affect the accuracy of the profile."
        * `PARTIAL_TEXT_USED`: "The text provided to compute the profile was
trimmed for performance reasons. This action does not affect the accuracy
of the output, as not all of the input text was required." Applies only
when Arabic input text exceeds a threshold at which additional words do not
contribute to the accuracy of the profile.
"""
self.warning_id = warning_id
self.message = message
@classmethod
def from_dict(cls, _dict: Dict) -> 'Warning':
"""Initialize a Warning object from a json dictionary."""
args = {}
if 'warning_id' in _dict:
args['warning_id'] = _dict.get('warning_id')
else:
raise ValueError(
'Required property \'warning_id\' not present in Warning JSON')
if 'message' in _dict:
args['message'] = _dict.get('message')
else:
raise ValueError(
'Required property \'message\' not present in Warning JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Warning object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'warning_id') and self.warning_id is not None:
_dict['warning_id'] = self.warning_id
if hasattr(self, 'message') and self.message is not None:
_dict['message'] = self.message
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Warning object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Warning') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Warning') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class WarningIdEnum(str, Enum):
"""
The identifier of the warning message.
"""
WORD_COUNT_MESSAGE = 'WORD_COUNT_MESSAGE'
JSON_AS_TEXT = 'JSON_AS_TEXT'
CONTENT_TRUNCATED = 'CONTENT_TRUNCATED'
PARTIAL_TEXT_USED = 'PARTIAL_TEXT_USED'
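# Illustrative usage sketch (not part of the generated SDK): checking a
# parsed Profile for the truncation warning documented above. The profile
# argument is assumed to come from Profile.from_dict() on a real response.
def _example_has_truncation_warning(profile: 'Profile') -> bool:
    # 'CONTENT_TRUNCATED' mirrors WarningIdEnum.CONTENT_TRUNCATED
    return any(w.warning_id == 'CONTENT_TRUNCATED' for w in profile.warnings)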
| 46.679077 | 217 | 0.631807 |
a9c18fc04e233796d2e1f70acc0bdfa46104c855 | 11,050 | py | Python | kinow_client/models/blog_page.py | kinow-io/kaemo-python-sdk | 610fce09e3a9e631babf09195b0492959d9e4d56 | ["Apache-2.0"] | 1 | 2017-05-03T12:48:22.000Z | 2017-05-03T12:48:22.000Z | kinow_client/models/blog_page.py | kinow-io/kaemo-python-sdk | 610fce09e3a9e631babf09195b0492959d9e4d56 | ["Apache-2.0"] | null | null | null | kinow_client/models/blog_page.py | kinow-io/kaemo-python-sdk | 610fce09e3a9e631babf09195b0492959d9e4d56 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BlogPage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id_lang=None, title=None, link_rewrite=None, description_short=None, description=None, meta_title=None, meta_description=None, meta_keywords=None, date_add=None, date_issue=None, date_upd=None, active=None, id_blog_category=None, cover=None, id=None):
"""
BlogPage - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id_lang': 'int',
'title': 'str',
'link_rewrite': 'str',
'description_short': 'str',
'description': 'str',
'meta_title': 'str',
'meta_description': 'str',
'meta_keywords': 'str',
'date_add': 'str',
'date_issue': 'str',
'date_upd': 'str',
'active': 'int',
'id_blog_category': 'int',
'cover': 'str',
'id': 'int'
}
self.attribute_map = {
'id_lang': 'id_lang',
'title': 'title',
'link_rewrite': 'link_rewrite',
'description_short': 'description_short',
'description': 'description',
'meta_title': 'meta_title',
'meta_description': 'meta_description',
'meta_keywords': 'meta_keywords',
'date_add': 'date_add',
'date_issue': 'date_issue',
'date_upd': 'date_upd',
'active': 'active',
'id_blog_category': 'id_blog_category',
'cover': 'cover',
'id': 'id'
}
self._id_lang = id_lang
self._title = title
self._link_rewrite = link_rewrite
self._description_short = description_short
self._description = description
self._meta_title = meta_title
self._meta_description = meta_description
self._meta_keywords = meta_keywords
self._date_add = date_add
self._date_issue = date_issue
self._date_upd = date_upd
self._active = active
self._id_blog_category = id_blog_category
self._cover = cover
self._id = id
@property
def id_lang(self):
"""
Gets the id_lang of this BlogPage.
:return: The id_lang of this BlogPage.
:rtype: int
"""
return self._id_lang
@id_lang.setter
def id_lang(self, id_lang):
"""
Sets the id_lang of this BlogPage.
:param id_lang: The id_lang of this BlogPage.
:type: int
"""
self._id_lang = id_lang
@property
def title(self):
"""
Gets the title of this BlogPage.
:return: The title of this BlogPage.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this BlogPage.
:param title: The title of this BlogPage.
:type: str
"""
self._title = title
@property
def link_rewrite(self):
"""
Gets the link_rewrite of this BlogPage.
:return: The link_rewrite of this BlogPage.
:rtype: str
"""
return self._link_rewrite
@link_rewrite.setter
def link_rewrite(self, link_rewrite):
"""
Sets the link_rewrite of this BlogPage.
:param link_rewrite: The link_rewrite of this BlogPage.
:type: str
"""
self._link_rewrite = link_rewrite
@property
def description_short(self):
"""
Gets the description_short of this BlogPage.
:return: The description_short of this BlogPage.
:rtype: str
"""
return self._description_short
@description_short.setter
def description_short(self, description_short):
"""
Sets the description_short of this BlogPage.
:param description_short: The description_short of this BlogPage.
:type: str
"""
self._description_short = description_short
@property
def description(self):
"""
Gets the description of this BlogPage.
:return: The description of this BlogPage.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BlogPage.
:param description: The description of this BlogPage.
:type: str
"""
self._description = description
@property
def meta_title(self):
"""
Gets the meta_title of this BlogPage.
:return: The meta_title of this BlogPage.
:rtype: str
"""
return self._meta_title
@meta_title.setter
def meta_title(self, meta_title):
"""
Sets the meta_title of this BlogPage.
:param meta_title: The meta_title of this BlogPage.
:type: str
"""
self._meta_title = meta_title
@property
def meta_description(self):
"""
Gets the meta_description of this BlogPage.
:return: The meta_description of this BlogPage.
:rtype: str
"""
return self._meta_description
@meta_description.setter
def meta_description(self, meta_description):
"""
Sets the meta_description of this BlogPage.
:param meta_description: The meta_description of this BlogPage.
:type: str
"""
self._meta_description = meta_description
@property
def meta_keywords(self):
"""
Gets the meta_keywords of this BlogPage.
:return: The meta_keywords of this BlogPage.
:rtype: str
"""
return self._meta_keywords
@meta_keywords.setter
def meta_keywords(self, meta_keywords):
"""
Sets the meta_keywords of this BlogPage.
:param meta_keywords: The meta_keywords of this BlogPage.
:type: str
"""
self._meta_keywords = meta_keywords
@property
def date_add(self):
"""
Gets the date_add of this BlogPage.
:return: The date_add of this BlogPage.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this BlogPage.
:param date_add: The date_add of this BlogPage.
:type: str
"""
self._date_add = date_add
@property
def date_issue(self):
"""
Gets the date_issue of this BlogPage.
:return: The date_issue of this BlogPage.
:rtype: str
"""
return self._date_issue
@date_issue.setter
def date_issue(self, date_issue):
"""
Sets the date_issue of this BlogPage.
:param date_issue: The date_issue of this BlogPage.
:type: str
"""
self._date_issue = date_issue
@property
def date_upd(self):
"""
Gets the date_upd of this BlogPage.
:return: The date_upd of this BlogPage.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this BlogPage.
:param date_upd: The date_upd of this BlogPage.
:type: str
"""
self._date_upd = date_upd
@property
def active(self):
"""
Gets the active of this BlogPage.
:return: The active of this BlogPage.
:rtype: int
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this BlogPage.
:param active: The active of this BlogPage.
:type: int
"""
self._active = active
@property
def id_blog_category(self):
"""
Gets the id_blog_category of this BlogPage.
:return: The id_blog_category of this BlogPage.
:rtype: int
"""
return self._id_blog_category
@id_blog_category.setter
def id_blog_category(self, id_blog_category):
"""
Sets the id_blog_category of this BlogPage.
:param id_blog_category: The id_blog_category of this BlogPage.
:type: int
"""
self._id_blog_category = id_blog_category
@property
def cover(self):
"""
Gets the cover of this BlogPage.
:return: The cover of this BlogPage.
:rtype: str
"""
return self._cover
@cover.setter
def cover(self, cover):
"""
Sets the cover of this BlogPage.
:param cover: The cover of this BlogPage.
:type: str
"""
self._cover = cover
@property
def id(self):
"""
Gets the id of this BlogPage.
:return: The id of this BlogPage.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BlogPage.
:param id: The id of this BlogPage.
:type: int
"""
self._id = id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
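# Illustrative usage sketch (not part of the generated model): constructing
# a BlogPage and serializing it with to_dict(). The field values are assumed
# sample data; unset attributes simply serialize as None.
def _example_blog_page() -> dict:
    page = BlogPage(
        id_lang=1,
        title='Launch announcement',
        link_rewrite='launch-announcement',
        active=1,
        id_blog_category=2)
    return page.to_dict()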
| 24.610245 | 274 | 0.556018 |
34b1f67b2cf638441dd771fcfc95dce8f62f6b9e | 1,122 | py | Python | src/injecta/config/ImportDefinitionResolver.py | DataSentics/injecta | 090eeac6c76c43d40be71df678222a07b0a3c783 | ["MIT"] | 3 | 2021-09-27T12:55:00.000Z | 2022-01-31T19:13:23.000Z | src/injecta/config/ImportDefinitionResolver.py | DataSentics/injecta | 090eeac6c76c43d40be71df678222a07b0a3c783 | ["MIT"] | null | null | null | src/injecta/config/ImportDefinitionResolver.py | DataSentics/injecta | 090eeac6c76c43d40be71df678222a07b0a3c783 | ["MIT"] | 1 | 2021-03-04T09:12:05.000Z | 2021-03-04T09:12:05.000Z |
from pathlib import Path
from injecta.package.real_resource_path_resolver import resolve_real_resource_path
class ImportDefinitionResolver:
def resolve(self, import_definition, base_dir: Path) -> set:
if isinstance(import_definition, str):
if import_definition[0:1] == "@":
return {resolve_real_resource_path(import_definition)}
return {base_dir.joinpath(import_definition).resolve()}
if isinstance(import_definition, dict):
if "search" not in import_definition:
raise Exception('Missing the "search" main keyword in the import definition')
if "include" not in import_definition["search"]:
raise Exception('Missing the "include" keyword under "search" main keyword')
base_path_glob = set(base_dir.glob("./**/*.*"))
all_configs_glob = set(map(lambda path: path.resolve(), base_dir.glob(import_definition["search"]["include"])))
return set(all_configs_glob - base_path_glob)
raise Exception("Unexpected import definition type: " + type(import_definition))
| 43.153846 | 123 | 0.678253 |
2905dba6ac46cf8cf9e98748926e71eaae13cffb | 30,171 | py | Python | tests/integration/test_google_page.py | konrad-kocik/nicelka | a174fce9b8c6d4414312120e89e10bb1e10629df | ["MIT"] | null | null | null | tests/integration/test_google_page.py | konrad-kocik/nicelka | a174fce9b8c6d4414312120e89e10bb1e10629df | ["MIT"] | null | null | null | tests/integration/test_google_page.py | konrad-kocik/nicelka | a174fce9b8c6d4414312120e89e10bb1e10629df | ["MIT"] | null | null | null |
from pytest import fixture
from tests.integration.utilities.utilities import get_io_dir_paths, create_dir, remove_dir, run_google_searcher, assert_report_file_content_equals
test_suite = 'google_page'
test_cases = ['no_result',
'no_result_twice',
'single_result',
'single_result_with_two_lines',
'single_result_with_four_lines',
'single_result_indirect_match_by_city_skipped',
'single_result_indirect_match_by_zip_code_skipped',
'single_result_indirect_match_by_city_allowed',
'single_result_indirect_match_by_zip_code_head_allowed',
'single_result_indirect_match_by_zip_code_tail_allowed',
'single_result_duplicate_skipped',
'single_result_duplicate_allowed',
'single_result_blacklisted_skipped',
'single_result_blacklisted_allowed',
'single_result_twice',
'multiple_results',
'multiple_results_indirect_matches_by_city_skipped',
'multiple_results_indirect_matches_by_zip_code_head_skipped',
'multiple_results_indirect_matches_by_city_allowed',
'multiple_results_indirect_matches_by_zip_code_head_allowed',
'multiple_results_duplicate_skipped',
'multiple_results_duplicate_allowed',
'multiple_results_not_on_top',
'multiple_results_blacklisted_skipped',
'multiple_results_blacklisted_allowed',
'multiple_results_twice'
]
@fixture(scope='module')
def create_reports_dirs():
for test_case in test_cases:
_, report_dir_path = get_io_dir_paths(test_suite, test_case)
create_dir(report_dir_path)
@fixture(scope='module')
def remove_reports_dirs(request):
def teardown():
for test_case in test_cases:
_, report_dir_path = get_io_dir_paths(test_suite, test_case)
remove_dir(report_dir_path)
request.addfinalizer(teardown)
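# The helpers above come from tests.integration.utilities, which is not shown
# here. A minimal assumed implementation of the assertion helper (read the
# generated report and compare it to the expected text) might look like:
def _assumed_assert_report_file_content_equals(expected, report_file_path):
    with open(report_file_path, encoding='utf-8') as report_file:
        assert report_file.read() == expected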
def test_no_result(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'24-200 BABIN' + '\n\n' + \
'Results found: 0'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='no_result')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_no_result_twice(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'24-200 BABIN' + '\n\n' + \
'======================================================================' + '\n' + \
'32-731 BYTOMSKO' + '\n\n' + \
'Results found: 0'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='no_result_twice')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'21-030 KONOPNICA' + '\n\n' + \
'#Urząd' + '\n\n' + \
'Urząd Gminy Konopnica' + '\n' + \
'Kozubszczyzna 127a' + '\n' + \
'21-030 Motycz' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_with_two_lines(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'34-603 UJANOWICE' + '\n\n' + \
'#Klub' + '\n\n' + \
'AKS UJANOWICE' + '\n' + \
'34-603 Ujanowice' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_with_two_lines')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_with_four_lines(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'32-862 PORĄBKA IWKOWSKA' + '\n\n' + \
'#Produkcja' + '\n\n' + \
'P.P.H.U. NITUS Piotr Nowak' + '\n' + \
'Drużków Pusty' + '\n' + \
'Porąbka Iwkowska 9' + '\n' + \
'32-862 Porąbka Iwkowska' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_with_four_lines')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_indirect_match_by_city_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'24-300 WOLA RUDZKA' + '\n\n' + \
'Results found: 0'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_indirect_match_by_city_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_indirect_match_by_zip_code_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'24-400 CUPLE' + '\n\n' + \
'Results found: 0'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_indirect_match_by_zip_code_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_indirect_match_by_city_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'34-654 PISARZOWA' + '\n\n' + \
'#Sąd' + '\n\n' + \
'Sąd Rejonowy w Limanowej' + '\n' + \
'Marka 19' + '\n' + \
'34-600 Limanowa' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_indirect_match_by_city_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_indirect_matches=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_indirect_match_by_zip_code_head_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'32-725 RAJBRO' + '\n\n' + \
'#ZNP' + '\n\n' + \
'Związek Nauczycielstwa Polskiego. Oddział' + '\n' + \
'Jana Pawła II 42' + '\n' + \
'34-600 Limanowa' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_indirect_match_by_zip_code_head_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_indirect_matches=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_indirect_match_by_zip_code_tail_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'24-150 NAŁĘCZÓW' + '\n\n' + \
'#Wydział' + '\n\n' + \
'Urząd Miejski w Nałęczowie' + '\n' + \
'Lipowa 3' + '\n' + \
'24-140 Nałęczów' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_indirect_match_by_zip_code_tail_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_duplicate_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'34-603 STRZESZYCE' + '\n\n' + \
'#Produkcja' + '\n\n' + \
'Olivea Małopolska Sp. z o. o.' + '\n' + \
'Strzeszyce 115' + '\n' + \
'34-603 Ujanowice' + '\n' + \
'34-603' + '\n\n' + \
'======================================================================' + '\n' + \
'34-603 STRZESZYCE' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_duplicate_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_duplicate_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'34-603 UJANOWICE' + '\n\n' + \
'#Bank' + '\n\n' + \
'Bank Spółdzielczy w Limanowej. Punkt obsługi klienta' + '\n' + \
'Ujanowice 2' + '\n' + \
'34-603 Ujanowice' + '\n\n' + \
'======================================================================' + '\n' + \
'34-603 UJANOWICE' + '\n\n' + \
'#Bank' + '\n\n' + \
'Bank Spółdzielczy w Limanowej. Punkt obsługi klienta' + '\n' + \
'Ujanowice 2' + '\n' + \
'34-603 Ujanowice' + '\n\n' + \
'Results found: 2'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_duplicate_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_duplicates=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_blacklisted_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'13-340 BIELICE' + '\n\n' + \
'Results found: 0'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_blacklisted_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_blacklisted_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'13-340 BIELICE' + '\n\n' + \
'#Szkoła' + '\n\n' + \
'Zespół Szkół w Bielicach, Gimnazjum im. Narodów Zjednoczonej Europy' + '\n' + \
'Bielice 120' + '\n' + \
'13-330 Bielice' + '\n\n' + \
'Results found: 1'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_blacklisted_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_blacklisted=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_single_result_twice(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'34-600 MORDARKA' + '\n\n' + \
'#Przedszkole' + '\n\n' + \
'Niepubliczne Przedszkole Integracyjne Chatka Małego Skrzatka' + '\n' + \
'34-600 Mordarka' + '\n\n' + \
'#Produkcja' + '\n\n' + \
'FUHP Stalkomplet S.C Walenty Szubryt Stanisław Bubula' + '\n' + \
'Mordarka dz.1236' + '\n' + \
'34-600' + '\n\n' + \
'Results found: 2'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='single_result_twice')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'33-300 NOWY SĄCZ' + '\n\n' + \
'#muzeum' + '\n\n' + \
'Muzeum Okręgowe w Nowym Sączu' + '\n' + \
'Lwowska 3' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Muzeum Okręgowe w Nowym Sączu - Gmach Głowny' + '\n' + \
'Jagiellońska 56' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Sądecki Park Etnograficzny' + '\n' + \
'Gen. Wieniawy-Długoszowskiego 83B' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Miasteczko Galicyjskie. Oddział Muzeum Okręgowego w Nowym Sączu' + '\n' + \
'Lwowska 226' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Galeria Marii Ritter. Oddział Muzeum Okręgowego' + '\n' + \
'Rynek 2' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Results found: 5'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_indirect_matches_by_city_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'87-207 MAŁE PUŁKOWO' + '\n\n' + \
'#PZU' + '\n\n' + \
'Kuźnia Smaków' + '\n' + \
'20' + '\n' + \
'20' + '\n' + \
'87-207 Małe Pułkowo' + '\n\n' + \
'Markostal Marek Mrowiński' + '\n' + \
'Małe Pułkowo 67' + '\n' + \
'87-207 Małe Pułkowo' + '\n\n' + \
'Results found: 2'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_indirect_matches_by_city_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_indirect_matches_by_zip_code_head_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'33-393 MARCINKOWICE' + '\n\n' + \
'#Szkoła' + '\n\n' + \
'Szkoła Podstawowa im. Marszałka Józefa Piłsudskiego' + '\n' + \
'33-393' + '\n' + \
'Marcinkowicka 132' + '\n' + \
'33-395 Marcinkowice' + '\n\n' + \
'Zespół Szkół im. Władysława Orkana' + '\n' + \
'Marcinkowice 1' + '\n' + \
'33-395 Marcinkowice' + '\n\n' + \
'Results found: 2'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_indirect_matches_by_zip_code_head_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_indirect_matches_by_city_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'87-207 MAŁE PUŁKOWO' + '\n\n' + \
'#PZU' + '\n\n' + \
'PZU' + '\n' + \
'Wolności 44' + '\n' + \
'87-200 Wąbrzeźno' + '\n\n' + \
'PZU' + '\n' + \
'Plac Tysiąclecia 22A' + '\n' + \
'87-400 Golub-Dobrzyń' + '\n\n' + \
'PZU Ubezpieczenia Wąbrzeźno - Agent Andrzej Sadłowski' + '\n' + \
'Poniatowskiego 8' + '\n' + \
'87-200 Wąbrzeźno' + '\n\n' + \
'Kuźnia Smaków' + '\n' + \
'20' + '\n' + \
'20' + '\n' + \
'87-207 Małe Pułkowo' + '\n\n' + \
'PZU Ubezpieczenia Czernikowo - Agent Bożena Zygnerska-Nawrotek' + '\n' + \
'Juliusza Słowackiego 3' + '\n' + \
'87-125 Czernikowo' + '\n\n' + \
'Agencja PZU S.A' + '\n' + \
'Generała Władysława Sikorskiego 36a' + '\n' + \
'87-140 Chełmża' + '\n\n' + \
'Grochocka G. Ubezpieczenia' + '\n' + \
'Krasińskiego 5' + '\n' + \
'87-200 Wąbrzeźno' + '\n\n' + \
'PZU Ubezpieczenia Brodnica - Agent Katarzyna Korzeńska' + '\n' + \
'Główna 29' + '\n' + \
'87-300 Jabłonowo Pomorskie' + '\n\n' + \
'PZU Ubezpieczenia Rypin - Agent Grzegorz Makowski' + '\n' + \
'Jana Pawła II 6' + '\n' + \
'87-500 Rypin' + '\n\n' + \
'Oddział PZU - Rypin, Nowy Rynek,' + '\n' + \
'Nowy Rynek 6' + '\n' + \
'87-500 Rypin' + '\n\n' + \
'Markostal Marek Mrowiński' + '\n' + \
'Małe Pułkowo 67' + '\n' + \
'87-207 Małe Pułkowo' + '\n\n' + \
'Bieganowska Anna. Ubezpieczenia' + '\n' + \
'Plac Tysiąclecia 2' + '\n' + \
'87-400 Golub-Dobrzyń' + '\n\n' + \
'Results found: 12'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_indirect_matches_by_city_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_indirect_matches=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_indirect_matches_by_zip_code_head_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'33-393 MARCINKOWICE' + '\n\n' + \
'#Szkoła' + '\n\n' + \
'Szkoła Podstawowa im. Marszałka Józefa Piłsudskiego' + '\n' + \
'33-393' + '\n' + \
'Marcinkowicka 132' + '\n' + \
'33-395 Marcinkowice' + '\n\n' + \
'Zespół Szkół im. Władysława Orkana' + '\n' + \
'Marcinkowice 1' + '\n' + \
'33-395 Marcinkowice' + '\n\n' + \
'Szkoła Podstawowa im. Stanisława i Jana Potoczków' + '\n' + \
'33-395 Rdziostów' + '\n\n' + \
'Szkoła Podstawowa im. Mieszka l' + '\n' + \
'Marcinkowicka 46' + '\n' + \
'78-640 Marcinkowice' + '\n\n' + \
'Szkoła Pływania "TB" Tomasz Baliczek' + '\n' + \
'Marcinkowicka 9' + '\n' + \
'33-395 Nowy Sącz' + '\n\n' + \
'Results found: 5'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_indirect_matches_by_zip_code_head_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_indirect_matches=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_duplicate_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'88-140 GNIEWKOWO' + '\n\n' + \
'#Przedsiębiorstwo' + '\n\n' + \
'Gniewkowo Sp. z o.o. Przedsiębiorstwo komunalne' + '\n' + \
'Jana Kilińskiego 9' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'I.T.I. Poland Sp. z o.o.' + '\n' + \
'Przemysłowa 2' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Techniki Pompowej IMPELLER' + '\n' + \
'Zajezierze 8 B' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Pipczyńska Katarzyna. Przedsiębiorstwo wielobranżowe' + '\n' + \
'Jana Kilińskiego 49' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Tinapol. PH. Lubańska T.' + '\n' + \
'Wojska Polskiego 23' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Wielobranżowe "e-mir"' + '\n' + \
'Toruńska 33a' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Results found: 6'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_duplicate_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_duplicate_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'88-140 GNIEWKOWO' + '\n\n' + \
'#Przedsiębiorstwo' + '\n\n' + \
'Gniewkowo Sp. z o.o. Przedsiębiorstwo komunalne' + '\n' + \
'Jana Kilińskiego 9' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'I.T.I. Poland Sp. z o.o.' + '\n' + \
'Przemysłowa 2' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Techniki Pompowej IMPELLER' + '\n' + \
'Zajezierze 8 B' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Pipczyńska Katarzyna. Przedsiębiorstwo wielobranżowe' + '\n' + \
'Jana Kilińskiego 49' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Tinapol. PH. Lubańska T.' + '\n' + \
'Wojska Polskiego 23' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Wielobranżowe "e-mir"' + '\n' + \
'Toruńska 33a' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'#Przedsiębiorstwo' + '\n\n' + \
'Gniewkowo Sp. z o.o. Przedsiębiorstwo komunalne' + '\n' + \
'Jana Kilińskiego 9' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'I.T.I. Poland Sp. z o.o.' + '\n' + \
'Przemysłowa 2' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Techniki Pompowej IMPELLER' + '\n' + \
'Zajezierze 8 B' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Pipczyńska Katarzyna. Przedsiębiorstwo wielobranżowe' + '\n' + \
'Jana Kilińskiego 49' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Tinapol. PH. Lubańska T.' + '\n' + \
'Wojska Polskiego 23' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Wielobranżowe "e-mir"' + '\n' + \
'Toruńska 33a' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Results found: 12'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_duplicate_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_duplicates=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_not_on_top(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'33-300 NOWY SĄCZ' + '\n\n' + \
'#Fundacja' + '\n\n' + \
'Fundacja Renovo' + '\n' + \
'Krakowska 92/5' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja Tarcza' + '\n' + \
'Jeremiego Wiśniowieckiego 125' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja im. dra Jerzego Masiora w Nowym Sączu' + '\n' + \
'Tarnowska 25' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja Inicjatyw Społeczno - Akademickich' + '\n' + \
'Nawojowska 95' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja Rozwoju Ziem Górskich' + '\n' + \
'Węgierska 33' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Mada. Fundacja Pomocy Osobom z Autyzmem' + '\n' + \
'Al. Wolności 19' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja Programów Pomocy Dla Rolnictwa' + '\n' + \
'Tadeusza Kościuszki 7' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja Instytut Państwa i Prawa' + '\n' + \
'Stefana Czarnieckiego 5' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Nox. Fundacja Pomocy Osobom Fizycznie Niepełnosprawnym' + '\n' + \
'Jana Kochanowskiego 17' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Stowarzyszenie Sursum Corda ("w górę serca")' + '\n' + \
'Lwowska 11' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja na rzecz Rozwoju Polskiego Rolnictwa. Biuro terenowe' + '\n' + \
'Tarnowska 28' + '\n' + \
'33-395 Nowy Sącz' + '\n\n' + \
'Nadzieja. Stowarzyszenie Rodziców i Przyjaciół Dzieci Niepełnosprawnych Ruchowo i Umysłowo' + '\n' + \
'Jana Freislera 10' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Europejski Instytut Rozwoju Obywatelskiego' + '\n' + \
'Jagiellońska 18' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Humaneo' + '\n' + \
'biuro' + '\n' + \
'Nawojowska 12' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Consilium' + '\n' + \
'ul' + '\n' + \
'Nadbrzeżna 3' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja Prawa Dzieci oddział Nowy Sącz' + '\n' + \
'Rynek 30' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Towarzystwo Przyjaciół Dzieci' + '\n' + \
'Świętej Kunegundy 16' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Fundacja SZOK' + '\n' + \
'Władysława Broniewskiego 20 E/13' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Wspólnota Emaus - Nowosądeckie Towarzystwa Pomocy im. św. Brata Alberta' + '\n' + \
'Szwedzka 18' + '\n' + \
'33-300 Nowy Sącz' + '\n\n' + \
'Results found: 19'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_not_on_top')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_blacklisted_skipped(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'88-140 GNIEWKOWO' + '\n\n' + \
'#Przedsiębiorstwo' + '\n\n' + \
'Gniewkowo Sp. z o.o. Przedsiębiorstwo komunalne' + '\n' + \
'Jana Kilińskiego 9' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Techniki Pompowej IMPELLER' + '\n' + \
'Zajezierze 8 B' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Tinapol. PH. Lubańska T.' + '\n' + \
'Wojska Polskiego 23' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Results found: 3'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_blacklisted_skipped')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_blacklisted_allowed(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'88-140 GNIEWKOWO' + '\n\n' + \
'#Przedsiębiorstwo' + '\n\n' + \
'Gniewkowo Sp. z o.o. Przedsiębiorstwo komunalne' + '\n' + \
'Jana Kilińskiego 9' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'I.T.I. Poland Sp. z o.o.' + '\n' + \
'Przemysłowa 2' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Techniki Pompowej IMPELLER' + '\n' + \
'Zajezierze 8 B' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Pipczyńska Katarzyna. Przedsiębiorstwo wielobranżowe' + '\n' + \
'Jana Kilińskiego 49' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Tinapol. PH. Lubańska T.' + '\n' + \
'Wojska Polskiego 23' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Przedsiębiorstwo Wielobranżowe "e-mir"' + '\n' + \
'Toruńska 33a' + '\n' + \
'88-140 Gniewkowo' + '\n\n' + \
'Results found: 6'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_blacklisted_allowed')
searcher = run_google_searcher(data_dir_path, report_dir_path, allow_blacklisted=True)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
def test_multiple_results_twice(create_reports_dirs, remove_reports_dirs):
expected_report = \
'======================================================================' + '\n' + \
'86-134 DOLNA GRUPA' + '\n\n' + \
'#Produkcja' + '\n\n' + \
'O.M.N. Altomix' + '\n' + \
'Dolna Grupa 55B' + '\n' + \
'86-134 Dolna Grupa' + '\n\n' + \
'BUMAX Okna Drzwi Meble' + '\n' + \
'Tartaczna 9' + '\n' + \
'86-134 Dolna Grupa' + '\n\n' + \
'Altomix sp.j. Odlewnia metali' + '\n' + \
'Dolna Grupa 55b' + '\n' + \
'86-134 Dolna Grupa' + '\n\n' + \
'Klocek Ryszard. Deski, więźby dachowe. Usługi tartaczne' + '\n' + \
'86-134 Dolna Grupa; Tartaczna' + '\n' + \
'86-134' + '\n\n' + \
'Kubiak-Pol. Skup, sprzedaż naprawa palet' + '\n' + \
'DK91 18' + '\n' + \
'86-134 Dolna Grupa' + '\n\n' + \
'======================================================================' + '\n' + \
'87-123 GŁOGOWO' + '\n\n' + \
'#Produkcja' + '\n\n' + \
'ALWA' + '\n' + \
'Spokojna 8' + '\n' + \
'87-123 Głogowo' + '\n\n' + \
'WW Ekochem' + '\n' + \
'Akacjowa 1' + '\n' + \
'87-123 Głogowo' + '\n\n' + \
'MECHATRONIKA' + '\n' + \
'Ul: Wilcza 36' + '\n' + \
'Głogowo' + '\n' + \
'87-123 Dobrzejewice' + '\n\n' + \
'Results found: 8'
data_dir_path, report_dir_path = get_io_dir_paths(test_suite, test_case='multiple_results_twice')
searcher = run_google_searcher(data_dir_path, report_dir_path)
assert_report_file_content_equals(expected_report, searcher.report_file_path)
| 46.345622
| 146
| 0.555732
|
7af881feb31cd701b3745c9fdb2f521fb6e438e9
| 5,663
|
py
|
Python
|
src/olympia/legacy_api/utils.py
|
Rob--W/addons-server
|
cc104705e17ddeeb57254403ed292acb904a9a41
|
[
"BSD-3-Clause"
] | 1
|
2020-04-07T07:21:25.000Z
|
2020-04-07T07:21:25.000Z
|
src/olympia/legacy_api/utils.py
|
Rob--W/addons-server
|
cc104705e17ddeeb57254403ed292acb904a9a41
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/legacy_api/utils.py
|
Rob--W/addons-server
|
cc104705e17ddeeb57254403ed292acb904a9a41
|
[
"BSD-3-Clause"
] | 2
|
2018-03-04T00:11:22.000Z
|
2019-12-14T09:45:55.000Z
|
import re
from django.conf import settings
from django.utils.html import strip_tags
from olympia import amo
from olympia.amo.helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlparams, epoch
from olympia.tags.models import Tag
from olympia.versions.compare import version_int
# For app version major.minor matching.
m_dot_n_re = re.compile(r'^\d+\.\d+$')
def addon_to_dict(addon, disco=False, src='api'):
"""
Renders an addon in JSON for the API.
"""
def url(u, **kwargs):
return settings.SITE_URL + urlparams(u, **kwargs)
v = addon.current_version
if disco:
learnmore = settings.SERVICES_URL + reverse('discovery.addons.detail',
args=[addon.slug])
learnmore = urlparams(learnmore, src='discovery-personalrec')
else:
learnmore = url(addon.get_url_path(), src=src)
d = {
'id': addon.id,
'name': unicode(addon.name) if addon.name else None,
'guid': addon.guid,
'status': amo.STATUS_CHOICES_API[addon.status],
'type': amo.ADDON_SLUGS_UPDATE[addon.type],
'authors': [{'id': a.id, 'name': unicode(a.name),
'link': absolutify(a.get_url_path(src=src))}
for a in addon.listed_authors],
'summary': (
strip_tags(unicode(addon.summary)) if addon.summary else None),
'description': strip_tags(unicode(addon.description)),
'icon': addon.icon_url,
'learnmore': learnmore,
'reviews': url(addon.reviews_url),
'total_dls': addon.total_downloads,
'weekly_dls': addon.weekly_downloads,
'adu': addon.average_daily_users,
'created': epoch(addon.created),
'last_updated': epoch(addon.last_updated),
'homepage': unicode(addon.homepage) if addon.homepage else None,
'support': unicode(addon.support_url) if addon.support_url else None,
}
if addon.is_persona():
d['theme'] = addon.persona.theme_data
if v:
d['version'] = v.version
d['platforms'] = [unicode(a.name) for a in v.supported_platforms]
d['compatible_apps'] = [
{unicode(amo.APP_IDS[obj.application].pretty): {
'min': unicode(obj.min), 'max': unicode(obj.max)}}
for obj in v.compatible_apps.values()]
if addon.eula:
d['eula'] = unicode(addon.eula)
if addon.developer_comments:
d['dev_comments'] = unicode(addon.developer_comments)
if addon.takes_contributions:
contribution = {
'link': url(addon.contribution_url, src=src),
'meet_developers': url(addon.meet_the_dev_url(), src=src),
'suggested_amount': addon.suggested_amount,
}
d['contribution'] = contribution
if addon.type == amo.ADDON_PERSONA:
d['previews'] = [addon.persona.preview_url]
else:
d['previews'] = [p.as_dict(src=src) for p in addon.all_previews]
return d
def extract_from_query(term, filter, regexp, end_of_word_boundary=True):
"""
This pulls out a keyword filter from a search term and returns the value
for the filter and a new term with the filter removed.
E.g. term="yslow version:3", filter='version', regexp=r'\w+' will result in
a return value of: ('yslow', '3').
"""
re_string = r'\b%s:\s*(%s)' % (filter, regexp)
if end_of_word_boundary:
re_string += r'\b'
match = re.search(re_string, term)
if match:
term = term.replace(match.group(0), '').strip()
value = match.group(1)
else:
value = None
return (term, value)
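# A quick illustrative check of extract_from_query (hypothetical usage, not
# part of the original module):
#
#   >>> extract_from_query('yslow version:3', 'version', r'[0-9.]+')
#   ('yslow', '3')
#   >>> extract_from_query('yslow', 'version', r'[0-9.]+')
#   ('yslow', None)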
def extract_filters(term, opts=None):
"""
Pulls all the filtering options out of the term and returns a cleaned term
and a dictionary of filter names and filter values. Term filters override
filters found in opts.
"""
opts = opts or {}
filters = {}
params = {}
# Type filters.
term, addon_type = extract_from_query(term, 'type', r'\w+')
addon_type = addon_type or opts.get('addon_type')
if addon_type:
try:
atype = int(addon_type)
if atype in amo.ADDON_SEARCH_TYPES:
filters['type'] = atype
except ValueError:
# `addon_type` is not a digit.
# Try to find it in `ADDON_SEARCH_SLUGS`.
atype = amo.ADDON_SEARCH_SLUGS.get(addon_type.lower())
if atype:
filters['type'] = atype
# Platform and version filters.
# We don't touch the filters dict for platform and version: that filtering
# is (sadly) done by the view after ES has returned results, using
# addon.compatible_version().
term, platform = extract_from_query(term, 'platform', r'\w+')
params['platform'] = platform or opts.get('platform')
term, version = extract_from_query(term, 'version', '[0-9.]+')
params['version'] = version or opts.get('version')
# Tag filters.
term, tag = extract_from_query(term, 'tag', r'\w+')
if tag:
tag = Tag.objects.filter(tag_text=tag).values_list('tag_text',
flat=True)
if tag:
filters['tags__in'] = list(tag)
return (term, filters, params)
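# Sketch of how extract_filters composes the per-keyword extraction above
# (illustrative values; the tag branch normally needs a database):
#
#   term, filters, params = extract_filters('firebug type:1 version:3.6')
#   # term    -> 'firebug'
#   # filters -> {'type': 1}, assuming 1 is in amo.ADDON_SEARCH_TYPES
#   # params  -> {'platform': None, 'version': '3.6'}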
def filter_version(version, app_id):
"""
Returns filters that can be sent to ES for app version ranges.
If the version is a alpha, beta, or pre-release this does an exact match.
Otherwise it will query where max >= M.Na and min <= M.N.
"""
low = version_int(version)
return {'appversion.%s.min__lte' % app_id: low}
| 33.91018
| 78
| 0.616987
|
ce49f86e05134b930e7bd9fc0154f56602ec04be
| 152
|
py
|
Python
|
lab02/ejercicios/apps.py
|
AlexanderRod/TECSUP-DAE-2021-2
|
47b2cce717ff012c1b40394955388d8b2a8beb63
|
[
"MIT"
] | null | null | null |
lab02/ejercicios/apps.py
|
AlexanderRod/TECSUP-DAE-2021-2
|
47b2cce717ff012c1b40394955388d8b2a8beb63
|
[
"MIT"
] | null | null | null |
lab02/ejercicios/apps.py
|
AlexanderRod/TECSUP-DAE-2021-2
|
47b2cce717ff012c1b40394955388d8b2a8beb63
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class EjerciciosConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'ejercicios'
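# For reference (not part of the generated file): Django only uses this
# config once the app is registered in the project settings, e.g.
#
#   INSTALLED_APPS = [
#       ...,
#       'ejercicios',
#   ]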
| 21.714286
| 56
| 0.769737
|
8d51f6d9f2c1e47db94f09d41aef877c93205498
| 1,541
|
py
|
Python
|
botcord/ext/commands.py
|
KEN-2000l/BotCord
|
5184a0fc5af03e31ff216469f5dec6c12490e918
|
[
"MIT"
] | null | null | null |
botcord/ext/commands.py
|
KEN-2000l/BotCord
|
5184a0fc5af03e31ff216469f5dec6c12490e918
|
[
"MIT"
] | null | null | null |
botcord/ext/commands.py
|
KEN-2000l/BotCord
|
5184a0fc5af03e31ff216469f5dec6c12490e918
|
[
"MIT"
] | null | null | null |
import os
from discord.ext.commands import Cog as _Cog
from botcord.configs import YAML, recursive_update
# noinspection PyAttributeOutsideInit
class Cog(_Cog):
def config_init(self, file, path='configs.yml'):
"""PASS THE __file__ VARIABLE IN AS AN ARGUMENT FROM THE EXTENSION FILE,
SO THE CONFIG PATH IS IN THE EXTENSION'S FOLDER AND NOT IN THE BOTCORD FILES HERE"""
self._config_dir = f'{os.path.dirname(os.path.abspath(file))}/{path}'
self.load_config()
self._configed = True
def save_config(self):
with open(self._config_dir, mode='w', encoding='UTF-8') as file:
YAML.dump(self.config, file)
def load_config(self):
self._config = self._load_config()
def refresh_config(self):
file_conf = self._load_config()
recursive_update(file_conf, self.config)
self.save_config()
def _load_config(self):
with open(self._config_dir, mode='a+', encoding='UTF-8') as wfile:
wfile.seek(0)
wloaded = YAML.load(wfile)
if not wloaded:
wloaded = {}
return wloaded
@property
def config(self) -> dict:
if not getattr(self, '_configed', False):
raise AttributeError(f'type {type(self).__name__} has no attribute \'config\'\n'
f'NOTE: Please call \'config_init()\' if you wish to utilize config files for this Cog.')
return self._config
def cog_unload(self):
self.save_config()
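# Minimal usage sketch (hypothetical extension, not shipped with BotCord):
#
#   class Greetings(Cog):
#       def __init__(self, bot):
#           self.bot = bot
#           self.config_init(__file__)   # keeps configs.yml in this folder
#           self.config.setdefault('greeting', 'hello')
#           self.save_config()
#
# Passing __file__ is what anchors the YAML file next to the extension, as
# the docstring in config_init() insists.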
| 33.5
| 122
| 0.625568
|
e22ad34aeeea2777d6ccbf13008e44ca5ec9b456
| 6,805
|
py
|
Python
|
howdy/tv/cli/get_tv_batch.py
|
tanimislam/plexstuff
|
811dd504e8464df1270a27084ef465c15299b00a
|
[
"BSD-2-Clause"
] | 9
|
2019-11-10T16:41:24.000Z
|
2020-06-17T12:35:42.000Z
|
howdy/tv/cli/get_tv_batch.py
|
tanimislam/plexstuff
|
811dd504e8464df1270a27084ef465c15299b00a
|
[
"BSD-2-Clause"
] | 2
|
2020-06-27T15:52:22.000Z
|
2020-07-29T20:36:07.000Z
|
howdy/tv/cli/get_tv_batch.py
|
tanimislam/plexstuff
|
811dd504e8464df1270a27084ef465c15299b00a
|
[
"BSD-2-Clause"
] | 2
|
2019-10-28T10:03:06.000Z
|
2020-05-22T18:32:40.000Z
|
import sys, signal
from howdy import signal_handler
signal.signal( signal.SIGINT, signal_handler )
import os, numpy, glob, time, datetime
import multiprocessing, logging
from argparse import ArgumentParser
#
from howdy.core import core
from howdy.tv import tv, get_token
def finish_statement( step ):
return '%d, finished on %s.' % ( step + 1, datetime.datetime.now( ).strftime(
'%B %d, %Y @ %I:%M:%S %p' ) )
def main( ):
time0 = time.time( )
default_time = 1000
default_iters = 2
default_num_threads = 2 * multiprocessing.cpu_count( )
#
parser = ArgumentParser( )
parser.add_argument('--maxtime', dest='maxtime_in_secs', type=int, action='store', default = default_time,
help = ' '.join([
'The maximum amount of time to spend (in seconds),',
'per candidate magnet link,',
'trying to download a TV show.',
'Default is %d seconds.' % default_time ] ) )
parser.add_argument('--num', dest='num_iters', type=int, action='store', default = default_iters,
help = ' '.join([
'The maximum number of different magnet links to try',
'before giving up. Default is %d.' % default_iters ]) )
parser.add_argument('--token', dest='token', type=str, action='store',
help = 'Optional argument. If chosen, user provided Plex access token.')
parser.add_argument('--debuglevel', dest='debug_level', action='store', type=str, default = 'None',
choices = [ 'None', 'info', 'debug' ], help = 'Choose the debug level for the system logger. Default is None (no logging). Can be one of None (no logging), info, or debug.' )
parser.add_argument('--numthreads', dest='numthreads', type=int, action='store', default = default_num_threads,
help = 'Number of threads over which to search for TV shows in my library. Default is %d.' %
default_num_threads )
parser.add_argument('--nomax', dest='do_restrict_maxsize', action='store_false', default=True,
help = 'If chosen, do not restrict maximum size of downloaded file.' )
parser.add_argument('--nomin', dest='do_restrict_minsize', action='store_false', default=True,
help = 'If chosen, do not restrict minimum size of downloaded file.' )
parser.add_argument('--raw', dest='do_raw', action='store_true', default = False,
help = 'If chosen, then use the raw string to specify TV show torrents.' )
parser.add_argument('--x265', dest='do_x265', action='store_true', default = False,
help = 'If chosen, then append "x265" to the torrent search (do an explicit search for HEVC/H.265 torrents). Only works with --raw flag set.' )
args = parser.parse_args( )
#
logger = logging.getLogger( )
if args.debug_level == 'info': logger.setLevel( logging.INFO )
if args.debug_level == 'debug': logger.setLevel( logging.DEBUG )
assert( args.maxtime_in_secs >= 60 ), 'error, max time must be >= 60 seconds.'
assert( args.num_iters >= 1 ), 'error, must have a positive number of maximum iterations.'
step = 0
print( '%d, started on %s' % ( step, datetime.datetime.now( ).strftime( '%B %d, %Y @ %I:%M:%S %p' ) ) )
step += 1
#
## get plex server token
dat = core.checkServerCredentials( doLocal = True )
if dat is None:
print('\n'.join([
'%d, error, could not access local Plex server in %0.3f seconds. Exiting...' % (
step, time.time( ) - time0 ),
finish_statement( step ) ] ) )
return
fullURL, token = dat
if args.token is not None: token = args.token
#
## first find out which libraries are the TV show ones
library_dict = core.get_libraries( token,
fullURL = fullURL, do_full = True )
if library_dict is None:
print('\n'.join([
'%d, error, could not access libraries in plex server in %0.3f seconds. Exiting...' % (
step, time.time( ) - time0 ), finish_statement( step ) ]))
return
#
valid_keys = list(filter(lambda key: library_dict[ key ][ -1 ] ==
'show', library_dict ) )
if len( valid_keys ) == 0:
print('\n'.join([
'%d, error, could not find a TV show library in %0.3f seconds. Exiting...' %
( step, time.time( ) - time0 ), finish_statement( step ) ]))
return
tvlib_title = library_dict[ max( valid_keys ) ][ 0 ]
print( '%d, found TV library: %s.' % ( step, tvlib_title ) )
step += 1
#
## now get the TV shows
time0 = time.time( )
tvdata = core.get_library_data(
tvlib_title, token = token, num_threads = args.numthreads )
print( '%d, found %d shows in the TV library, in %0.3f seconds.' % (
step, len( tvdata ), time.time( ) - time0 ) )
step += 1
showsToExclude = tv.get_shows_to_exclude( tvdata )
if len( showsToExclude ) != 0:
print( '%d, excluding these TV shows: %s.' % (
step, '; '.join( showsToExclude ) ) )
step += 1
tvdb_token = get_token( )
if tvdb_token is None:
print( '\n'.join([
'%d, error, could not access the TVDB API server in %0.3f seconds. Exiting...' % (
step, time.time( ) - time0 ) ] ) )
return
toGet = tv.get_remaining_episodes(
tvdata, showSpecials = False,
showsToExclude = showsToExclude,
num_threads = args.numthreads )
if len( toGet ) == 0:
print('\n'.join([
'%d, no episodes to download in %0.3f seconds. Exiting...' % (
step, time.time( ) - time0 ), finish_statement( step ) ]))
return
print( '%d, took %0.3f seconds to get list of %d episodes to download.' % (
step, time.time( ) - time0, sum(
map(lambda tvshow: len(toGet[tvshow]['episodes']), toGet ) ) ) )
step += 1
#
## now download these episodes
tvTorUnits, newdirs = tv.create_tvTorUnits(
toGet, restrictMaxSize = args.do_restrict_maxsize,
restrictMinSize = args.do_restrict_minsize, do_raw = args.do_raw, do_x265 = args.do_x265 )
print('%d, here are the %d episodes to get: %s.' % ( step,
len( tvTorUnits ), ', '.join(map(lambda tvTorUnit: tvTorUnit[ 'torFname_disp' ], tvTorUnits))))
step += 1
tv.download_batched_tvtorrent_shows(
tvTorUnits, newdirs = newdirs, maxtime_in_secs = args.maxtime_in_secs,
num_iters = args.num_iters )
print( '\n'.join([ '%d, everything done in %0.3f seconds.' % ( step, time.time( ) - time0 ),
finish_statement( step ) ]))
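# Hypothetical direct-run guard; the original script is presumably exposed
# as a console_scripts entry point instead:
#
#   if __name__ == '__main__':
#       main( )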
| 51.165414
| 194
| 0.591036
|
fa03e88b928fbfa2e527ce8113fab56d9f1394cb
| 367
|
py
|
Python
|
30 days of code/Day 25 Running Time and Complexity.py
|
rahamath2009/git-github.com-nishant-sethi-HackerRank
|
14d9bd3e772a863aceba22d9a3361a8325cca4bc
|
[
"Apache-2.0"
] | 76
|
2018-06-28T04:29:14.000Z
|
2022-03-21T01:57:27.000Z
|
30 days of code/Day 25 Running Time and Complexity.py
|
rahamath2009/git-github.com-nishant-sethi-HackerRank
|
14d9bd3e772a863aceba22d9a3361a8325cca4bc
|
[
"Apache-2.0"
] | 31
|
2018-10-01T09:12:05.000Z
|
2022-03-08T23:39:01.000Z
|
30 days of code/Day 25 Running Time and Complexity.py
|
rahamath2009/git-github.com-nishant-sethi-HackerRank
|
14d9bd3e772a863aceba22d9a3361a8325cca4bc
|
[
"Apache-2.0"
] | 44
|
2018-07-09T11:31:20.000Z
|
2022-01-12T19:21:20.000Z
|
import math
def check_prime(num):
if num==1:
return "Not prime"
elif num==2:
return "Prime"
else:
for i in range(2,int(math.sqrt(num))+1):
if num%i==0:
return "Not prime"
return "Prime"
t=int(input())
for i in range(t):
n=int(input())
print(check_prime(n))
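# Why sqrt suffices: if num = a*b with a <= b, then a*a <= num, so any
# composite number has a divisor <= int(math.sqrt(num)). That makes each
# query O(sqrt(n)) instead of O(n); e.g. for n = 97 only divisors 2..9 are
# tried before "Prime" is returned.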
| 22.9375
| 49
| 0.479564
|
c9c0a4de60a46831f2eb6ccaf567a83e40c70589
| 6,645
|
py
|
Python
|
pandas/tests/extension/test_integer.py
|
IamJasonBian/pandas
|
21024d5a8e05f611d0fef5ddf884ffa237643772
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/extension/test_integer.py
|
IamJasonBian/pandas
|
21024d5a8e05f611d0fef5ddf884ffa237643772
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/extension/test_integer.py
|
IamJasonBian/pandas
|
21024d5a8e05f611d0fef5ddf884ffa237643772
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.types import (
is_extension_array_dtype,
is_integer_dtype,
)
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension import base
def make_data():
return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100]
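# make_data() yields exactly 100 values with pd.NA at positions 8 and 97,
# the length/missingness layout the shared extension-test fixtures assume.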
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return pd.array(make_data(), dtype=dtype)
@pytest.fixture
def data_for_twos(dtype):
return pd.array(np.ones(100) * 2, dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return pd.array([pd.NA, 1], dtype=dtype)
@pytest.fixture
def data_for_sorting(dtype):
return pd.array([1, 2, 0], dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
return pd.array([1, pd.NA, 0], dtype=dtype)
@pytest.fixture
def na_cmp():
# we are pd.NA
return lambda x, y: x is pd.NA and y is pd.NA
@pytest.fixture
def na_value():
return pd.NA
@pytest.fixture
def data_for_grouping(dtype):
b = 1
a = 0
c = 2
na = pd.NA
return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)
class TestDtype(base.BaseDtypeTests):
@pytest.mark.skip(reason="using multiple dtypes")
def test_is_dtype_unboxes_dtype(self):
# we have multiple dtypes, so skip
pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def check_opname(self, s, op_name, other, exc=None):
# overwriting to indicate ops don't raise an error
super().check_opname(s, op_name, other, exc=None)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
sdtype = tm.get_dtype(s)
if (
hasattr(other, "dtype")
and not is_extension_array_dtype(other.dtype)
and is_integer_dtype(other.dtype)
and sdtype.is_unsigned_integer
):
# TODO: comment below is inaccurate; other can be int8, int16, ...
# and the trouble is that e.g. if s is UInt8 and other is int8,
# then result is UInt16
# other is np.int64 and would therefore always result in
# upcasting, so keeping other as same numpy_dtype
other = other.astype(sdtype.numpy_dtype)
result = op(s, other)
expected = self._combine(s, other, op)
if op_name in ("__rtruediv__", "__truediv__", "__div__"):
expected = expected.fillna(np.nan).astype("Float64")
else:
# combine method result in 'biggest' (int64) dtype
expected = expected.astype(sdtype)
self.assert_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
class TestComparisonOps(base.BaseComparisonOpsTests):
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
# Override to do the astype to boolean
expected = s.combine(other, op).astype("boolean")
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def check_opname(self, s, op_name, other, exc=None):
super().check_opname(s, op_name, other, exc=None)
def _compare_other(self, s, data, op_name, other):
self.check_opname(s, op_name, other)
class TestInterface(base.BaseInterfaceTests):
pass
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
pass
# for test_concat_mixed_dtypes test
# concat of an Integer and Int coerces to object dtype
# TODO(jreback) once integrated this would
class TestGetitem(base.BaseGetitemTests):
pass
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="uses nullable integer")
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
expected.index = expected.index.astype(all_data.dtype)
self.assert_series_equal(result, expected)
@pytest.mark.skip(reason="uses nullable integer")
def test_value_counts_with_normalize(self, data):
pass
class TestCasting(base.BaseCastingTests):
pass
class TestGroupby(base.BaseGroupbyTests):
pass
class TestNumericReduce(base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
# overwrite to ensure pd.NA is tested instead of np.nan
# https://github.com/pandas-dev/pandas/issues/30958
result = getattr(s, op_name)(skipna=skipna)
if not skipna and s.isna().any():
expected = pd.NA
else:
expected = getattr(s.dropna().astype("int64"), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
@pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py")
class TestBooleanReduce(base.BaseBooleanReduceTests):
pass
class TestPrinting(base.BasePrintingTests):
pass
class TestParsing(base.BaseParsingTests):
pass
class Test2DCompat(base.Dim2CompatTests):
pass
| 26.474104
| 82
| 0.660346
|
a11e4e122c090694f9b5ac44863851e5c16510d7
| 17
|
py
|
Python
|
cosivina/options.py
|
cosivina/cosivina_python
|
0fa88a012f9c48774487fe4947ef36f398b21f49
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
cosivina/options.py
|
cosivina/cosivina_python
|
0fa88a012f9c48774487fe4947ef36f398b21f49
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
cosivina/options.py
|
cosivina/cosivina_python
|
0fa88a012f9c48774487fe4947ef36f398b21f49
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2022-01-13T00:35:50.000Z
|
2022-01-13T00:35:50.000Z
|
useNumba = False
| 8.5
| 16
| 0.764706
|
17637d0e548fca0da887278a378ec38947fe7563
| 16,359
|
py
|
Python
|
packages/w3af/w3af/core/data/fuzzer/tests/test_fuzzer.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | 3
|
2019-04-09T22:59:33.000Z
|
2019-06-14T09:23:24.000Z
|
tools/w3af/w3af/core/data/fuzzer/tests/test_fuzzer.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
tools/w3af/w3af/core/data/fuzzer/tests/test_fuzzer.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
"""
test_fuzzer.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from w3af.core.data.kb.config import Config
from w3af.core.data.kb.config import cf as cf_singleton
from w3af.core.data.fuzzer.fuzzer import create_mutants
from w3af.core.data.request.fuzzable_request import FuzzableRequest
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.parsers.utils.form_params import FormParameters
from w3af.core.data.fuzzer.mutants.querystring_mutant import QSMutant
from w3af.core.data.fuzzer.mutants.cookie_mutant import CookieMutant
from w3af.core.data.fuzzer.mutants.headers_mutant import HeadersMutant
from w3af.core.data.fuzzer.mutants.filename_mutant import FileNameMutant
from w3af.core.data.fuzzer.mutants.postdata_mutant import PostDataMutant
from w3af.core.data.fuzzer.mutants.xmlrpc_mutant import XmlRpcMutant
from w3af.core.data.parsers.doc.tests.test_xmlrpc import XML_WITH_FUZZABLE
from w3af.core.data.dc.cookie import Cookie
from w3af.core.data.dc.headers import Headers
from w3af.core.data.dc.urlencoded_form import URLEncodedForm
class TestFuzzer(unittest.TestCase):
def setUp(self):
self.payloads = ['abc', 'def']
self.cf_backup = Config(cf_singleton)
def tearDown(self):
# NOTE: this only rebinds a local name; the module-level cf singleton
# imported above is not actually restored by this assignment.
cf_singleton = self.cf_backup
def assertAllInstance(self, items, _type):
for item in items:
self.assertIsInstance(item, _type)
def assertAllHaveTokens(self, items):
self.assertTrue(all([m.get_token() is not None for m in items]))
def test_simple(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/?id=1')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
expected_urls = ['http://moth/?id=abc',
'http://moth/?id=def']
generated_urls = [m.get_uri().url_string for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(generated_mutants, QSMutant)
self.assertAllHaveTokens(generated_mutants)
def test_empty_string_as_payload(self):
url = URL('http://moth/?id=1&spam=2')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, [''])
expected_urls = ['http://moth/?id=&spam=2',
'http://moth/?id=1&spam=']
generated_urls = [m.get_uri().url_string for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(generated_mutants, QSMutant)
self.assertAllHaveTokens(generated_mutants)
def test_empty_string_as_payload_one_param(self):
url = URL('http://moth/?id=1')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, [''])
expected_urls = ['http://moth/?id=']
generated_urls = [m.get_uri().url_string for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(generated_mutants, QSMutant)
self.assertAllHaveTokens(generated_mutants)
def test_special_url_characters(self):
initial_url = 'http://w3af.org/' \
'?__VIEWSTATE=/' \
'&__EVENTVALIDATION=\\X+W=='\
'&_ctl0:TextBox1=%s'
url = URL(initial_url % '')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
decoded_url = 'http://w3af.org/' \
'?__VIEWSTATE=/' \
'&__EVENTVALIDATION=\\X%%20W=='\
'&_ctl0:TextBox1=%s'
expected_urls = [decoded_url % 'abc',
decoded_url % 'def']
generated_urls = [str(m.get_uri()) for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(generated_mutants, QSMutant)
self.assertAllHaveTokens(generated_mutants)
def test_fuzz_headers_no_headers_in_request(self):
cf_singleton.save('fuzzable_headers', ['Referer']) # This one changed
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/?id=1')
# No headers in the original request
#headers = Headers([('Referer', 'http://moths/')])
freq = FuzzableRequest(url)
mutants = create_mutants(freq, self.payloads)
expected_urls = ['http://moth/?id=abc',
'http://moth/?id=def',
'http://moth/?id=1',
'http://moth/?id=1', ]
generated_urls = [m.get_uri().url_string for m in mutants]
self.assertEqual(generated_urls, expected_urls)
expected_headers = [Headers([('Referer', '')]),
Headers([('Referer', '')]),
Headers([('Referer', 'abc')]),
Headers([('Referer', 'def')]), ]
generated_headers = [m.get_headers() for m in mutants]
self.assertEqual(expected_headers, generated_headers)
self.assertAllInstance(mutants[:2], QSMutant)
self.assertAllInstance(mutants[2:], HeadersMutant)
self.assertAllHaveTokens(mutants)
def test_fuzz_headers(self):
cf_singleton.save('fuzzable_headers', ['Referer']) # This one changed
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/?id=1')
# With headers
headers = Headers([('Referer', 'http://moths/'),
('Foo', 'Bar')])
freq = FuzzableRequest(url, headers=headers)
generated_mutants = create_mutants(freq, self.payloads)
expected_urls = ['http://moth/?id=abc',
'http://moth/?id=def',
'http://moth/?id=1',
'http://moth/?id=1', ]
generated_urls = [m.get_uri().url_string for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
expected_headers = [
headers,
headers,
Headers([('Referer', 'abc'), ('Foo', 'Bar')]),
Headers([('Referer', 'def'), ('Foo', 'Bar')]),]
generated_headers = [m.get_headers() for m in generated_mutants]
self.assertEqual(expected_headers, generated_headers)
self.assertAllInstance(generated_mutants[:2], QSMutant)
self.assertAllInstance(generated_mutants[2:], HeadersMutant)
self.assertAllHaveTokens(generated_mutants)
def test_no_cookie_in_request(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', True) # This one changed
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/?id=1')
# But there is no cookie
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
expected_urls = ['http://moth/?id=abc',
'http://moth/?id=def']
generated_urls = [m.get_uri().url_string for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(generated_mutants, QSMutant)
self.assertAllHaveTokens(generated_mutants)
def test_qs_and_cookie(self):
"""
Even when fuzz_cookies is True, we won't create HeaderMutants based
on a FuzzableRequest. This is one of the ugly things related to
https://github.com/andresriancho/w3af/issues/3149, which we fixed!
"""
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', True) # This one changed
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/?id=1')
# And now there is a cookie
cookie = Cookie('foo=bar')
freq = FuzzableRequest(url, cookie=cookie)
mutants = create_mutants(freq, self.payloads)
expected_urls = [u'http://moth/?id=abc',
u'http://moth/?id=def',
u'http://moth/?id=1',
u'http://moth/?id=1']
generated_urls = [m.get_uri().url_string for m in mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(mutants[:2], QSMutant)
self.assertAllInstance(mutants[2:], CookieMutant)
self.assertAllHaveTokens(mutants)
def test_filename_only_dir_path(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', True) # This one changed
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
self.assertEqual(generated_mutants, [])
def test_filename_fname_qs(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', True) # This one changed
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/foo.htm?id=1')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
expected_urls = [u'http://moth/foo.htm?id=abc',
u'http://moth/foo.htm?id=def',
u'http://moth/abc.htm',
u'http://moth/def.htm',
u'http://moth/foo.abc',
u'http://moth/foo.def',
]
generated_urls = [m.get_uri().url_string for m in generated_mutants]
self.assertEqual(generated_urls, expected_urls)
self.assertAllInstance(generated_mutants[:2], QSMutant)
self.assertAllInstance(generated_mutants[2:], FileNameMutant)
self.assertAllHaveTokens(generated_mutants)
def test_form_file_qs(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', True) # This one changed
cf_singleton.save('fuzz_url_parts', False)
url = URL('http://moth/foo.htm')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
self.assertEqual(generated_mutants, [])
def test_xmlrpc_mutant(self):
url = URL('http://moth/?id=1')
post_data = XML_WITH_FUZZABLE
headers = Headers()
freq = FuzzableRequest.from_parts(url, 'POST', post_data, headers)
mutants = create_mutants(freq, self.payloads)
self.assertAllInstance(mutants[:2], QSMutant)
self.assertAllInstance(mutants[4:], XmlRpcMutant)
self.assertAllHaveTokens(mutants)
def test_form_file_post_no_files(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', True) # This one changed
cf_singleton.save('fuzz_url_parts', False)
form_params = FormParameters()
form_params.add_field_by_attr_items([("name", "username"), ("value", "")])
form_params.add_field_by_attr_items([("name", "address"), ("value", "")])
form = URLEncodedForm(form_params)
freq = FuzzableRequest(URL('http://www.w3af.com/?id=3'), post_data=form,
method='PUT')
mutants = create_mutants(freq, self.payloads)
self.assertTrue(all(isinstance(m, QSMutant) for m in mutants[:2]))
self.assertTrue(all(isinstance(m, PostDataMutant) for m in mutants[4:]))
self.assertTrue(all(m.get_method() == 'PUT' for m in mutants))
expected_uris = {'http://www.w3af.com/?id=abc',
'http://www.w3af.com/?id=def',
'http://www.w3af.com/?id=3',
'http://www.w3af.com/?id=3',
'http://www.w3af.com/?id=3',
'http://www.w3af.com/?id=3'}
created_uris = set([i.get_uri().url_string for i in mutants])
self.assertEqual(expected_uris, created_uris)
expected_dcs = {'id=abc', 'id=def',
'username=abc&address=Bonsai%20Street%20123',
'username=def&address=Bonsai%20Street%20123',
'username=John8212&address=abc',
'username=John8212&address=def'}
created_dcs = set([str(i.get_dc()) for i in mutants])
self.assertEqual(created_dcs, expected_dcs)
def test_urlparts_no_path(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', False)
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', True) # This one changed
url = URL('http://moth/')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
self.assertEqual(generated_mutants, [])
def test_urlparts_filename_path_qs(self):
cf_singleton.save('fuzzable_headers', [])
cf_singleton.save('fuzz_cookies', False)
cf_singleton.save('fuzz_url_filenames', True) # This one changed
cf_singleton.save('fuzzed_files_extension', 'gif')
cf_singleton.save('fuzz_form_files', False)
cf_singleton.save('fuzz_url_parts', True) # This one changed
url = URL('http://moth/foo/bar.htm?id=1')
freq = FuzzableRequest(url)
generated_mutants = create_mutants(freq, self.payloads)
generated_uris = [m.get_uri().url_string for m in generated_mutants]
expected_uris = [
'http://moth/foo/bar.htm?id=abc',
'http://moth/foo/bar.htm?id=def',
'http://moth/foo/abc.htm',
'http://moth/foo/def.htm',
'http://moth/foo/bar.abc',
'http://moth/foo/bar.def',
'http://moth/abc/bar.htm',
'http://moth/def/bar.htm',
'http://moth/foo/abc',
'http://moth/foo/def',
]
self.assertEqual(generated_uris, expected_uris)
| 41
| 82
| 0.63274
|
3a7ef2b98e8a4b8b411391f30c5d3a22364fd460
| 1,569
|
py
|
Python
|
test/test_coins_forwarding_automations_limit_reached.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_coins_forwarding_automations_limit_reached.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_coins_forwarding_automations_limit_reached.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.coins_forwarding_automations_limit_reached_error import CoinsForwardingAutomationsLimitReachedError
globals()['CoinsForwardingAutomationsLimitReachedError'] = CoinsForwardingAutomationsLimitReachedError
from cryptoapis.model.coins_forwarding_automations_limit_reached import CoinsForwardingAutomationsLimitReached
class TestCoinsForwardingAutomationsLimitReached(unittest.TestCase):
"""CoinsForwardingAutomationsLimitReached unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCoinsForwardingAutomationsLimitReached(self):
"""Test CoinsForwardingAutomationsLimitReached"""
# FIXME: construct object with mandatory attributes with example values
# model = CoinsForwardingAutomationsLimitReached() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 40.230769
| 484
| 0.793499
|
08843696e00d64f211f18db087d90b1b3a29c9de
| 3,808
|
py
|
Python
|
opensilexClientToolsPython/models/infrastructure_facility_named_dto.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | null | null | null |
opensilexClientToolsPython/models/infrastructure_facility_named_dto.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | 7
|
2021-05-25T14:06:04.000Z
|
2021-11-05T15:42:14.000Z
|
opensilexClientToolsPython/models/infrastructure_facility_named_dto.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
OpenSilex API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: INSTANCE-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InfrastructureFacilityNamedDTO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'uri': 'str',
'name': 'str'
}
attribute_map = {
'uri': 'uri',
'name': 'name'
}
def __init__(self, uri=None, name=None): # noqa: E501
"""InfrastructureFacilityNamedDTO - a model defined in Swagger""" # noqa: E501
self._uri = None
self._name = None
self.discriminator = None
if uri is not None:
self.uri = uri
if name is not None:
self.name = name
@property
def uri(self):
"""Gets the uri of this InfrastructureFacilityNamedDTO. # noqa: E501
:return: The uri of this InfrastructureFacilityNamedDTO. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this InfrastructureFacilityNamedDTO.
:param uri: The uri of this InfrastructureFacilityNamedDTO. # noqa: E501
:type: str
"""
self._uri = uri
@property
def name(self):
"""Gets the name of this InfrastructureFacilityNamedDTO. # noqa: E501
:return: The name of this InfrastructureFacilityNamedDTO. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this InfrastructureFacilityNamedDTO.
:param name: The name of this InfrastructureFacilityNamedDTO. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InfrastructureFacilityNamedDTO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InfrastructureFacilityNamedDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
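# Illustrative round-trip with hypothetical values (not generated code):
#
#   dto = InfrastructureFacilityNamedDTO(uri='test:greenhouse-1',
#                                        name='Greenhouse 1')
#   dto.to_dict()  # {'uri': 'test:greenhouse-1', 'name': 'Greenhouse 1'}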
| 26.816901
| 119
| 0.565651
|
b61640f0828ba362065d84fcdf7e1967c7d99071
| 19,524
|
py
|
Python
|
model.py
|
enijkamp/glow
|
b12a2768fdb4bbe5419d40c81f3b6f219def2057
|
[
"MIT"
] | null | null | null |
model.py
|
enijkamp/glow
|
b12a2768fdb4bbe5419d40c81f3b6f219def2057
|
[
"MIT"
] | null | null | null |
model.py
|
enijkamp/glow
|
b12a2768fdb4bbe5419d40c81f3b6f219def2057
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tfops as Z
import optim
import numpy as np
# import horovod.tensorflow as hvd
from tensorflow.contrib.framework.python.ops import add_arg_scope
'''
f_loss: function taking (iterator, is_training, reuse=False) as input and
returning a tuple whose first element is the loss.
'''
def abstract_model_xy(sess, hps, feeds, train_iterator, test_iterator, data_init, lr, f_loss):
# == Create class with static fields and methods
class m(object):
pass
m.sess = sess
m.feeds = feeds
m.lr = lr
# === Loss and optimizer
loss_train, stats_train = f_loss(train_iterator, True)
all_params = tf.trainable_variables()
if hps.gradient_checkpointing == 1:
from memory_saving_gradients import gradients
gs = gradients(loss_train, all_params)
else:
gs = tf.gradients(loss_train, all_params)
optimizer = {'adam': optim.adam, 'adamax': optim.adamax,
'adam2': optim.adam2}[hps.optimizer]
train_op, polyak_swap_op, ema = optimizer(
all_params, gs, alpha=lr, hps=hps)
if hps.direct_iterator:
m.train = lambda _lr: sess.run([train_op, stats_train], {lr: _lr})[1]
else:
def _train(_lr):
_x, _y = train_iterator()
return sess.run([train_op, stats_train], {feeds['x']: _x,
feeds['y']: _y, lr: _lr})[1]
m.train = _train
m.polyak_swap = lambda: sess.run(polyak_swap_op)
# === Testing
loss_test, stats_test = f_loss(test_iterator, False, reuse=True)
if hps.direct_iterator:
m.test = lambda: sess.run(stats_test)
else:
def _test():
_x, _y = test_iterator()
return sess.run(stats_test, {feeds['x']: _x,
feeds['y']: _y})
m.test = _test
# === Saving and restoring
saver = tf.train.Saver()
saver_ema = tf.train.Saver(ema.variables_to_restore())
m.save_ema = lambda path: saver_ema.save(
sess, path, write_meta_graph=False)
m.save = lambda path: saver.save(sess, path, write_meta_graph=False)
m.restore = lambda path: saver.restore(sess, path)
# === Initialize the parameters
if hps.restore_path != '':
m.restore(hps.restore_path)
else:
with Z.arg_scope([Z.get_variable_ddi, Z.actnorm], init=True):
results_init = f_loss(None, True, reuse=True)
sess.run(tf.global_variables_initializer())
sess.run(results_init, {feeds['x']: data_init['x'],
feeds['y']: data_init['y']})
# sess.run(hvd.broadcast_global_variables(0))
return m
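# The returned object is a plain namespace: m.train(lr), m.test(),
# m.polyak_swap(), and m.save(path)/m.save_ema(path)/m.restore(path) are
# the whole training-loop interface.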
def codec(hps):
def encoder(z, objective):
eps = []
for i in range(hps.n_levels):
z, objective = revnet2d(str(i), z, objective, hps)
if i < hps.n_levels-1:
z, objective, _eps = split2d("pool"+str(i), z, objective=objective)
eps.append(_eps)
return z, objective, eps
def decoder(z, eps=[None]*hps.n_levels, eps_std=None):
for i in reversed(range(hps.n_levels)):
if i < hps.n_levels-1:
z = split2d_reverse("pool"+str(i), z, eps=eps[i], eps_std=eps_std)
z, _ = revnet2d(str(i), z, 0, hps, reverse=True)
return z
return encoder, decoder
def prior(name, y_onehot, hps):
with tf.variable_scope(name):
n_z = hps.top_shape[-1]
h = tf.zeros([tf.shape(y_onehot)[0]]+hps.top_shape[:2]+[2*n_z])
if hps.learntop:
h = Z.conv2d_zeros('p', h, 2*n_z)
if hps.ycond:
h += tf.reshape(Z.linear_zeros("y_emb", y_onehot,
2*n_z), [-1, 1, 1, 2 * n_z])
pz = Z.gaussian_diag(h[:, :, :, :n_z], h[:, :, :, n_z:])
def logp(z1):
objective = pz.logp(z1)
return objective
def sample(eps=None, eps_std=None):
if eps is not None:
# Already sampled eps. Don't use eps_std
z = pz.sample2(eps)
elif eps_std is not None:
# Sample with given eps_std
z = pz.sample2(pz.eps * tf.reshape(eps_std, [-1, 1, 1, 1]))
else:
# Sample normally
z = pz.sample
return z
def eps(z1):
return pz.get_eps(z1)
return logp, sample, eps
def model(sess, hps, train_iterator, test_iterator, data_init):
# Only for decoding/init, rest use iterators directly
with tf.name_scope('input'):
X = tf.placeholder(
tf.uint8, [None, hps.image_size, hps.image_size, 3], name='image')
Y = tf.placeholder(tf.int32, [None], name='label')
lr = tf.placeholder(tf.float32, None, name='learning_rate')
encoder, decoder = codec(hps)
hps.n_bins = 2. ** hps.n_bits_x
def preprocess(x):
x = tf.cast(x, 'float32')
if hps.n_bits_x < 8:
x = tf.floor(x / 2 ** (8 - hps.n_bits_x))
x = x / hps.n_bins - .5
return x
def postprocess(x):
return tf.cast(tf.clip_by_value(tf.floor((x + .5)*hps.n_bins)*(256./hps.n_bins), 0, 255), 'uint8')
def _f_loss(x, y, is_training, reuse=False):
with tf.variable_scope('model', reuse=reuse):
y_onehot = tf.cast(tf.one_hot(y, hps.n_y, 1, 0), 'float32')
# Discrete -> Continuous
objective = tf.zeros_like(x, dtype='float32')[:, 0, 0, 0]
z = preprocess(x)
z = z + tf.random_uniform(tf.shape(z), 0, 1./hps.n_bins)
objective += - np.log(hps.n_bins) * np.prod(Z.int_shape(z)[1:])
# Encode
z = Z.squeeze2d(z, 2) # > 16x16x12
z, objective, _ = encoder(z, objective)
# Prior
hps.top_shape = Z.int_shape(z)[1:]
logp, _, _ = prior("prior", y_onehot, hps)
objective += logp(z)
# Generative loss
nobj = - objective
bits_x = nobj / (np.log(2.) * int(x.get_shape()[1]) * int(
x.get_shape()[2]) * int(x.get_shape()[3])) # bits per subpixel
# Predictive loss
if hps.weight_y > 0 and hps.ycond:
# Classification loss
h_y = tf.reduce_mean(z, axis=[1, 2])
y_logits = Z.linear_zeros("classifier", h_y, hps.n_y)
bits_y = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=y_onehot, logits=y_logits) / np.log(2.)
# Classification accuracy
y_predicted = tf.argmax(y_logits, 1, output_type=tf.int32)
classification_error = 1 - \
tf.cast(tf.equal(y_predicted, y), tf.float32)
else:
bits_y = tf.zeros_like(bits_x)
classification_error = tf.ones_like(bits_x)
return bits_x, bits_y, classification_error
def f_loss(iterator, is_training, reuse=False):
        # Direct-iterator path intentionally disabled ("if False"); inputs
        # are fed through the X/Y placeholders instead.
        if False and hps.direct_iterator and iterator is not None:
x, y = iterator.get_next()
else:
x, y = X, Y
bits_x, bits_y, pred_loss = _f_loss(x, y, is_training, reuse)
local_loss = bits_x + hps.weight_y * bits_y
stats = [local_loss, bits_x, bits_y, pred_loss]
global_stats = Z.allreduce_mean(
tf.stack([tf.reduce_mean(i) for i in stats]))
return tf.reduce_mean(local_loss), global_stats
feeds = {'x': X, 'y': Y}
m = abstract_model_xy(sess, hps, feeds, train_iterator,
test_iterator, data_init, lr, f_loss)
# === Sampling function
def f_sample(y, eps_std):
with tf.variable_scope('model', reuse=True):
y_onehot = tf.cast(tf.one_hot(y, hps.n_y, 1, 0), 'float32')
_, sample, _ = prior("prior", y_onehot, hps)
z = sample(eps_std=eps_std)
z = decoder(z, eps_std=eps_std)
z = Z.unsqueeze2d(z, 2) # 8x8x12 -> 16x16x3
x = postprocess(z)
return x
m.eps_std = tf.placeholder(tf.float32, [None], name='eps_std')
x_sampled = f_sample(Y, m.eps_std)
def sample(_y, _eps_std):
return m.sess.run(x_sampled, {Y: _y, m.eps_std: _eps_std})
m.sample = sample
if hps.inference:
# === Encoder-Decoder functions
def f_encode(x, y, reuse=True):
with tf.variable_scope('model', reuse=reuse):
y_onehot = tf.cast(tf.one_hot(y, hps.n_y, 1, 0), 'float32')
# Discrete -> Continuous
objective = tf.zeros_like(x, dtype='float32')[:, 0, 0, 0]
z = preprocess(x)
z = z + tf.random_uniform(tf.shape(z), 0, 1. / hps.n_bins)
objective += - np.log(hps.n_bins) * np.prod(Z.int_shape(z)[1:])
# Encode
z = Z.squeeze2d(z, 2) # > 16x16x12
z, objective, eps = encoder(z, objective)
# Prior
hps.top_shape = Z.int_shape(z)[1:]
logp, _, _eps = prior("prior", y_onehot, hps)
objective += logp(z)
eps.append(_eps(z))
return eps
def f_decode(y, eps, reuse=True):
with tf.variable_scope('model', reuse=reuse):
y_onehot = tf.cast(tf.one_hot(y, hps.n_y, 1, 0), 'float32')
_, sample, _ = prior("prior", y_onehot, hps)
z = sample(eps=eps[-1])
z = decoder(z, eps=eps[:-1])
z = Z.unsqueeze2d(z, 2) # 8x8x12 -> 16x16x3
x = postprocess(z)
return x
enc_eps = f_encode(X, Y)
dec_eps = []
print(enc_eps)
for i, _eps in enumerate(enc_eps):
print(_eps)
dec_eps.append(tf.placeholder(tf.float32, _eps.get_shape().as_list(), name="dec_eps_" + str(i)))
dec_x = f_decode(Y, dec_eps)
eps_shapes = [_eps.get_shape().as_list()[1:] for _eps in enc_eps]
def flatten_eps(eps):
# [BS, eps_size]
return np.concatenate([np.reshape(e, (e.shape[0], -1)) for e in eps], axis=-1)
def unflatten_eps(feps):
index = 0
eps = []
bs = feps.shape[0]
for shape in eps_shapes:
eps.append(np.reshape(feps[:, index: index+np.prod(shape)], (bs, *shape)))
index += np.prod(shape)
return eps
        # If model is unconditional, always pass y = np.zeros([bs], dtype=np.int32)
def encode(x, y):
return flatten_eps(sess.run(enc_eps, {X: x, Y: y}))
def decode(y, feps):
eps = unflatten_eps(feps)
feed_dict = {Y: y}
for i in range(len(dec_eps)):
feed_dict[dec_eps[i]] = eps[i]
return sess.run(dec_x, feed_dict)
m.encode = encode
m.decode = decode
return m
def checkpoint(z, logdet):
    # Pack (z, logdet) into a single tensor and register it in the
    # 'checkpoints' collection so memory_saving_gradients can recompute
    # activations instead of storing them.
zshape = Z.int_shape(z)
z = tf.reshape(z, [-1, zshape[1]*zshape[2]*zshape[3]])
logdet = tf.reshape(logdet, [-1, 1])
combined = tf.concat([z, logdet], axis=1)
tf.add_to_collection('checkpoints', combined)
logdet = combined[:, -1]
z = tf.reshape(combined[:, :-1], [-1, zshape[1], zshape[2], zshape[3]])
return z, logdet
@add_arg_scope
def revnet2d(name, z, logdet, hps, reverse=False):
with tf.variable_scope(name):
if not reverse:
for i in range(hps.depth):
z, logdet = checkpoint(z, logdet)
z, logdet = revnet2d_step(str(i), z, logdet, hps, reverse)
z, logdet = checkpoint(z, logdet)
else:
for i in reversed(range(hps.depth)):
z, logdet = revnet2d_step(str(i), z, logdet, hps, reverse)
return z, logdet
# Simpler, new version
@add_arg_scope
def revnet2d_step(name, z, logdet, hps, reverse):
with tf.variable_scope(name):
shape = Z.int_shape(z)
n_z = shape[3]
assert n_z % 2 == 0
if not reverse:
z, logdet = Z.actnorm("actnorm", z, logdet=logdet)
if hps.flow_permutation == 0:
z = Z.reverse_features("reverse", z)
elif hps.flow_permutation == 1:
z = Z.shuffle_features("shuffle", z)
elif hps.flow_permutation == 2:
z, logdet = invertible_1x1_conv("invconv", z, logdet)
else:
raise Exception()
z1 = z[:, :, :, :n_z // 2]
z2 = z[:, :, :, n_z // 2:]
if hps.flow_coupling == 0:
z2 += f("f1", z1, hps.width)
elif hps.flow_coupling == 1:
h = f("f1", z1, hps.width, n_z)
shift = h[:, :, :, 0::2]
# scale = tf.exp(h[:, :, :, 1::2])
scale = tf.nn.sigmoid(h[:, :, :, 1::2] + 2.)
z2 += shift
z2 *= scale
logdet += tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])
else:
raise Exception()
z = tf.concat([z1, z2], 3)
else:
z1 = z[:, :, :, :n_z // 2]
z2 = z[:, :, :, n_z // 2:]
if hps.flow_coupling == 0:
z2 -= f("f1", z1, hps.width)
elif hps.flow_coupling == 1:
h = f("f1", z1, hps.width, n_z)
shift = h[:, :, :, 0::2]
# scale = tf.exp(h[:, :, :, 1::2])
scale = tf.nn.sigmoid(h[:, :, :, 1::2] + 2.)
z2 /= scale
z2 -= shift
logdet -= tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])
else:
raise Exception()
z = tf.concat([z1, z2], 3)
if hps.flow_permutation == 0:
z = Z.reverse_features("reverse", z, reverse=True)
elif hps.flow_permutation == 1:
z = Z.shuffle_features("shuffle", z, reverse=True)
elif hps.flow_permutation == 2:
z, logdet = invertible_1x1_conv(
"invconv", z, logdet, reverse=True)
else:
raise Exception()
z, logdet = Z.actnorm("actnorm", z, logdet=logdet, reverse=True)
return z, logdet
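# --- Illustrative sketch (added for exposition, not part of the original
# source): the affine coupling above is invertible because the reverse
# branch applies the inverse operations in reverse order. A minimal NumPy
# round-trip using the same sigmoid(h + 2.) scale parameterization:
def _demo_affine_coupling_roundtrip():
    import numpy as np
    rng = np.random.RandomState(0)
    z2 = rng.randn(4, 8)
    h = rng.randn(4, 16)  # stands in for the output of f("f1", z1, ...)
    shift = h[:, 0::2]
    scale = 1. / (1. + np.exp(-(h[:, 1::2] + 2.)))  # sigmoid(h + 2.)
    fwd = (z2 + shift) * scale   # forward branch: z2 += shift; z2 *= scale
    back = fwd / scale - shift   # reverse branch: z2 /= scale; z2 -= shift
    assert np.allclose(back, z2)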
def f(name, h, width, n_out=None):
n_out = n_out or int(h.get_shape()[3])
with tf.variable_scope(name):
h = tf.nn.relu(Z.conv2d("l_1", h, width))
h = tf.nn.relu(Z.conv2d("l_2", h, width, filter_size=[1, 1]))
h = Z.conv2d_zeros("l_last", h, n_out)
return h
def f_resnet(name, h, width, n_out=None):
n_out = n_out or int(h.get_shape()[3])
with tf.variable_scope(name):
h = tf.nn.relu(Z.conv2d("l_1", h, width))
h = Z.conv2d_zeros("l_2", h, n_out)
return h
# Invertible 1x1 conv
@add_arg_scope
def invertible_1x1_conv(name, z, logdet, reverse=False):
if True: # Set to "False" to use the LU-decomposed version
with tf.variable_scope(name):
shape = Z.int_shape(z)
w_shape = [shape[3], shape[3]]
# Sample a random orthogonal matrix:
w_init = np.linalg.qr(np.random.randn(
*w_shape))[0].astype('float32')
w = tf.get_variable("W", dtype=tf.float32, initializer=w_init)
# dlogdet = tf.linalg.LinearOperator(w).log_abs_determinant() * shape[1]*shape[2]
dlogdet = tf.cast(tf.log(abs(tf.matrix_determinant(
tf.cast(w, 'float64')))), 'float32') * shape[1]*shape[2]
if not reverse:
_w = tf.reshape(w, [1, 1] + w_shape)
z = tf.nn.conv2d(z, _w, [1, 1, 1, 1],
'SAME', data_format='NHWC')
logdet += dlogdet
return z, logdet
else:
_w = tf.matrix_inverse(w)
_w = tf.reshape(_w, [1, 1]+w_shape)
z = tf.nn.conv2d(z, _w, [1, 1, 1, 1],
'SAME', data_format='NHWC')
logdet -= dlogdet
return z, logdet
else:
# LU-decomposed version
shape = Z.int_shape(z)
with tf.variable_scope(name):
dtype = 'float64'
# Random orthogonal matrix:
import scipy
np_w = scipy.linalg.qr(np.random.randn(shape[3], shape[3]))[
0].astype('float32')
np_p, np_l, np_u = scipy.linalg.lu(np_w)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(abs(np_s))
np_u = np.triu(np_u, k=1)
p = tf.get_variable("P", initializer=np_p, trainable=False)
l = tf.get_variable("L", initializer=np_l)
sign_s = tf.get_variable(
"sign_S", initializer=np_sign_s, trainable=False)
log_s = tf.get_variable("log_S", initializer=np_log_s)
# S = tf.get_variable("S", initializer=np_s)
u = tf.get_variable("U", initializer=np_u)
p = tf.cast(p, dtype)
l = tf.cast(l, dtype)
sign_s = tf.cast(sign_s, dtype)
log_s = tf.cast(log_s, dtype)
u = tf.cast(u, dtype)
w_shape = [shape[3], shape[3]]
l_mask = np.tril(np.ones(w_shape, dtype=dtype), -1)
l = l * l_mask + tf.eye(*w_shape, dtype=dtype)
u = u * np.transpose(l_mask) + tf.diag(sign_s * tf.exp(log_s))
w = tf.matmul(p, tf.matmul(l, u))
if True:
u_inv = tf.matrix_inverse(u)
l_inv = tf.matrix_inverse(l)
p_inv = tf.matrix_inverse(p)
w_inv = tf.matmul(u_inv, tf.matmul(l_inv, p_inv))
else:
w_inv = tf.matrix_inverse(w)
w = tf.cast(w, tf.float32)
w_inv = tf.cast(w_inv, tf.float32)
log_s = tf.cast(log_s, tf.float32)
if not reverse:
w = tf.reshape(w, [1, 1] + w_shape)
z = tf.nn.conv2d(z, w, [1, 1, 1, 1],
'SAME', data_format='NHWC')
logdet += tf.reduce_sum(log_s) * (shape[1]*shape[2])
return z, logdet
else:
w_inv = tf.reshape(w_inv, [1, 1]+w_shape)
z = tf.nn.conv2d(
z, w_inv, [1, 1, 1, 1], 'SAME', data_format='NHWC')
logdet -= tf.reduce_sum(log_s) * (shape[1]*shape[2])
return z, logdet
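# --- Illustrative sketch (added for exposition): a 1x1 convolution with
# kernel W acts as an independent channel mixing at every spatial position,
# so the Jacobian is block-diagonal with H*W copies of W and the
# log-determinant reduces to H * W * log|det(W)|, matching `dlogdet` above.
def _demo_invconv_logdet():
    import numpy as np
    rng = np.random.RandomState(0)
    h, w, c = 3, 5, 4
    W = np.linalg.qr(rng.randn(c, c))[0]  # random orthogonal init, as above
    dlogdet = h * w * np.log(abs(np.linalg.det(W)))
    # An orthogonal W has |det W| == 1, so the flow starts volume-preserving.
    assert np.isclose(dlogdet, 0.0)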
@add_arg_scope
def split2d(name, z, objective=0.):
with tf.variable_scope(name):
n_z = Z.int_shape(z)[3]
z1 = z[:, :, :, :n_z // 2]
z2 = z[:, :, :, n_z // 2:]
pz = split2d_prior(z1)
objective += pz.logp(z2)
z1 = Z.squeeze2d(z1)
eps = pz.get_eps(z2)
return z1, objective, eps
@add_arg_scope
def split2d_reverse(name, z, eps, eps_std):
with tf.variable_scope(name):
z1 = Z.unsqueeze2d(z)
pz = split2d_prior(z1)
if eps is not None:
# Already sampled eps
z2 = pz.sample2(eps)
elif eps_std is not None:
# Sample with given eps_std
z2 = pz.sample2(pz.eps * tf.reshape(eps_std, [-1, 1, 1, 1]))
else:
# Sample normally
z2 = pz.sample
z = tf.concat([z1, z2], 3)
return z
@add_arg_scope
def split2d_prior(z):
n_z2 = int(z.get_shape()[3])
n_z1 = n_z2
h = Z.conv2d_zeros("conv", z, 2 * n_z1)
mean = h[:, :, :, 0::2]
logs = h[:, :, :, 1::2]
return Z.gaussian_diag(mean, logs)
| 33.374359
| 113
| 0.522997
|
76c012b75bfb8de221a3fa27d87c1fb47c5dbefd
| 8,233
|
py
|
Python
|
python/federatedml/statistic/intersect/base_intersect.py
|
jat001/FATE
|
b402362fb82869651ba6122f4ec1cf5499a644da
|
[
"Apache-2.0"
] | null | null | null |
python/federatedml/statistic/intersect/base_intersect.py
|
jat001/FATE
|
b402362fb82869651ba6122f4ec1cf5499a644da
|
[
"Apache-2.0"
] | null | null | null |
python/federatedml/statistic/intersect/base_intersect.py
|
jat001/FATE
|
b402362fb82869651ba6122f4ec1cf5499a644da
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import uuid
from federatedml.param.intersect_param import IntersectParam
from federatedml.statistic.intersect.intersect_preprocess import BitArray
from federatedml.transfer_variable.transfer_class.intersection_func_transfer_variable import IntersectionFuncTransferVariable
from federatedml.util import LOGGER
class Intersect(object):
def __init__(self):
super().__init__()
self.cache_id = None
self.model_param = IntersectParam()
self.transfer_variable = None
self.cache_transfer_variable = IntersectionFuncTransferVariable().cache_id_from_host
self.filter = None
self.intersect_num = None
self.cache = None
self.model_param_name = "IntersectModelParam"
self.model_meta_name = "IntersectModelMeta"
self._guest_id = None
self._host_id = None
self._host_id_list = None
def load_params(self, param):
self.model_param = param
self.intersect_method = param.intersect_method
self.only_output_key = param.only_output_key
self.sync_intersect_ids = param.sync_intersect_ids
self.cardinality_only = param.cardinality_only
self.sync_cardinality = param.sync_cardinality
self.run_preprocess = param.run_preprocess
self.intersect_preprocess_params = param.intersect_preprocess_params
self.run_cache = param.run_cache
@property
def guest_party_id(self):
return self._guest_id
@guest_party_id.setter
def guest_party_id(self, guest_id):
if not isinstance(guest_id, int):
raise ValueError("party id should be integer, but get {}".format(guest_id))
self._guest_id = guest_id
@property
def host_party_id(self):
return self._host_id
@host_party_id.setter
def host_party_id(self, host_id):
if not isinstance(host_id, int):
raise ValueError("party id should be integer, but get {}".format(host_id))
self._host_id = host_id
@property
def host_party_id_list(self):
return self._host_id_list
@host_party_id_list.setter
def host_party_id_list(self, host_id_list):
if not isinstance(host_id_list, list):
raise ValueError(
"type host_party_id should be list, but get {} with {}".format(type(host_id_list), host_id_list))
self._host_id_list = host_id_list
def get_intersect_method_meta(self):
pass
def get_intersect_key(self, party_id):
pass
def load_intersect_key(self, cache_meta):
pass
def run_intersect(self, data_instances):
raise NotImplementedError("method should not be called here")
def run_cardinality(self, data_instances):
raise NotImplementedError("method should not be called here")
def generate_cache(self, data_instances):
raise NotImplementedError("method should not be called here")
@staticmethod
def extract_cache_list(cache_data, party_list):
if not isinstance(party_list, list):
party_list = [party_list]
cache_list = [cache_data.get(str(party_id)) for party_id in party_list]
return cache_list
def run_cache_intersect(self, data_instances, cache_data):
raise NotImplementedError("method should not be called here")
def set_flowid(self, flowid=0):
if self.transfer_variable is not None:
self.transfer_variable.set_flowid(flowid)
@staticmethod
def get_value_from_data(intersect_ids, data_instances):
if intersect_ids is not None:
intersect_ids = intersect_ids.join(data_instances, lambda i, d: d)
intersect_ids.schema = data_instances.schema
LOGGER.info("obtain intersect data_instances!")
return intersect_ids
@staticmethod
def get_common_intersection(intersect_ids_list: list, keep_encrypt_ids=False):
if len(intersect_ids_list) == 1:
return intersect_ids_list[0]
if keep_encrypt_ids:
def f(id, v): return id + v
else:
def f(id, v): return "id"
intersect_ids = None
for i, value in enumerate(intersect_ids_list):
if intersect_ids is None:
intersect_ids = value
continue
intersect_ids = intersect_ids.join(value, f)
return intersect_ids
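    # --- Illustrative sketch (added for exposition): on plain dicts, the
    # repeated join above amounts to intersecting the key sets, e.g.:
    #
    #   tables = [{'a': 1, 'b': 1, 'c': 1}, {'b': 1, 'c': 1}, {'c': 1, 'd': 1}]
    #   common = set(tables[0])
    #   for t in tables[1:]:
    #       common &= set(t)
    #   assert common == {'c'}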
@staticmethod
def extract_intersect_ids(intersect_ids, all_ids):
intersect_ids = intersect_ids.join(all_ids, lambda e, h: h)
return intersect_ids
@staticmethod
def filter_intersect_ids(encrypt_intersect_ids, keep_encrypt_ids=False):
if keep_encrypt_ids:
def f(k, v): return (v, [k])
else:
def f(k, v): return (v, 1)
if len(encrypt_intersect_ids) > 1:
raw_intersect_ids = [e.map(f) for e in encrypt_intersect_ids]
intersect_ids = Intersect.get_common_intersection(raw_intersect_ids, keep_encrypt_ids)
else:
intersect_ids = encrypt_intersect_ids[0]
intersect_ids = intersect_ids.map(f)
return intersect_ids
@staticmethod
def map_raw_id_to_encrypt_id(raw_id_data, encrypt_id_data, keep_value=False):
encrypt_id_data_exchange_kv = encrypt_id_data.map(lambda k, v: (v, k))
encrypt_raw_id = raw_id_data.join(encrypt_id_data_exchange_kv, lambda r, e: (e, r))
if keep_value:
encrypt_common_id = encrypt_raw_id.map(lambda k, v: (v[0], v[1]))
else:
encrypt_common_id = encrypt_raw_id.map(lambda k, v: (v[0], "id"))
return encrypt_common_id
@staticmethod
def map_encrypt_id_to_raw_id(encrypt_id_data, raw_id_data):
"""
Parameters
----------
encrypt_id_data: E(id)
raw_id_data: (E(id), (id, v))
Returns
-------
(id, E(id))
"""
encrypt_id_raw_id = raw_id_data.join(encrypt_id_data, lambda r, e: r)
raw_id = encrypt_id_raw_id.map(lambda k, v: (v[0], k))
return raw_id
@staticmethod
def hash(value, hash_operator, salt=''):
h_value = hash_operator.compute(value, suffix_salt=salt)
return h_value
@staticmethod
def generate_new_uuid():
return str(uuid.uuid4())
@staticmethod
def insert_key(kv_iterator, filter, hash_operator=None, salt=None):
res_filter = None
for k, _ in kv_iterator:
if hash_operator:
res_filter = filter.insert(hash_operator.compute(k, suffix_salt=salt))
else:
res_filter = filter.insert(k)
return res_filter
@staticmethod
def count_key_in_filter(kv_iterator, filter):
count = 0
for k, _ in kv_iterator:
count += filter.check(k)
return count
@staticmethod
def construct_filter(data, false_positive_rate, hash_method, random_state, hash_operator=None, salt=None):
n = data.count()
m, k = BitArray.get_filter_param(n, false_positive_rate)
filter = BitArray(m, k, hash_method, random_state)
LOGGER.debug(f"filter bit count is: {filter.bit_count}")
LOGGER.debug(f"filter hash func count: {filter.hash_func_count}")
f = functools.partial(Intersect.insert_key, filter=filter, hash_operator=hash_operator, salt=salt)
new_array = data.mapPartitions(f).reduce(lambda x, y: x | y)
LOGGER.debug(f"filter array obtained")
filter.set_array(new_array)
# LOGGER.debug(f"after insert, filter sparsity is: {filter.sparsity}")
return filter
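# --- Illustrative sketch (added for exposition): `BitArray.get_filter_param`
# is defined elsewhere; conventional Bloom-filter sizing for n items and
# false positive rate p uses m = -n*ln(p)/(ln 2)^2 bits and k = (m/n)*ln 2
# hash functions. A hypothetical stand-in under that assumption:
def _demo_bloom_filter_params(n=100000, false_positive_rate=0.01):
    import math
    m = math.ceil(-n * math.log(false_positive_rate) / (math.log(2) ** 2))
    k = max(1, round((m / n) * math.log(2)))
    return m, k  # n=100000, p=0.01 -> m == 958506 bits, k == 7 hashes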
| 35.487069
| 125
| 0.672052
|
45d54b512b40eda5adccfcc4abb1b118168ab4bd
| 2,211
|
py
|
Python
|
salt/modules/tuned.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
salt/modules/tuned.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/tuned.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
Interface to Red Hat tuned-adm module
:maintainer: Syed Ali <alicsyed@gmail.com>
:maturity: new
:depends: tuned-adm
:platform: Linux
'''
# Import Python libs
from __future__ import absolute_import
import re
# Import Salt libs
import salt.utils
__func_alias__ = {
'list_': 'list',
}
__virtualname__ = 'tuned'
def __virtual__():
'''
Check to see if tuned-adm binary is installed on the system
'''
tuned_adm = salt.utils.which('tuned-adm')
if not tuned_adm:
return (False, 'The tuned execution module failed to load: the tuned-adm binary is not in the path.')
return __virtualname__
def list_():
'''
List the profiles available
CLI Example:
.. code-block:: bash
salt '*' tuned.list
'''
result = __salt__['cmd.run']('tuned-adm list').splitlines()
# Remove "Available profiles:"
result.pop(0)
# Remove "Current active profile:.*"
result.pop()
# Output can be : " - <profile name> - <description>" (v2.7.1)
# or " - <profile name> " (v2.4.1)
result = [i.split('- ')[1].strip() for i in result]
return result
def active():
'''
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
'''
    # query the currently active profile
result = __salt__['cmd.run']('tuned-adm active')
pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
match = re.match(pattern, result)
return '{0}'.format(match.group('profile'))
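# --- Illustrative sketch (added for exposition): the pattern in active()
# captures everything after the fixed prefix, e.g.:
def _demo_active_regex():
    sample = 'Current active profile: virtual-guest'
    pattern = re.compile(r'(?P<stmt>Current active profile:) (?P<profile>\w+.*)')
    assert re.match(pattern, sample).group('profile') == 'virtual-guest'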
def off():
'''
Turn off all profiles
CLI Example:
.. code-block:: bash
salt '*' tuned.off
'''
# turn off all profiles
result = __salt__['cmd.retcode']('tuned-adm off')
if int(result) != 0:
return False
return True
def profile(profile_name):
'''
Activate specified profile
CLI Example:
.. code-block:: bash
salt '*' tuned.profile virtual-guest
'''
# run tuned-adm with the profile specified
result = __salt__['cmd.retcode']('tuned-adm profile {0}'.format(profile_name))
if int(result) != 0:
return False
return '{0}'.format(profile_name)
| 20.1
| 109
| 0.604251
|
74f4dfcb81f48122d44b0cc2dba14b0320b2fb51
| 1,776
|
py
|
Python
|
polling_stations/apps/data_collection/management/commands/import_waltham_forest.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_waltham_forest.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_waltham_forest.py
|
mtravis/UK-Polling-Stations
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E09000031"
addresses_name = (
"local.2018-05-03/Version 2/Democracy_Club__03May2018 (1) Waltham Forest.tsv"
)
stations_name = (
"local.2018-05-03/Version 2/Democracy_Club__03May2018 (1) Waltham Forest.tsv"
)
elections = ["local.2018-05-03"]
csv_delimiter = "\t"
def station_record_to_dict(self, record):
rec = super().station_record_to_dict(record)
# better point for Mission Grove South Site
if record.polling_place_id == "2820":
rec["location"] = Point(-0.025035, 51.581813, srid=4326)
return rec
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn == "10091187735":
rec = super().address_record_to_dict(record)
rec["postcode"] = "E11 4ED"
return rec
if uprn == "200001420963":
rec = super().address_record_to_dict(record)
rec["postcode"] = "E11 3AA"
return rec
if uprn == "10091185796":
rec = super().address_record_to_dict(record)
rec["postcode"] = "E10 7EA"
return rec
if record.addressline6 == "E17 9BU":
return None
if record.addressline6 == "E10 5PW":
return None
if uprn == "200001424667":
rec = super().address_record_to_dict(record)
rec["postcode"] = "E17 6PR"
return rec
if record.addressline6 == "E10 6EZ":
return None
return super().address_record_to_dict(record)
| 30.101695
| 85
| 0.609234
|
0e9e5e8e5e012bc5085c5275a4e98859392c1d69
| 6,000
|
py
|
Python
|
homeassistant/components/solarlog/const.py
|
DoctorU/core
|
5b218d7e1c4164e32d41473977459cbaf23adf42
|
[
"Apache-2.0"
] | 7
|
2019-08-15T13:36:58.000Z
|
2020-03-18T10:46:29.000Z
|
homeassistant/components/solarlog/const.py
|
DoctorU/core
|
5b218d7e1c4164e32d41473977459cbaf23adf42
|
[
"Apache-2.0"
] | 87
|
2020-07-15T13:43:35.000Z
|
2022-03-23T07:43:10.000Z
|
homeassistant/components/solarlog/const.py
|
marecabo/home-assistant
|
e33774a61e7fcc88aff752dfa4618dd26a746872
|
[
"Apache-2.0"
] | 7
|
2018-10-04T10:12:45.000Z
|
2021-12-29T20:55:40.000Z
|
"""Constants for the Solar-Log integration."""
from __future__ import annotations
from dataclasses import dataclass
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL,
SensorEntityDescription,
)
from homeassistant.const import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_POWER_FACTOR,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
)
DOMAIN = "solarlog"
# Default config for solarlog.
DEFAULT_HOST = "http://solar-log"
DEFAULT_NAME = "solarlog"
@dataclass
class SolarLogSensorEntityDescription(SensorEntityDescription):
"""Describes Solarlog sensor entity."""
factor: float | None = None
SENSOR_TYPES: tuple[SolarLogSensorEntityDescription, ...] = (
SolarLogSensorEntityDescription(
key="time",
name="last update",
device_class=DEVICE_CLASS_TIMESTAMP,
),
SolarLogSensorEntityDescription(
key="power_ac",
name="power AC",
icon="mdi:solar-power",
native_unit_of_measurement=POWER_WATT,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="power_dc",
name="power DC",
icon="mdi:solar-power",
native_unit_of_measurement=POWER_WATT,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="voltage_ac",
name="voltage AC",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=DEVICE_CLASS_VOLTAGE,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="voltage_dc",
name="voltage DC",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=DEVICE_CLASS_VOLTAGE,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="yield_day",
name="yield day",
icon="mdi:solar-power",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="yield_yesterday",
name="yield yesterday",
icon="mdi:solar-power",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="yield_month",
name="yield month",
icon="mdi:solar-power",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="yield_year",
name="yield year",
icon="mdi:solar-power",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="yield_total",
name="yield total",
icon="mdi:solar-power",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=STATE_CLASS_TOTAL,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="consumption_ac",
name="consumption AC",
native_unit_of_measurement=POWER_WATT,
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="consumption_day",
name="consumption day",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="consumption_yesterday",
name="consumption yesterday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="consumption_month",
name="consumption month",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="consumption_year",
name="consumption year",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="consumption_total",
name="consumption total",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL,
factor=0.001,
),
SolarLogSensorEntityDescription(
key="total_power",
name="installed peak power",
icon="mdi:solar-power",
native_unit_of_measurement=POWER_WATT,
device_class=DEVICE_CLASS_POWER,
),
SolarLogSensorEntityDescription(
key="alternator_loss",
name="alternator loss",
icon="mdi:solar-power",
native_unit_of_measurement=POWER_WATT,
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="capacity",
name="capacity",
icon="mdi:solar-power",
native_unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_POWER_FACTOR,
state_class=STATE_CLASS_MEASUREMENT,
factor=100,
),
SolarLogSensorEntityDescription(
key="efficiency",
name="efficiency",
native_unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_POWER_FACTOR,
state_class=STATE_CLASS_MEASUREMENT,
factor=100,
),
SolarLogSensorEntityDescription(
key="power_available",
name="power available",
icon="mdi:solar-power",
native_unit_of_measurement=POWER_WATT,
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
),
SolarLogSensorEntityDescription(
key="usage",
name="usage",
native_unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_POWER_FACTOR,
state_class=STATE_CLASS_MEASUREMENT,
factor=100,
),
)
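# --- Illustrative sketch (added for exposition): `factor` is presumably
# applied by the sensor platform to scale raw Solar-Log readings, roughly:
def _demo_apply_factor(raw_value, description: SolarLogSensorEntityDescription):
    # e.g. Wh * 0.001 -> kWh, ratios * 100 -> percent
    return raw_value * description.factor if description.factor else raw_value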
| 30.150754
| 63
| 0.6795
|
7b4f4996d71d7498a7fdc16f86a58d791179c046
| 15,095
|
py
|
Python
|
django/utils/http.py
|
Elorex/django
|
16454ac35f6a24a04b23a9340b0d62c33edbc1ea
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2019-05-08T02:16:55.000Z
|
2020-05-18T01:10:26.000Z
|
django/utils/http.py
|
Elorex/django
|
16454ac35f6a24a04b23a9340b0d62c33edbc1ea
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2020-01-31T11:30:21.000Z
|
2020-01-31T11:30:21.000Z
|
django/utils/http.py
|
Elorex/django
|
16454ac35f6a24a04b23a9340b0d62c33edbc1ea
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2019-01-20T13:03:39.000Z
|
2019-01-20T13:03:39.000Z
|
import base64
import calendar
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
'Cannot encode None in a query string. Did you mean to pass '
'an empty string or omit the value?'
)
elif isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, even when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
'Cannot encode None in a query string. Did you '
'mean to pass an empty string or omit the value?'
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
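# --- Illustrative sketch (added for exposition): the two helpers above are
# inverses over the integers they accept:
def _demo_base36_roundtrip():
    for i in (0, 35, 36, 1234567890):
        assert base36_to_int(int_to_base36(i)) == i
    assert int_to_base36(36) == '10'  # 36 is written '10' in base 36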
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
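# --- Illustrative sketch (added for exposition): encoding strips the '='
# padding and decoding restores it, so bytestrings round-trip:
def _demo_urlsafe_base64_roundtrip():
    for payload in (b'', b'x', b'\xff\xfe binary \x00'):
        assert urlsafe_base64_decode(urlsafe_base64_encode(payload)) == payload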
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
def is_safe_url(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if nv[1] or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
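# --- Illustrative sketch (added for exposition):
def _demo_escape_leading_slashes():
    assert escape_leading_slashes('//evil.example.com/') == '/%2Fevil.example.com/'
    assert escape_leading_slashes('/safe/path') == '/safe/path'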
| 34.46347
| 93
| 0.629149
|
fb2c5570b339118e422670b03ae9cb4dfd4d2ce2
| 402
|
py
|
Python
|
club/migrations/0030_auto_20200109_0957.py
|
DSC-RPI/dsc-portal
|
bf2d0c067d10dd199317ccc00863d85db0d07094
|
[
"MIT"
] | 2
|
2020-01-29T20:14:35.000Z
|
2020-02-15T23:01:42.000Z
|
club/migrations/0030_auto_20200109_0957.py
|
DSC-RPI/dsc-portal
|
bf2d0c067d10dd199317ccc00863d85db0d07094
|
[
"MIT"
] | 62
|
2019-11-26T17:47:58.000Z
|
2022-01-13T02:05:51.000Z
|
club/migrations/0030_auto_20200109_0957.py
|
DSC-RPI/dsc-portal
|
bf2d0c067d10dd199317ccc00863d85db0d07094
|
[
"MIT"
] | 1
|
2020-01-23T17:12:38.000Z
|
2020-01-23T17:12:38.000Z
|
# Generated by Django 3.0.2 on 2020-01-09 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('club', '0029_question'),
]
operations = [
migrations.AlterField(
model_name='question',
name='answer',
field=models.TextField(help_text='The answer.', max_length=2000),
),
]
| 21.157895
| 77
| 0.597015
|
f0ad0ec962cd1054ee1d646d7ce325ce958a2817
| 4,192
|
py
|
Python
|
algorithms_without_visualisation/divide_and_conquer_convex_hull_algorithm.py
|
SzymczakJ/Geometric_algorithms_project
|
b1f5fd89c2a6d4ef281d092d350b45e0a069bdcb
|
[
"MIT"
] | null | null | null |
algorithms_without_visualisation/divide_and_conquer_convex_hull_algorithm.py
|
SzymczakJ/Geometric_algorithms_project
|
b1f5fd89c2a6d4ef281d092d350b45e0a069bdcb
|
[
"MIT"
] | null | null | null |
algorithms_without_visualisation/divide_and_conquer_convex_hull_algorithm.py
|
SzymczakJ/Geometric_algorithms_project
|
b1f5fd89c2a6d4ef281d092d350b45e0a069bdcb
|
[
"MIT"
] | 1
|
2022-03-23T10:27:51.000Z
|
2022-03-23T10:27:51.000Z
|
from algorithms_without_visualisation.incremental_convex_hull_algorithm import incremental_convex_hull
from additional_functions.additional_functions import *
def merge_hulls(left_convex_hull, right_convex_hull, epsilon):
    """Merge two x-separated convex hulls by locating their two common
    tangents and concatenating the outer boundary chains between them."""
right_n = len(right_convex_hull)
left_n = len(left_convex_hull)
rightmost_point = 0
for i in range(left_n):
if left_convex_hull[i][0] > left_convex_hull[rightmost_point][0]:
rightmost_point = i
leftmost_point = 0
for i in range(right_n):
if right_convex_hull[i][0] < right_convex_hull[leftmost_point][0]:
leftmost_point = i
left_convex_point = rightmost_point
right_convex_point = leftmost_point
while orientation(left_convex_hull[left_convex_point], right_convex_hull[right_convex_point],
right_convex_hull[(right_convex_point + 1) % right_n], epsilon) != -1 or \
orientation(right_convex_hull[right_convex_point], left_convex_hull[left_convex_point],
                        left_convex_hull[(left_convex_point - 1) % left_n], epsilon) != 1:
while orientation(left_convex_hull[left_convex_point], right_convex_hull[right_convex_point],
right_convex_hull[(right_convex_point + 1) % right_n], epsilon) != -1:
right_convex_point = (right_convex_point + 1) % right_n
while orientation(right_convex_hull[right_convex_point], left_convex_hull[left_convex_point],
left_convex_hull[(left_convex_point - 1) % left_n], epsilon) != 1:
left_convex_point = (left_convex_point - 1) % left_n
points_of_tangent = [left_convex_point, right_convex_point]
left_convex_point = rightmost_point
right_convex_point = leftmost_point
while orientation(left_convex_hull[left_convex_point], right_convex_hull[right_convex_point],
right_convex_hull[(right_convex_point - 1) % right_n], epsilon) != 1 or \
orientation(right_convex_hull[right_convex_point], left_convex_hull[left_convex_point],
left_convex_hull[(left_convex_point + 1) % left_n], epsilon) != -1:
while orientation(left_convex_hull[left_convex_point], right_convex_hull[right_convex_point],
                          right_convex_hull[(right_convex_point - 1) % right_n], epsilon) != 1:
right_convex_point = (right_convex_point - 1) % right_n
while orientation(right_convex_hull[right_convex_point], left_convex_hull[left_convex_point],
left_convex_hull[(left_convex_point + 1) % left_n], epsilon) != -1:
left_convex_point = (left_convex_point + 1) % left_n
points_of_tangent.append(left_convex_point)
points_of_tangent.append(right_convex_point)
res = [left_convex_hull[points_of_tangent[0]]]
i = points_of_tangent[1]
while i != points_of_tangent[3]:
res.append(right_convex_hull[i])
i = (i + 1) % right_n
res.append(right_convex_hull[points_of_tangent[3]])
i = points_of_tangent[2]
while i != points_of_tangent[0]:
res.append(left_convex_hull[i])
i = (i + 1) % left_n
return res
def divide_and_conquer(points, epsilon=10 ** (-12)):
    """Compute the convex hull of `points`: sort by x, split into small
    groups, hull each group incrementally, then merge neighbouring hulls
    pairwise until one hull remains."""
if len(points) < 3:
return None, None
prev_points_division = [sorted(points, key=lambda x: x[0])]
new_points_division = []
while len(prev_points_division[0]) > 6:
for points_group in prev_points_division:
new_points_division.append(points_group[:int((len(points_group) + 1) / 2)])
new_points_division.append(points_group[int((len(points_group) + 1) / 2):])
prev_points_division = new_points_division
new_points_division = []
convex_hulls = [0] * len(prev_points_division)
for i in range(len(prev_points_division)):
convex_hulls[i] = incremental_convex_hull(prev_points_division[i])
new_convex_hulls = []
while len(convex_hulls) > 1:
i = 0
while i + 1 < len(convex_hulls):
new_convex_hulls.append(merge_hulls(convex_hulls[i], convex_hulls[i + 1], epsilon))
i += 2
convex_hulls = new_convex_hulls
new_convex_hulls = []
return convex_hulls[0]
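# --- Illustrative sketch (added for exposition): `orientation` is imported
# from additional_functions and not shown here; a conventional cross-product
# version with an epsilon tolerance would look like this hypothetical
# stand-in (the real helper may differ):
def _orientation_sketch(a, b, c, epsilon=10 ** (-12)):
    cross = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    if cross > epsilon:
        return 1   # counter-clockwise turn
    if cross < -epsilon:
        return -1  # clockwise turn
    return 0       # collinear within tolerance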
| 49.317647
| 102
| 0.684399
|
1422cd4f933fb4ec1103e001f370b1318c7fc00e
| 141
|
py
|
Python
|
qlib/contrib/meta/__init__.py
|
lpd6375/qlib
|
3a911bc09ba5136cd7c61c2c8dcca8a63339e738
|
[
"MIT"
] | 2
|
2021-06-12T20:48:26.000Z
|
2021-06-25T02:26:09.000Z
|
qlib/contrib/meta/__init__.py
|
lpd6375/qlib
|
3a911bc09ba5136cd7c61c2c8dcca8a63339e738
|
[
"MIT"
] | 1
|
2022-03-10T03:57:50.000Z
|
2022-03-10T03:57:50.000Z
|
qlib/contrib/meta/__init__.py
|
lpd6375/qlib
|
3a911bc09ba5136cd7c61c2c8dcca8a63339e738
|
[
"MIT"
] | 1
|
2022-02-22T11:57:15.000Z
|
2022-02-22T11:57:15.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .data_selection import MetaTaskDS, MetaDatasetDS, MetaModelDS
| 28.2
| 66
| 0.808511
|
fe864f94506fb3b33381a3324f0c9b022e3efccf
| 345
|
py
|
Python
|
core/__init__.py
|
Forestjylee/scut_spider
|
8b37f4d714da0dd93451f22cabbc05acfd40b129
|
[
"MIT"
] | 1
|
2019-04-20T03:39:14.000Z
|
2019-04-20T03:39:14.000Z
|
core/__init__.py
|
Forest75/scut_spider
|
8b37f4d714da0dd93451f22cabbc05acfd40b129
|
[
"MIT"
] | 2
|
2021-03-31T19:12:04.000Z
|
2021-12-13T20:02:09.000Z
|
core/__init__.py
|
Forest75/scut_spider
|
8b37f4d714da0dd93451f22cabbc05acfd40b129
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@file: __init__.py.py
@time: 2019/4/18 16:52
Created by
___ _
|_ | (_)
| | _ _ _ __ _ _ _
| || | | || '_ \ | | | || |
/\__/ /| |_| || | | || |_| || |
\____/ \__,_||_| |_| \__, ||_|
__/ |
|___/
"""
| 23
| 31
| 0.249275
|
5be64a0ce8955951759e9610e8a9c2187dcffe79
| 45,604
|
py
|
Python
|
jenkins_jobs/modules/properties.py
|
koying-mrmc/jenkins-job-builder
|
2cec182a3f2622681111da7d16bc3d45e23bc054
|
[
"Apache-2.0"
] | null | null | null |
jenkins_jobs/modules/properties.py
|
koying-mrmc/jenkins-job-builder
|
2cec182a3f2622681111da7d16bc3d45e23bc054
|
[
"Apache-2.0"
] | null | null | null |
jenkins_jobs/modules/properties.py
|
koying-mrmc/jenkins-job-builder
|
2cec182a3f2622681111da7d16bc3d45e23bc054
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Properties module supplies a wide range of options that are
implemented as Jenkins job properties.
**Component**: properties
:Macro: property
:Entry Point: jenkins_jobs.properties
Example::
job:
name: test_job
properties:
- github:
url: https://github.com/openstack-infra/jenkins-job-builder/
"""
import logging
import pkg_resources
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
from jenkins_jobs.errors import AttributeConflictError
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
def builds_chain_fingerprinter(registry, xml_parent, data):
"""yaml: builds-chain-fingerprinter
Builds chain fingerprinter.
Requires the Jenkins :jenkins-wiki:`Builds chain fingerprinter Plugin
<Builds+chain+fingerprinter>`.
:arg bool per-builds-chain: enable builds hierarchy fingerprinting
(default false)
:arg bool per-job-chain: enable jobs hierarchy fingerprinting
(default false)
Example:
.. literalinclude:: /../../tests/properties/fixtures/fingerprinter.yaml
:language: yaml
"""
fingerprinter = XML.SubElement(
xml_parent,
"org.jenkinsci.plugins."
"buildschainfingerprinter."
"AutomaticFingerprintJobProperty",
)
mapping = [
("per-builds-chain", "isPerBuildsChainEnabled", False),
("per-job-chain", "isPerJobsChainEnabled", False),
]
helpers.convert_mapping_to_xml(fingerprinter, data, mapping, fail_required=True)
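# --- Illustrative sketch (added for exposition): the `mapping` tuples used
# throughout this module are (yaml key, XML tag, default). A simplified,
# hypothetical version of what helpers.convert_mapping_to_xml does with them
# (the real helper also validates required fields):
def _demo_convert_mapping(xml_parent, data, mapping):
    for yaml_key, xml_tag, default in mapping:
        value = data.get(yaml_key, default)
        if isinstance(value, bool):
            value = str(value).lower()  # Jenkins XML expects 'true'/'false'
        XML.SubElement(xml_parent, xml_tag).text = str(value)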
def ownership(registry, xml_parent, data):
"""yaml: ownership
Plugin provides explicit ownership for jobs and slave nodes.
Requires the Jenkins :jenkins-plugins:`Ownership Plugin <ownership>`.
:arg bool enabled: whether ownership enabled (default : true)
:arg str owner: the owner of job
:arg list co-owners: list of job co-owners
Example:
.. literalinclude:: /../../tests/properties/fixtures/ownership.yaml
:language: yaml
"""
ownership_plugin = XML.SubElement(
xml_parent,
"com.synopsys.arc.jenkins.plugins.ownership.jobs.JobOwnerJobProperty",
)
ownership = XML.SubElement(ownership_plugin, "ownership")
owner = str(data.get("enabled", True)).lower()
XML.SubElement(ownership, "ownershipEnabled").text = owner
XML.SubElement(ownership, "primaryOwnerId").text = data.get("owner")
coownersIds = XML.SubElement(ownership, "coownersIds")
for coowner in data.get("co-owners", []):
XML.SubElement(coownersIds, "string").text = coowner
def promoted_build(registry, xml_parent, data):
"""yaml: promoted-build
Marks a build for promotion. A promotion process with an identical
name must be created via the web interface in the job in order for the job
promotion to persist. Promotion processes themselves cannot be configured
by jenkins-jobs due to the separate storage of plugin configuration files.
Requires the Jenkins :jenkins-plugins:`Promoted Builds Plugin
<promoted-builds>`.
:arg list names: the promoted build names (optional)
Example:
.. literalinclude:: /../../tests/properties/fixtures/promoted_build.yaml
:language: yaml
"""
promoted = XML.SubElement(
xml_parent, "hudson.plugins.promoted__builds." "JobPropertyImpl"
)
names = data.get("names", [])
if names:
active_processes = XML.SubElement(promoted, "activeProcessNames")
for n in names:
XML.SubElement(active_processes, "string").text = str(n)
def gitbucket(parser, xml_parent, data):
"""yaml: gitbucket
Integrate GitBucket to Jenkins.
Requires the Jenkins :jenkins-plugins:`GitBucket Plugin <gitbucket>`.
:arg str url: GitBucket URL to issue (required)
:arg bool link-enabled: Enable hyperlink to issue (default false)
Minimal Example:
.. literalinclude:: /../../tests/properties/fixtures/gitbucket-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/properties/fixtures/gitbucket-full.yaml
:language: yaml
"""
gitbucket = XML.SubElement(
xml_parent, "org.jenkinsci.plugins.gitbucket.GitBucketProjectProperty"
)
gitbucket.set("plugin", "gitbucket")
mapping = [("url", "url", None), ("link-enabled", "linkEnabled", False)]
helpers.convert_mapping_to_xml(gitbucket, data, mapping, fail_required=True)
def github(registry, xml_parent, data):
"""yaml: github
Sets the GitHub URL for the project.
:arg str url: the GitHub URL (required)
:arg str display-name: This value will be used as context name for commit
status if status builder or status publisher is defined for this
project. (>= 1.14.1) (default '')
Minimal Example:
.. literalinclude:: /../../tests/properties/fixtures/github-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/properties/fixtures/github-full.yaml
:language: yaml
"""
github = XML.SubElement(
xml_parent, "com.coravy.hudson.plugins.github.GithubProjectProperty"
)
github.set("plugin", "github")
mapping = [("url", "projectUrl", None), ("display-name", "displayName", "")]
helpers.convert_mapping_to_xml(github, data, mapping, fail_required=True)
def gitlab(registry, xml_parent, data):
"""yaml: gitlab
Sets the GitLab connection for the project. Configured via Jenkins Global
Configuration.
Requires the Jenkins :jenkins-plugins:`GitLab Plugin <gitlab-plugin>`.
:arg str connection: the GitLab connection name (required)
Example:
.. literalinclude:: /../../tests/properties/fixtures/gitlab.yaml
:language: yaml
"""
gitlab = XML.SubElement(
xml_parent,
"com.dabsquared.gitlabjenkins.connection." "GitLabConnectionProperty",
)
mapping = [("connection", "gitLabConnection", None)]
helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
def gitlab_logo(registry, xml_parent, data):
"""yaml: gitlab-logo
Configures the GitLab-Logo Plugin.
Requires the Jenkins :jenkins-plugins:`GitLab Logo Plugin
<gitlab-logo>`.
:arg str repository-name: the GitLab repository name (required)
Example:
.. literalinclude:: /../../tests/properties/fixtures/gitlab-logo.yaml
:language: yaml
"""
logo = XML.SubElement(
xml_parent, "org.jenkinsci.plugins.gitlablogo." "GitlabLogoProperty"
)
mapping = [("repository-name", "repositoryName", None)]
helpers.convert_mapping_to_xml(logo, data, mapping, fail_required=True)
def disk_usage(registry, xml_parent, data):
"""yaml: disk-usage
Enables the Disk Usage Plugin.
Requires the Jenkins :jenkins-plugins:`Disk Usage Plugin <disk-usage>`.
Example:
.. literalinclude:: /../../tests/properties/fixtures/disk-usage.yaml
:language: yaml
"""
XML.SubElement(xml_parent, "hudson.plugins.disk__usage." "DiskUsageProperty")
def least_load(registry, xml_parent, data):
"""yaml: least-load
Enables the Least Load Plugin.
Requires the Jenkins :jenkins-plugins:`Least Load Plugin <leastload>`.
:arg bool disabled: whether or not leastload is disabled (default true)
Example:
.. literalinclude:: /../../tests/properties/fixtures/least-load002.yaml
:language: yaml
"""
least = XML.SubElement(
xml_parent,
"org.bstick12.jenkinsci.plugins.leastload." "LeastLoadDisabledProperty",
)
mapping = [("disabled", "leastLoadDisabled", True)]
helpers.convert_mapping_to_xml(least, data, mapping, fail_required=True)
def throttle(registry, xml_parent, data):
"""yaml: throttle
Throttles the number of builds for this job.
Requires the Jenkins :jenkins-plugins:`Throttle Concurrent Builds Plugin
<throttle-concurrents>`.
:arg str option: throttle `project` (throttle the project alone)
or `category` (throttle the project as part of one or more categories)
:arg int max-per-node: max concurrent builds per node (default 0)
:arg int max-total: max concurrent builds (default 0)
:arg bool enabled: whether throttling is enabled (default true)
:arg list categories: multiproject throttle categories
:arg bool matrix-builds: throttle matrix master builds (default true)
:arg bool matrix-configs: throttle matrix config builds (default false)
    :arg bool parameters-limit: prevent jobs with matching parameters from
        running concurrently (default false)
:arg list parameters-check-list: Comma-separated list of parameters
to use when comparing jobs (optional)
Example:
.. literalinclude:: /../../tests/properties/fixtures/throttle001.yaml
:language: yaml
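
    An inline sketch for orientation (the category name and limits are
    hypothetical; the fixture file above remains the authoritative example):

    .. code-block:: yaml

        properties:
          - throttle:
              option: category
              categories:
                - resource-limited
              max-per-node: 2
              max-total: 4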
"""
throttle = XML.SubElement(
xml_parent, "hudson.plugins.throttleconcurrents." "ThrottleJobProperty"
)
mapping = [
("max-per-node", "maxConcurrentPerNode", "0"),
("max-total", "maxConcurrentTotal", "0"),
("enabled", "throttleEnabled", True),
]
helpers.convert_mapping_to_xml(throttle, data, mapping, fail_required=True)
cat = data.get("categories", [])
if cat:
cn = XML.SubElement(throttle, "categories")
for c in cat:
XML.SubElement(cn, "string").text = str(c)
options_list = ("category", "project")
option = data.get("option")
if option not in options_list:
raise InvalidAttributeError("option", option, options_list)
mapping = [
("", "throttleOption", option),
("", "configVersion", "1"),
("parameters-limit", "limitOneJobWithMatchingParams", False),
]
helpers.convert_mapping_to_xml(throttle, data, mapping, fail_required=True)
matrixopt = XML.SubElement(throttle, "matrixOptions")
mapping = [
("matrix-builds", "throttleMatrixBuilds", True),
("matrix-configs", "throttleMatrixConfigurations", False),
]
helpers.convert_mapping_to_xml(matrixopt, data, mapping, fail_required=True)
params_to_use = data.get("parameters-check-list", [])
XML.SubElement(throttle, "paramsToUseForLimit").text = ",".join(params_to_use)
def branch_api(registry, xml_parent, data):
"""yaml: branch-api
Enforces a minimum time between builds based on the desired maximum rate.
Requires the Jenkins :jenkins-plugins:`Branch API Plugin <branch-api>`.
:arg int number-of-builds: The maximum number of builds allowed within
the specified time period. (default 1)
:arg str time-period: The time period within which the maximum number
of builds will be enforced. (default 'Hour')
:valid values: **Hour**, **Day**, **Week**, **Month**, **Year**
:arg bool skip-rate-limit: Permit user triggered builds to
skip the rate limit (default false)
Minimal Example:
.. literalinclude::
/../../tests/properties/fixtures/branch-api-minimal.yaml
:language: yaml
Full example:
.. literalinclude::
/../../tests/properties/fixtures/branch-api-full.yaml
:language: yaml
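
    For orientation, an inline sketch (values are illustrative only; the
    fixture files above remain the authoritative examples):

    .. code-block:: yaml

        properties:
          - branch-api:
              number-of-builds: 2
              time-period: Day
              skip-rate-limit: true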
"""
branch = XML.SubElement(
xml_parent, "jenkins.branch." "RateLimitBranchProperty_-JobPropertyImpl"
)
branch.set("plugin", "branch-api")
valid_time_periods = ["Hour", "Day", "Week", "Month", "Year"]
mapping = [
("time-period", "durationName", "Hour", valid_time_periods),
("number-of-builds", "count", 1),
("skip-rate-limit", "userBoost", False),
]
helpers.convert_mapping_to_xml(branch, data, mapping, fail_required=True)
def sidebar(registry, xml_parent, data):
"""yaml: sidebar
Allows you to add links in the sidebar.
Requires the Jenkins :jenkins-plugins:`Sidebar-Link Plugin <sidebar-link>`.
:arg str url: url to link to (optional)
:arg str text: text for the link (optional)
:arg str icon: path to icon (optional)
Example:
.. literalinclude:: /../../tests/properties/fixtures/sidebar02.yaml
:language: yaml
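
    An inline sketch for orientation (URL, text and icon path are
    hypothetical placeholders; the fixture file above is authoritative):

    .. code-block:: yaml

        properties:
          - sidebar:
              url: https://docs.example.com/
              text: Project documentation
              icon: /userContent/doc.png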
"""
sidebar = xml_parent.find("hudson.plugins.sidebar__link.ProjectLinks")
if sidebar is None:
sidebar = XML.SubElement(
xml_parent, "hudson.plugins.sidebar__link.ProjectLinks"
)
links = XML.SubElement(sidebar, "links")
else:
links = sidebar.find("links")
action = XML.SubElement(links, "hudson.plugins.sidebar__link.LinkAction")
mapping = [("url", "url", ""), ("text", "text", ""), ("icon", "icon", "")]
helpers.convert_mapping_to_xml(action, data, mapping, fail_required=True)
def inject(registry, xml_parent, data):
"""yaml: inject
Allows you to inject environment variables into the build.
Requires the Jenkins :jenkins-plugins:`EnvInject Plugin <envinject>`.
:arg str properties-file: file to read with properties (optional)
:arg str properties-content: key=value properties (optional)
:arg str script-file: file with script to run (optional)
:arg str script-content: script to run (optional)
:arg str groovy-content: groovy script to run (optional)
:arg bool groovy-sandbox: run groovy script in sandbox (default false)
:arg bool load-from-master: load files from master (default false)
:arg bool enabled: injection enabled (default true)
:arg bool keep-system-variables: keep system variables (default true)
:arg bool keep-build-variables: keep build variable (default true)
:arg bool override-build-parameters: override build parameters
(default false)
Example:
.. literalinclude:: /../../tests/properties/fixtures/inject001.yaml
:language: yaml
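
    For orientation, an inline sketch (the property value is a hypothetical
    placeholder; the fixture file above remains authoritative):

    .. code-block:: yaml

        properties:
          - inject:
              properties-content: DEPLOY_ENV=staging
              keep-system-variables: true
              keep-build-variables: true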
"""
inject = XML.SubElement(xml_parent, "EnvInjectJobProperty")
info = XML.SubElement(inject, "info")
mapping = [
("properties-file", "propertiesFilePath", None),
("properties-content", "propertiesContent", None),
("script-file", "scriptFilePath", None),
("script-content", "scriptContent", None),
("load-from-master", "loadFilesFromMaster", False),
]
helpers.convert_mapping_to_xml(info, data, mapping, fail_required=False)
# determine version of plugin
plugin_info = registry.get_plugin_info("Groovy")
version = pkg_resources.parse_version(plugin_info.get("version", "0"))
if version >= pkg_resources.parse_version("2.0.0"):
secure_groovy_script = XML.SubElement(info, "secureGroovyScript")
mapping = [
("groovy-content", "script", None),
("groovy-sandbox", "sandbox", False),
]
helpers.convert_mapping_to_xml(
secure_groovy_script, data, mapping, fail_required=False
)
else:
mapping = [("groovy-content", "groovyScriptContent", None)]
helpers.convert_mapping_to_xml(info, data, mapping, fail_required=False)
mapping = [
("enabled", "on", True),
("keep-system-variables", "keepJenkinsSystemVariables", True),
("keep-build-variables", "keepBuildVariables", True),
("override-build-parameters", "overrideBuildParameters", False),
]
helpers.convert_mapping_to_xml(inject, data, mapping, fail_required=True)
def authenticated_build(registry, xml_parent, data):
"""yaml: authenticated-build
Specifies an authorization matrix where only authenticated users
may trigger a build.
    .. deprecated:: 0.1.0
       Please use :ref:`authorization <authorization>`.
Example:
.. literalinclude::
/../../tests/properties/fixtures/authenticated_build.yaml
:language: yaml
"""
# TODO: generalize this
security = XML.SubElement(
xml_parent, "hudson.security." "AuthorizationMatrixProperty"
)
XML.SubElement(
security, "permission"
).text = "hudson.model.Item.Build:authenticated"
def authorization(registry, xml_parent, data):
"""yaml: authorization
Specifies an authorization matrix
.. _authorization:
:arg list <name>: `<name>` is the name of the group or user, containing
the list of rights to grant.
:<name> rights:
* **credentials-create**
* **credentials-delete**
* **credentials-manage-domains**
* **credentials-update**
* **credentials-view**
* **job-build**
* **job-cancel**
* **job-configure**
* **job-delete**
* **job-discover**
* **job-extended-read**
* **job-move**
* **job-read**
* **job-status**
* **job-workspace**
* **ownership-jobs**
* **run-delete**
* **run-replay**
* **run-update**
* **scm-tag**
Example:
.. literalinclude:: /../../tests/properties/fixtures/authorization.yaml
:language: yaml
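
    An inline sketch for orientation (the user and group names are
    hypothetical; the fixture file above remains the authoritative example):

    .. code-block:: yaml

        properties:
          - authorization:
              admin:
                - job-read
                - job-configure
                - job-build
              anonymous:
                - job-discover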
"""
# get the folder name if it exists
in_a_folder = data.pop("_use_folder_perms", None) if data else None
credentials = "com.cloudbees.plugins.credentials.CredentialsProvider."
ownership = "com.synopsys.arc.jenkins.plugins.ownership.OwnershipPlugin."
mapping = {
"credentials-create": "".join((credentials, "Create")),
"credentials-delete": "".join((credentials, "Delete")),
"credentials-manage-domains": "".join((credentials, "ManageDomains")),
"credentials-update": "".join((credentials, "Update")),
"credentials-view": "".join((credentials, "View")),
"job-build": "hudson.model.Item.Build",
"job-cancel": "hudson.model.Item.Cancel",
"job-configure": "hudson.model.Item.Configure",
"job-delete": "hudson.model.Item.Delete",
"job-discover": "hudson.model.Item.Discover",
"job-extended-read": "hudson.model.Item.ExtendedRead",
"job-move": "hudson.model.Item.Move",
"job-read": "hudson.model.Item.Read",
"job-status": "hudson.model.Item.ViewStatus",
"job-workspace": "hudson.model.Item.Workspace",
"ownership-jobs": "".join((ownership, "Jobs")),
"run-delete": "hudson.model.Run.Delete",
"run-replay": "hudson.model.Run.Replay",
"run-update": "hudson.model.Run.Update",
"scm-tag": "hudson.scm.SCM.Tag",
}
if data:
if in_a_folder:
matrix = XML.SubElement(
xml_parent,
"com.cloudbees.hudson.plugins.folder.properties.AuthorizationMatrixProperty",
)
XML.SubElement(
matrix,
"inheritanceStrategy",
{
"class": "org.jenkinsci.plugins.matrixauth.inheritance.InheritParentStrategy"
},
)
else:
matrix = XML.SubElement(
xml_parent, "hudson.security.AuthorizationMatrixProperty"
)
for (username, perms) in data.items():
for perm in perms:
pe = XML.SubElement(matrix, "permission")
try:
pe.text = "{0}:{1}".format(mapping[perm], username)
except KeyError:
raise InvalidAttributeError(username, perm, mapping.keys())
def priority_sorter(registry, xml_parent, data):
"""yaml: priority-sorter
Allows simple ordering of builds, using a configurable job priority.
Requires the Jenkins :jenkins-plugins:`Priority Sorter Plugin
<PrioritySorter>`.
    :arg int priority: Priority of the job. Higher values mean higher
        priority; the plugin's own default priority is 3. (required)
Example:
.. literalinclude::
/../../tests/properties/fixtures/priority_sorter002.yaml
:language: yaml
"""
plugin_info = registry.get_plugin_info("PrioritySorter")
version = pkg_resources.parse_version(plugin_info.get("version", "0"))
if version >= pkg_resources.parse_version("3.0"):
priority_sorter_tag = XML.SubElement(
xml_parent,
"jenkins.advancedqueue.jobinclusion." "strategy.JobInclusionJobProperty",
)
mapping = [("use", "useJobGroup", True), ("priority", "jobGroupName", None)]
elif version >= pkg_resources.parse_version("2.0"):
priority_sorter_tag = XML.SubElement(
xml_parent, "jenkins.advancedqueue.priority." "strategy.PriorityJobProperty"
)
mapping = [("use", "useJobPriority", True), ("priority", "priority", None)]
else:
priority_sorter_tag = XML.SubElement(
xml_parent, "hudson.queueSorter." "PrioritySorterJobProperty"
)
mapping = [("priority", "priority", None)]
helpers.convert_mapping_to_xml(
priority_sorter_tag, data, mapping, fail_required=True
)
def build_blocker(registry, xml_parent, data):
"""yaml: build-blocker
    This plugin keeps the job in the queue while the name of at least one
    currently running job matches one of the given regular expressions.
Requires the Jenkins :jenkins-plugins:`Build Blocker Plugin
<build-blocker-plugin>`.
:arg bool use-build-blocker: Enable or disable build blocker (default true)
:arg list blocking-jobs: One regular expression per line to select
blocking jobs by their names (required)
:arg str block-level: block build globally ('GLOBAL') or per node ('NODE')
(default 'GLOBAL')
:arg str queue-scanning: scan build queue for all builds ('ALL') or only
buildable builds ('BUILDABLE') (default 'DISABLED')
    Minimal Example:
.. literalinclude::
/../../tests/properties/fixtures/build-blocker-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/properties/fixtures/build-blocker-full.yaml
:language: yaml
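
    For orientation, an inline sketch (the job-name patterns are
    hypothetical; the fixture files above remain authoritative):

    .. code-block:: yaml

        properties:
          - build-blocker:
              use-build-blocker: true
              blocking-jobs:
                - ".*-deploy"
                - "integration-.*"
              block-level: NODE
              queue-scanning: BUILDABLE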
"""
blocker = XML.SubElement(
xml_parent, "hudson.plugins." "buildblocker.BuildBlockerProperty"
)
if data is None or "blocking-jobs" not in data:
raise JenkinsJobsException("blocking-jobs field is missing")
elif data.get("blocking-jobs", None) is None:
raise JenkinsJobsException("blocking-jobs list must not be empty")
jobs = ""
for setting, value in data.items():
if setting == "blocking-jobs":
jobs = "\n".join(value)
block_level_types = ["GLOBAL", "NODE"]
queue_scan_types = ["DISABLED", "ALL", "BUILDABLE"]
mapping = [
("use-build-blocker", "useBuildBlocker", True),
("", "blockingJobs", jobs),
("block-level", "blockLevel", "GLOBAL", block_level_types),
("queue-scanning", "scanQueueFor", "DISABLED", queue_scan_types),
]
helpers.convert_mapping_to_xml(blocker, data, mapping, fail_required=True)
def copyartifact(registry, xml_parent, data):
"""yaml: copyartifact
Specify a list of projects that have access to copy the artifacts of
this project.
Requires the Jenkins :jenkins-plugins:`Copy Artifact plugin
<copyartifact>`.
:arg str projects: comma separated list of projects that can copy
artifacts of this project. Wild card character '*' is available.
Example:
.. literalinclude::
/../../tests/properties/fixtures/copyartifact.yaml
:language: yaml
"""
copyartifact = XML.SubElement(
xml_parent,
"hudson.plugins." "copyartifact." "CopyArtifactPermissionProperty",
plugin="copyartifact",
)
if not data or not data.get("projects", None):
raise JenkinsJobsException("projects string must exist and " "not be empty")
projectlist = XML.SubElement(copyartifact, "projectNameList")
for project in str(data.get("projects")).split(","):
XML.SubElement(projectlist, "string").text = project
def batch_tasks(registry, xml_parent, data):
"""yaml: batch-tasks
Batch tasks can be tasks for events like releases, integration, archiving,
etc. In this way, anyone in the project team can execute them in a way that
leaves a record.
A batch task consists of a shell script and a name. When you execute
a build, the shell script gets run on the workspace, just like a build.
Batch tasks and builds "lock" the workspace, so when one of those
activities is in progress, all the others will block in the queue.
Requires the Jenkins :jenkins-plugins:`Batch Task Plugin <batch-task>`.
:arg list batch-tasks: Batch tasks.
:Tasks:
* **name** (`str`) Task name.
* **script** (`str`) Task script.
Example:
.. literalinclude:: /../../tests/properties/fixtures/batch-task.yaml
:language: yaml
"""
pdef = XML.SubElement(xml_parent, "hudson.plugins.batch__task.BatchTaskProperty")
tasks = XML.SubElement(pdef, "tasks")
for task in data:
batch_task = XML.SubElement(tasks, "hudson.plugins.batch__task.BatchTask")
mapping = [("name", "name", None), ("script", "script", None)]
helpers.convert_mapping_to_xml(batch_task, task, mapping, fail_required=True)
def heavy_job(registry, xml_parent, data):
"""yaml: heavy-job
    This plugin allows you to define a "weight" for each job,
    making each job consume that many executors.
Requires the Jenkins :jenkins-plugins:`Heavy Job Plugin <heavy-job>`.
:arg int weight: Specify the total number of executors
that this job should occupy (default 1)
Example:
.. literalinclude:: /../../tests/properties/fixtures/heavy-job.yaml
:language: yaml
"""
heavyjob = XML.SubElement(
xml_parent, "hudson.plugins." "heavy__job.HeavyJobProperty"
)
mapping = [("weight", "weight", 1)]
helpers.convert_mapping_to_xml(heavyjob, data, mapping, fail_required=True)
def slave_utilization(registry, xml_parent, data):
"""yaml: slave-utilization
This plugin allows you to specify the percentage of a slave's capacity a
job wants to use.
Requires the Jenkins :jenkins-plugins:`Slave Utilization Plugin
<slave-utilization-plugin>`.
:arg int slave-percentage: Specify the percentage of a slave's execution
slots that this job should occupy (default 0)
:arg bool single-instance-per-slave: Control whether concurrent instances
of this job will be permitted to run in parallel on a single slave
(default false)
Example:
.. literalinclude::
/../../tests/properties/fixtures/slave-utilization1.yaml
:language: yaml
"""
utilization = XML.SubElement(
xml_parent, "com.suryagaddipati.jenkins.SlaveUtilizationProperty"
)
    percent = int(data.get("slave-percentage", 0))
    # any non-zero percentage implies the job needs exclusive node access
    exclusive_node_access = bool(percent)
mapping = [
("", "needsExclusiveAccessToNode", exclusive_node_access),
("", "slaveUtilizationPercentage", percent),
("single-instance-per-slave", "singleInstancePerSlave", False),
]
helpers.convert_mapping_to_xml(utilization, data, mapping, fail_required=True)
def delivery_pipeline(registry, xml_parent, data):
"""yaml: delivery-pipeline
Requires the Jenkins :jenkins-plugins:`Delivery Pipeline Plugin
<delivery-pipeline-plugin>`.
:arg str stage: Name of the stage for this job (default '')
:arg str task: Name of the task for this job (default '')
:arg str description: task description template for this job
(default '')
Minimal Example:
.. literalinclude::
/../../tests/properties/fixtures/delivery-pipeline-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/properties/fixtures/delivery-pipeline-full.yaml
:language: yaml
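
    An inline sketch for orientation (stage and task names are hypothetical;
    the fixture files above remain the authoritative examples):

    .. code-block:: yaml

        properties:
          - delivery-pipeline:
              stage: Build
              task: compile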
"""
pipeline = XML.SubElement(xml_parent, "se.diabol.jenkins.pipeline.PipelineProperty")
pipeline.set("plugin", "delivery-pipeline-plugin")
mapping = [
("stage", "stageName", ""),
("task", "taskName", ""),
("description", "descriptionTemplate", ""),
]
helpers.convert_mapping_to_xml(pipeline, data, mapping, fail_required=True)
def zeromq_event(registry, xml_parent, data):
"""yaml: zeromq-event
This is a Jenkins plugin that will publish Jenkins Job run events
(start, complete, finish) to a ZMQ PUB socket.
Requires the Jenkins `ZMQ Event Publisher.
<https://opendev.org/x/zmq-event-publisher>`_
Example:
.. literalinclude::
/../../tests/properties/fixtures/zeromq-event.yaml
:language: yaml
"""
zmq_event = XML.SubElement(
xml_parent,
"org.jenkinsci.plugins." "ZMQEventPublisher.HudsonNotificationProperty",
)
mapping = [("", "enabled", True)]
helpers.convert_mapping_to_xml(zmq_event, data, mapping, fail_required=True)
def slack(registry, xml_parent, data):
"""yaml: slack
Requires the Jenkins :jenkins-plugins:`Slack Plugin <slack>`.
    When using Slack Plugin versions < 2.0, the plugin requires a publisher
    as well as properties, so please note that you have to add the publisher
    to your job configuration as well. When using Slack Plugin version
    >= 2.0, you should only configure the publisher.
:arg bool notify-start: Send notification when the job starts
(default false)
:arg bool notify-success: Send notification on success. (default false)
:arg bool notify-aborted: Send notification when job is aborted. (
default false)
:arg bool notify-not-built: Send notification when job set to NOT_BUILT
status. (default false)
:arg bool notify-unstable: Send notification when job becomes unstable.
(default false)
:arg bool notify-failure: Send notification when job fails.
(default false)
:arg bool notify-back-to-normal: Send notification when job is
succeeding again after being unstable or failed. (default false)
    :arg bool notify-repeated-failure: Send notification when job is
        still failing after last failure. (default false)
    :arg bool include-test-summary: Include the test summary. (default
        false)
:arg bool include-custom-message: Include a custom message into the
notification. (default false)
:arg str custom-message: Custom message to be included. (default '')
:arg str room: A comma separated list of rooms / channels to send
the notifications to. (default '')
Example:
.. literalinclude::
/../../tests/properties/fixtures/slack001.yaml
:language: yaml
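
    For orientation, an inline sketch (the channel name is a hypothetical
    placeholder; the fixture file above remains authoritative):

    .. code-block:: yaml

        properties:
          - slack:
              notify-start: true
              notify-failure: true
              notify-back-to-normal: true
              room: '#build-notifications'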
"""
logger = logging.getLogger(__name__)
plugin_info = registry.get_plugin_info("Slack Notification Plugin")
plugin_ver = pkg_resources.parse_version(plugin_info.get("version", "0"))
if plugin_ver >= pkg_resources.parse_version("2.0"):
logger.warning("properties section is not used with plugin version >= 2.0")
mapping = (
("notify-start", "startNotification", False),
("notify-success", "notifySuccess", False),
("notify-aborted", "notifyAborted", False),
("notify-not-built", "notifyNotBuilt", False),
("notify-unstable", "notifyUnstable", False),
("notify-failure", "notifyFailure", False),
("notify-back-to-normal", "notifyBackToNormal", False),
("notify-repeated-failure", "notifyRepeatedFailure", False),
("include-test-summary", "includeTestSummary", False),
("include-custom-message", "includeCustomMessage", False),
("custom-message", "customMessage", ""),
("room", "room", ""),
)
slack = XML.SubElement(
xml_parent, "jenkins.plugins.slack.SlackNotifier_-SlackJobProperty"
)
# Ensure that custom-message is set when include-custom-message is set
# to true.
if data.get("include-custom-message", False):
if not data.get("custom-message", ""):
raise MissingAttributeError("custom-message")
helpers.convert_mapping_to_xml(slack, data, mapping, fail_required=True)
def rebuild(registry, xml_parent, data):
"""yaml: rebuild
    This plug-in allows the user to rebuild a parameterized build without
    entering the parameters again. It also allows the user to edit the
    parameters before rebuilding.
Requires the Jenkins :jenkins-plugins:`Rebuild Plugin <rebuild>`.
:arg bool auto-rebuild: Rebuild without asking for parameters
(default false)
:arg bool rebuild-disabled: Disable rebuilding for this job
(default false)
Minimal Example:
.. literalinclude:: /../../tests/properties/fixtures/rebuild-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/properties/fixtures/rebuild-full.yaml
:language: yaml
"""
sub_element = XML.SubElement(xml_parent, "com.sonyericsson.rebuild.RebuildSettings")
sub_element.set("plugin", "rebuild")
mapping = [
("auto-rebuild", "autoRebuild", False),
("rebuild-disabled", "rebuildDisabled", False),
]
helpers.convert_mapping_to_xml(sub_element, data, mapping, fail_required=True)
def build_discarder(registry, xml_parent, data):
"""yaml: build-discarder
:arg int days-to-keep: Number of days to keep builds for (default -1)
:arg int num-to-keep: Number of builds to keep (default -1)
:arg int artifact-days-to-keep: Number of days to keep builds with
artifacts (default -1)
:arg int artifact-num-to-keep: Number of builds with artifacts to keep
(default -1)
Example:
.. literalinclude::
/../../tests/properties/fixtures/build-discarder-001.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/build-discarder-002.yaml
:language: yaml
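
    An inline sketch for orientation (the retention numbers are
    illustrative; the fixture files above remain authoritative):

    .. code-block:: yaml

        properties:
          - build-discarder:
              days-to-keep: 30
              num-to-keep: 100
              artifact-num-to-keep: 10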
"""
base_sub = XML.SubElement(xml_parent, "jenkins.model.BuildDiscarderProperty")
strategy = XML.SubElement(base_sub, "strategy")
strategy.set("class", "hudson.tasks.LogRotator")
mappings = [
("days-to-keep", "daysToKeep", -1),
("num-to-keep", "numToKeep", -1),
("artifact-days-to-keep", "artifactDaysToKeep", -1),
("artifact-num-to-keep", "artifactNumToKeep", -1),
]
helpers.convert_mapping_to_xml(strategy, data, mappings, fail_required=True)
def slave_prerequisites(registry, xml_parent, data):
"""yaml: slave-prerequisites
    This plugin allows you to check prerequisites on a slave node before
    a job can run a build on it.
Requires the Jenkins :jenkins-plugins:`Slave Prerequisites Plugin
<slave-prerequisites>`.
:arg str script: A script to be executed on slave node.
If returning non 0 status, the node will be vetoed from hosting
the build. (required)
:arg str interpreter: Command line interpreter to be used for executing
the prerequisite script - either `shell` for Unix shell or `cmd` for
Windows batch script. (default shell)
Example:
.. literalinclude::
/../../tests/properties/fixtures/slave-prerequisites-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/slave-prerequisites-full.yaml
:language: yaml
"""
prereqs = XML.SubElement(xml_parent, "com.cloudbees.plugins.JobPrerequisites")
mappings = [
("script", "script", None),
(
"interpreter",
"interpreter",
"shell",
{"cmd": "windows batch command", "shell": "shell script"},
),
]
helpers.convert_mapping_to_xml(prereqs, data, mappings, fail_required=True)
def groovy_label(registry, xml_parent, data):
"""yaml: groovy-label
This plugin allows you to use Groovy script to restrict where this project
can be run.
Requires the Jenkins :jenkins-plugins:`Groovy Label Assignment Plugin
<groovy-label-assignment>`.
    The return value from the Groovy script is treated as a Label
    Expression, as follows:
    - A non-string value is converted to a string using toString()
    - When a null or blank string is returned, the node restriction does
      not take effect (or is not overwritten).
    - When an exception occurs or the Label Expression cannot be parsed
      correctly, the build is cancelled.
:arg str script: Groovy script (default '')
:arg bool sandbox: Use Groovy Sandbox. (default false)
If checked, run this Groovy script in a sandbox with limited abilities.
If unchecked, and you are not a Jenkins administrator, you will need to
wait for an administrator to approve the script
:arg list classpath: Additional classpath entries accessible from
the script, each of which should be an absolute local path or
URL to a JAR file, according to "The file URI Scheme" (optional)
Minimal Example:
.. literalinclude::
/../../tests/properties/fixtures/groovy-label-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/properties/fixtures/groovy-label-full.yaml
:language: yaml
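
    For orientation, an inline sketch (the label expression returned by the
    script is hypothetical; the fixture files above remain authoritative):

    .. code-block:: yaml

        properties:
          - groovy-label:
              script: 'return "docker-node"'
              sandbox: true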
"""
sub_element = XML.SubElement(
xml_parent,
"jp.ikedam.jenkins.plugins."
"groovy__label__assignment."
"GroovyLabelAssignmentProperty",
)
sub_element.set("plugin", "groovy-label-assignment")
security = XML.SubElement(sub_element, "secureGroovyScript")
security.set("plugin", "script-security")
mapping = [("script", "script", ""), ("sandbox", "sandbox", False)]
helpers.convert_mapping_to_xml(security, data, mapping, fail_required=True)
if data and "classpath" in data:
classpath = XML.SubElement(security, "classpath")
for value in data["classpath"]:
entry = XML.SubElement(classpath, "entry")
XML.SubElement(entry, "url").text = value
def lockable_resources(registry, xml_parent, data):
"""yaml: lockable-resources
Requires the Jenkins :jenkins-plugins:`Lockable Resources Plugin
<lockable-resources>`.
    :arg str resources: List of required resources, space separated.
        (required, mutually exclusive with label)
    :arg str label: If you have created a pool of resources, i.e. a label,
        you can take it into use here. The build will select the resource(s)
        from the pool that includes all resources sharing the given label.
        (required, mutually exclusive with resources)
:arg str var-name: Name for the Jenkins variable to store the reserved
resources in. Leave empty to disable. (default '')
:arg int number: Number of resources to request, empty value or 0 means
all. This is useful, if you have a pool of similar resources,
from which you want one or more to be reserved. (default 0)
:arg str match-script: Groovy script to reserve resource based on its
properties. Leave empty to disable. (default None)
:arg bool groovy-sandbox: Execute the provided match-script in Groovy
sandbox. Leave empty to disable. (default False)
Example:
.. literalinclude::
/../../tests/properties/fixtures/lockable_resources_minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/lockable_resources_label.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/lockable_resources_full.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/lockable_resources_groovy.yaml
:language: yaml
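
    An inline sketch for orientation (resource names are hypothetical; the
    fixture files above remain the authoritative examples):

    .. code-block:: yaml

        properties:
          - lockable-resources:
              resources: 'device-a device-b'
              var-name: LOCKED_DEVICE
              number: 1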
"""
lockable_resources = XML.SubElement(
xml_parent, "org.jenkins.plugins.lockableresources.RequiredResourcesProperty"
)
if data.get("resources") and data.get("label"):
raise AttributeConflictError("resources", ("label",))
mapping = [
("resources", "resourceNames", ""),
("var-name", "resourceNamesVar", ""),
("number", "resourceNumber", 0),
("label", "labelName", ""),
]
helpers.convert_mapping_to_xml(
lockable_resources, data, mapping, fail_required=True
)
secure_groovy_script = XML.SubElement(lockable_resources, "resourceMatchScript")
mapping = [("match-script", "script", None), ("groovy-sandbox", "sandbox", False)]
helpers.convert_mapping_to_xml(
secure_groovy_script, data, mapping, fail_required=False
)
def docker_container(registry, xml_parent, data):
"""yaml: docker-container
Requires the Jenkins: :jenkins-plugins:`Docker Plugin <docker-plugin>`.
:arg str docker-registry-url: URL of the Docker registry. (default '')
    :arg str credentials-id: Credentials Id for the Docker registry.
        (default '')
    :arg bool commit-on-success: When a job completes, the docker slave
        instance is committed to a repository named after the job, with the
        build number as the tag. (default false)
:arg str additional-tag: Additional tag to apply to the docker slave
instance when committing it. (default '')
:arg bool push-on-success: Also push the resulting image when committing
the docker slave instance. (default false)
:arg bool clean-local-images: Clean images from the local daemon after
building. (default true)
Minimal Example:
.. literalinclude::
/../../tests/properties/fixtures/docker-container-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/properties/fixtures/docker-container-full.yaml
:language: yaml
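
    For orientation, an inline sketch (registry URL and credentials id are
    hypothetical placeholders; the fixture files above remain authoritative):

    .. code-block:: yaml

        properties:
          - docker-container:
              docker-registry-url: https://registry.example.com
              credentials-id: docker-registry-creds
              commit-on-success: true
              clean-local-images: true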
"""
xml_docker = XML.SubElement(
xml_parent, "com.nirima.jenkins.plugins.docker.DockerJobProperty"
)
registry = XML.SubElement(xml_docker, "registry")
registry.set("plugin", "docker-commons")
registry_mapping = [
("docker-registry-url", "url", ""),
("credentials-id", "credentialsId", ""),
]
helpers.convert_mapping_to_xml(
registry, data, registry_mapping, fail_required=False
)
mapping = [
("commit-on-success", "tagOnCompletion", False),
("additional-tag", "additionalTag", ""),
("push-on-success", "pushOnSuccess", False),
("clean-local-images", "cleanImages", True),
]
helpers.convert_mapping_to_xml(xml_docker, data, mapping, fail_required=True)
def disable_resume(registry, xml_parent, data):
"""yaml: disable-resume
Do not allow the pipeline to resume if the master restarts
Requires the Jenkins :jenkins-plugins:`Pipeline Job Plugin
<workflow-aggregator>`.
Example:
.. literalinclude::
/../../tests/properties/fixtures/disable-resume.yaml
:language: yaml
"""
XML.SubElement(
xml_parent,
"org.jenkinsci.plugins.workflow.job.properties." "DisableResumeJobProperty",
)
def cachet_gating(registry, xml_parent, data):
"""yaml: cachet-gating
The Cachet Gating Plugin provides a gating mechanism
based on the availability of resources.
Requires the Jenkins: :jenkins-plugins:`Cachet Gate Plugin
<cachet-gating>`.
:arg bool required-resources: Confirm availability of listed
resources before building. Requires the list of resources to
also be defined. (default true)
:arg list resources: which resources to gate
Example:
.. literalinclude:: /../../tests/properties/fixtures/cachet-gating.yaml
:language: yaml
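
    An inline sketch for orientation (resource names are hypothetical; the
    fixture file above remains the authoritative example):

    .. code-block:: yaml

        properties:
          - cachet-gating:
              required-resources: true
              resources:
                - service-a
                - service-b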
"""
cachet = XML.SubElement(
xml_parent, "com.redhat.jenkins.plugins.cachet.CachetJobProperty"
)
cachet.set("plugin", "cachet-gating")
mapping = [("required-resources", "requiredResources", True)]
helpers.convert_mapping_to_xml(cachet, data, mapping, fail_required=True)
resources_data = data.get("resources", [])
if resources_data:
resources = XML.SubElement(cachet, "resources")
for resource in resources_data:
XML.SubElement(resources, "string").text = str(resource)
class Properties(jenkins_jobs.modules.base.Base):
sequence = 20
component_type = "property"
component_list_type = "properties"
def gen_xml(self, xml_parent, data):
properties = xml_parent.find("properties")
if properties is None:
properties = XML.SubElement(xml_parent, "properties")
for prop in data.get("properties", []):
# Pass a flag for folder permissions to the authorization method
if next(iter(prop)) == "authorization":
# Only projects are placed in folders
if "project-type" in data:
if data["project-type"] == "folder":
prop["authorization"]["_use_folder_perms"] = True
elif data["project-type"] == "multibranch":
prop["authorization"]["_use_folder_perms"] = True
else:
prop["authorization"]["_use_folder_perms"] = "folder" in data
else:
prop["authorization"]["_use_folder_perms"] = False
self.registry.dispatch("property", properties, prop)
| 35.655981
| 97
| 0.664372
|
b71dacf8e651440e1d27f1d7df9d0e4cb3e98d0c
| 522
|
py
|
Python
|
tournaments/bfsComponentSize/bfsComponentSize.py
|
gurfinkel/codeSignal
|
114817947ac6311bd53a48f0f0e17c0614bf7911
|
[
"MIT"
] | 5
|
2020-02-06T09:51:22.000Z
|
2021-03-19T00:18:44.000Z
|
tournaments/bfsComponentSize/bfsComponentSize.py
|
gurfinkel/codeSignal
|
114817947ac6311bd53a48f0f0e17c0614bf7911
|
[
"MIT"
] | null | null | null |
tournaments/bfsComponentSize/bfsComponentSize.py
|
gurfinkel/codeSignal
|
114817947ac6311bd53a48f0f0e17c0614bf7911
|
[
"MIT"
] | 3
|
2019-09-27T13:06:21.000Z
|
2021-04-20T23:13:17.000Z
|
from collections import deque


def bfsComponentSize(matrix):
    # Breadth-first search from vertex 1 over an adjacency matrix;
    # returns the number of vertices in vertex 1's connected component.
    visited = [False for _ in range(len(matrix))]
    queue = deque()
    componentSize = 0
    visited[1] = True
    queue.append(1)
    while queue:
        # popleft() gives FIFO order, i.e. a true breadth-first traversal
        # (list.pop() popped from the end, which made the original a DFS)
        currentVertex = queue.popleft()
        componentSize += 1
        for nextVertex in range(len(matrix)):
            if matrix[currentVertex][nextVertex] and not visited[nextVertex]:
                # mark on enqueue so a vertex is never queued twice
                visited[nextVertex] = True
                queue.append(nextVertex)
    return componentSize
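

# A hypothetical usage sketch (not part of the original solution):
# matrix[i][j] is truthy when vertices i and j are adjacent. In the 4-vertex
# graph below, vertices 0, 1 and 2 form one component, so the result is 3.
if __name__ == "__main__":
    example = [
        [False, True, False, False],
        [True, False, True, False],
        [False, True, False, False],
        [False, False, False, False],
    ]
    assert bfsComponentSize(example) == 3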
| 29
| 77
| 0.60728
|
e2a81306a96bf5231d94fa6a46289aa2114f45c4
| 2,607
|
py
|
Python
|
mayaside/CharacterAdaptionPreious.py
|
Kususumu/pythonServerWorkplace
|
d76080276b9616bbf5945413bcf4336779546ebc
|
[
"MIT"
] | null | null | null |
mayaside/CharacterAdaptionPreious.py
|
Kususumu/pythonServerWorkplace
|
d76080276b9616bbf5945413bcf4336779546ebc
|
[
"MIT"
] | null | null | null |
mayaside/CharacterAdaptionPreious.py
|
Kususumu/pythonServerWorkplace
|
d76080276b9616bbf5945413bcf4336779546ebc
|
[
"MIT"
] | null | null | null |
#CharacterAdaptionPreious.py
#Introduction
#A Python script for Maya that can
#0.load a scene with the furniture
# ->and the character
#1.import a Character & import an obj
#2.move & rotate it to where you want
#3.duplicate one Character and make it a white model
#4.let the White Character do
# ->what the Color Character does
#5.play an animation
#6.render the animation to a specific folder
#Start by moving a cube and duplicating it
#to make an animation.
#First create and copy the object named ColorCharacter,
#after deleting every object whose name matches "Color********".
#In newestModel.mb every step is 75 grid units.
#Function : keyFrameForTwoCharacter
#Usage : input the animated Characters and a list of what they should do
# ->(i.e. what should happen to the objects)
# ->to make the two Characters do the same animation in one step
#
#The true entry point is the main body at the bottom of this script
#meshList = cmds.ls('Main',sl = False , mat = False)
#result = meshList[0]
#print(result)
#cmds.move(0,0,0,result)
import maya.cmds as cmds

def keyFrameForTwoCharacter(colorName,whiteName,listToDo):
    #NOTE: listToDo is accepted but not used yet; the keyframes below are
    #hard-coded and startTime/endTime are read from module-level globals
    #colorObject
    cmds.setKeyframe(colorName,time=startTime,attribute='translateX',value=0)
    cmds.setKeyframe(colorName,time=endTime,attribute='translateX',value=2)
    #White Object
    cmds.setKeyframe(whiteName,time=startTime,attribute='translateY',value=0)
    cmds.setKeyframe(whiteName,time=endTime,attribute='translateY',value=2)
    return
#Function : shapeWhiteCharacter
#Usage : when we get a copy of the original Character,
# ->we need to turn the copy into the white Character
#
def shapeWhiteCharacter(whiteCharacter):
    return
#Function : renderProcess
#Usage : used to render the final scene
#
def renderProcess():
    return
cubeList = cmds.ls('Color*')
if len(cubeList) > 0:
cmds.delete(cubeList )
cubeList = cmds.ls('White*')
if len(cubeList) > 0:
cmds.delete(cubeList )
#create a cube that is 3,3,3
result = cmds.polyCube(w=3,h=3,d=3,name = 'ColorCharacter')
#print the result
print('result: ' + str(result))
#get the Name of the Model from result
transformName = result[0]
duplicateResult = cmds.duplicate(transformName , name='WhiteCharacter')
print('duplicateResult: ' + str(duplicateResult))
duplicateName = duplicateResult[0]
#start giving an animation
startTime = cmds.playbackOptions(query=True , minTime=True)
endTime = cmds.playbackOptions(query=True , maxTime=True)
#Delete the Key of one Animation
cmds.cutKey(transformName,time=(startTime,endTime),attribute='translateX')
#setKeyframe should eventually be a function chosen by the user
#Set Key Frame (third argument stands in for listToDo and is currently unused)
keyFrameForTwoCharacter(transformName,duplicateName,startTime)
| 28.336957
| 78
| 0.740315
|
c8d53db37c614f3c6a8cba5aa09275650b08700e
| 12,116
|
py
|
Python
|
delphi/apps/rest_api/models.py
|
mwdchang/delphi
|
c6177f2d614118883eaaa7f5300f3e46f10ddc7e
|
[
"Apache-2.0"
] | null | null | null |
delphi/apps/rest_api/models.py
|
mwdchang/delphi
|
c6177f2d614118883eaaa7f5300f3e46f10ddc7e
|
[
"Apache-2.0"
] | null | null | null |
delphi/apps/rest_api/models.py
|
mwdchang/delphi
|
c6177f2d614118883eaaa7f5300f3e46f10ddc7e
|
[
"Apache-2.0"
] | 1
|
2019-07-18T19:13:13.000Z
|
2019-07-18T19:13:13.000Z
|
import json
from uuid import uuid4
from enum import Enum, unique
from typing import Optional, List
from dataclasses import dataclass, field, asdict
from flask_sqlalchemy import SQLAlchemy
from delphi.apps.rest_api import db
from sqlalchemy import PickleType
from sqlalchemy.inspection import inspect
from sqlalchemy.ext import mutable
from sqlalchemy.sql import operators
from sqlalchemy.types import TypeDecorator
class JsonEncodedList(db.TypeDecorator):
"""Enables list storage by encoding and decoding on the fly."""
impl = db.Text
def process_bind_param(self, value, dialect):
if value is None:
return "[]"
else:
return str(value)
    def process_result_value(self, value, dialect):
        if value is None:
            return []
        else:
            # str(list) stored the Python repr with single quotes; swap them
            # for double quotes so the text parses as JSON (fragile if the
            # list elements themselves contain quotes)
            return json.loads(value.replace("'", '"'))
mutable.MutableList.associate_with(JsonEncodedList)
class JsonEncodedDict(db.TypeDecorator):
"""Enables JsonEncodedDict storage by encoding and decoding on the fly."""
impl = db.Text
def process_bind_param(self, value, dialect):
if value is None:
return "{}"
else:
return json.dumps(value)
def process_result_value(self, value, dialect):
if value is None:
return {}
else:
return json.loads(value)
mutable.MutableDict.associate_with(JsonEncodedDict)
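# Illustrative round-trip (hypothetical values, outside any app context):
# assigning {"a": 1} to a JsonEncodedDict column stores the text '{"a": 1}'
# via process_bind_param, and process_result_value decodes it back into a
# dict when the row is loaded.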
class Serializable(object):
def deserialize(self):
return {c: getattr(self, c) for c in inspect(self).attrs.keys()}
    @staticmethod
    def deserialize_list(l):
        # these models define deserialize(), not serialize()
        return [m.deserialize() for m in l]
class DelphiModel(db.Model, Serializable):
""" Delphi AnalysisGraph Model """
__tablename__ = "delphimodel"
id = db.Column(db.String, primary_key=True)
icm_metadata = db.relationship(
"ICMMetadata", backref="delphimodel", lazy=True, uselist=False
)
model = db.Column(db.PickleType)
class ICMMetadata(db.Model, Serializable):
""" Placeholder docstring for class ICMMetadata. """
__tablename__ = "icmmetadata"
    # use a callable default so each row gets a fresh UUID; a bare
    # str(uuid4()) would be evaluated only once, at class-definition time
    id = db.Column(db.String, primary_key=True, default=lambda: str(uuid4()))
icmProvider = db.Column(
db.Enum("BAE", "BBN", "STR", "DUMMY"), nullable=True
)
title = db.Column(db.String, nullable=True)
version = db.Column(db.Integer, nullable=True)
created = db.Column(db.String, nullable=True)
createdByUser_id = db.Column(
db.Integer, db.ForeignKey("user.id"), nullable=True
)
createdByUser = db.relationship("User", foreign_keys=[createdByUser_id])
lastAccessed = db.Column(db.String, nullable=True)
lastAccessedByUser_id = db.Column(
db.Integer, db.ForeignKey("user.id"), nullable=True
)
lastAccessedByUser = db.relationship(
"User", foreign_keys=[lastAccessedByUser_id]
)
lastUpdated = db.Column(db.String, nullable=True)
lastUpdatedByUser_id = db.Column(
db.Integer, db.ForeignKey("user.id"), nullable=True
)
lastUpdatedByUser = db.relationship(
"User", foreign_keys=[lastUpdatedByUser_id]
)
estimatedNumberOfPrimitives = db.Column(db.Integer, nullable=True)
lifecycleState = db.Column(
db.Enum(
"PROPOSED",
"APPROVED",
"EXPERIMENTAL",
"OPERATIONAL",
"SUSPENDED",
"ARCHIVED",
"CREATED",
),
nullable=True,
)
derivation = db.Column(JsonEncodedList, nullable=True)
model_id = db.Column(db.String, db.ForeignKey("delphimodel.id"))
__mapper_args__ = {"polymorphic_identity": "ICMMetadata"}
class User(db.Model, Serializable):
""" Placeholder docstring for class User. """
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String, nullable=True)
firstName = db.Column(db.String, nullable=True)
lastName = db.Column(db.String, nullable=True)
email = db.Column(db.String, nullable=True)
password = db.Column(db.String, nullable=True)
phone = db.Column(db.String, nullable=True)
userStatus = db.Column(db.Integer, nullable=True)
__mapper_args__ = {"polymorphic_identity": "User"}
class ServerResponse(db.Model, Serializable):
""" Placeholder docstring for class ServerResponse. """
__tablename__ = "serverresponse"
    id = db.Column(db.String, primary_key=True, default=lambda: str(uuid4()))
message = db.Column(db.String, nullable=True)
__mapper_args__ = {"polymorphic_identity": "ServerResponse"}
class CausalPrimitive(db.Model, Serializable):
""" Top level object that contains common properties that would apply to any causal primitive (variable, relationship, etc.) """
__tablename__ = "causalprimitive"
baseType = db.Column(db.String)
namespaces = db.Column(JsonEncodedDict, nullable=True)
types = db.Column(JsonEncodedList, nullable=True)
editable = db.Column(db.Boolean, nullable=True, default=True)
disableable = db.Column(db.Boolean, nullable=True, default=True)
disabled = db.Column(db.Boolean, nullable=True, default=False)
    id = db.Column(db.String, primary_key=True, default=lambda: str(uuid4()))
label = db.Column(db.String, nullable=True)
description = db.Column(db.String, nullable=True)
lastUpdated = db.Column(db.String, nullable=True)
auxiliaryProperties = db.Column(JsonEncodedList, nullable=True)
model_id = db.Column(db.String, db.ForeignKey("delphimodel.id"))
__mapper_args__ = {
"polymorphic_identity": "CausalPrimitive",
"polymorphic_on": baseType,
}
class Entity(CausalPrimitive):
""" Placeholder docstring for class Entity. """
__tablename__ = "entity"
id = db.Column(
db.String,
db.ForeignKey("causalprimitive.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
confidence = db.Column(db.Float, nullable=True)
__mapper_args__ = {"polymorphic_identity": "Entity"}
class CausalVariable(CausalPrimitive):
""" Placeholder docstring for class CausalVariable. """
__tablename__ = "causalvariable"
id = db.Column(
db.String,
db.ForeignKey("causalprimitive.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
units = db.Column(db.String, nullable=True)
backingEntities = db.Column(JsonEncodedList, nullable=True)
lastKnownValue = db.Column(JsonEncodedDict, nullable=True)
confidence = db.Column(db.Float, nullable=True)
range = db.Column(JsonEncodedDict, nullable=True)
__mapper_args__ = {"polymorphic_identity": "CausalVariable"}
class ConfigurationVariable(CausalPrimitive):
""" Placeholder docstring for class ConfigurationVariable. """
__tablename__ = "configurationvariable"
id = db.Column(
db.String,
db.ForeignKey("causalprimitive.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
units = db.Column(db.String, nullable=True)
lastKnownValue = db.Column(JsonEncodedDict, nullable=True)
range = db.Column(JsonEncodedDict, nullable=True)
__mapper_args__ = {"polymorphic_identity": "ConfigurationVariable"}
class CausalRelationship(CausalPrimitive):
""" Placeholder docstring for class CausalRelationship. """
__tablename__ = "causalrelationship"
id = db.Column(
db.String,
db.ForeignKey("causalprimitive.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
source = db.Column(JsonEncodedDict, nullable=True)
target = db.Column(JsonEncodedDict, nullable=True)
confidence = db.Column(db.Float, nullable=True)
strength = db.Column(db.Float, nullable=True)
reinforcement = db.Column(db.Boolean, nullable=True)
__mapper_args__ = {"polymorphic_identity": "CausalRelationship"}
class Relationship(CausalPrimitive):
""" Placeholder docstring for class Relationship. """
__tablename__ = "relationship"
id = db.Column(
db.String,
db.ForeignKey("causalprimitive.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
source = db.Column(JsonEncodedDict, nullable=True)
target = db.Column(JsonEncodedDict, nullable=True)
confidence = db.Column(db.Float, nullable=True)
__mapper_args__ = {"polymorphic_identity": "Relationship"}
class Evidence(db.Model, Serializable):
""" Object that holds a reference to evidence (either KO from TA1 or human provided). """
__tablename__ = "evidence"
    id = db.Column(db.String, primary_key=True, default=lambda: str(uuid4()))
link = db.Column(db.String, nullable=True)
description = db.Column(db.String, nullable=True)
category = db.Column(db.String, nullable=True)
rank = db.Column(db.Integer, nullable=True)
causalrelationship_id = db.Column(
db.String, db.ForeignKey("causalrelationship.id")
)
__mapper_args__ = {"polymorphic_identity": "Evidence"}
class Experiment(db.Model, Serializable):
""" structure used for experimentation """
__tablename__ = "experiment"
baseType = db.Column(db.String)
    id = db.Column(db.String, primary_key=True, default=lambda: str(uuid4()))
label = db.Column(db.String, nullable=True)
options = db.Column(JsonEncodedDict, nullable=True)
__mapper_args__ = {
"polymorphic_identity": "Experiment",
"polymorphic_on": baseType,
}
class ForwardProjection(Experiment):
""" Placeholder docstring for class ForwardProjection. """
__tablename__ = "forwardprojection"
id = db.Column(
db.String,
db.ForeignKey("experiment.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
interventions = db.Column(JsonEncodedList, nullable=True)
projection = db.Column(JsonEncodedDict, nullable=True)
__mapper_args__ = {"polymorphic_identity": "ForwardProjection"}
class SensitivityAnalysis(Experiment):
""" Placeholder docstring for class SensitivityAnalysis. """
__tablename__ = "sensitivityanalysis"
id = db.Column(
db.String,
db.ForeignKey("experiment.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
nodeOfInterest = db.Column(db.String, nullable=True)
numSteps = db.Column(db.Integer, nullable=True)
sensitivityVariables = db.Column(JsonEncodedList, nullable=True)
__mapper_args__ = {"polymorphic_identity": "SensitivityAnalysis"}
class ExperimentResult(db.Model, Serializable):
""" Notional model of experiment results """
__tablename__ = "experimentresult"
baseType = db.Column(db.String)
    id = db.Column(db.String, primary_key=True, default=lambda: str(uuid4()))
__mapper_args__ = {
"polymorphic_identity": "ExperimentResult",
"polymorphic_on": baseType,
}
class CauseMosForwardProjectionResult(ExperimentResult):
""" Placeholder docstring for class CauseMosForwardProjectionResult. """
__tablename__ = "causemosforwardprojectionresult"
id = db.Column(
db.String,
db.ForeignKey("experimentresult.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
results = db.Column(JsonEncodedDict, nullable=True)
__mapper_args__ = {"polymorphic_identity": "CauseMosForwardProjectionResult"}
class ForwardProjectionResult(ExperimentResult):
""" Placeholder docstring for class ForwardProjectionResult. """
__tablename__ = "forwardprojectionresult"
id = db.Column(
db.String,
db.ForeignKey("experimentresult.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
projection = db.Column(JsonEncodedDict, nullable=True)
results = db.Column(JsonEncodedList, nullable=True)
__mapper_args__ = {"polymorphic_identity": "ForwardProjectionResult"}
class SensitivityAnalysisResult(ExperimentResult):
""" Placeholder docstring for class SensitivityAnalysisResult. """
__tablename__ = "sensitivityanalysisresult"
id = db.Column(
db.String,
db.ForeignKey("experimentresult.id"),
primary_key=True,
        default=lambda: str(uuid4()),
)
results = db.Column(JsonEncodedList, nullable=True)
__mapper_args__ = {"polymorphic_identity": "SensitivityAnalysisResult"}
| 33.285714
| 132
| 0.685127
|
b6e0f29a1d3ce4088ab0cc3b03974a1afa83d557
| 80,222
|
py
|
Python
|
cellpack/mgl_tools/upy/ucsfchimera/chimeraHelper.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
cellpack/mgl_tools/upy/ucsfchimera/chimeraHelper.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | 21
|
2021-10-02T00:07:05.000Z
|
2022-03-30T00:02:10.000Z
|
cellpack/mgl_tools/upy/ucsfchimera/chimeraHelper.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/ucsfchimera/chimeraHelper.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 9 11:18:03 2011
@author: -
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 5 23:30:44 2010
@author: Ludovic Autin - ludovic.autin@gmail.com
"""
# standard modules
import sys
import os
import struct
import string
import types
import math
from math import *
from types import StringType, ListType
import numpy
import Image
# base helper class
from upy import hostHelper
# Problem: "instance" doesn't really exist as such; it is an instance of
# mesh/sphere/cylinder directly.
# check autofill display
import chimera
class chimeraHelper(hostHelper.Helper):
"""
    The UCSF Chimera helper abstract class
    ============================
    This is the Chimera helper object. The helper
    gives access to the basic functions needed to create and edit a host 3d object and scene.
"""
# this id can probably found in c4d.symbols
# TAG ID
SPLINE = "kNurbsCurve"
INSTANCE = "Dejavu.Geom"
EMPTY = "Dejavu.Geom"
SPHERE = "DejaVu.Spheres"
POLYGON = "DejaVu.IndexedPolygons"
# msutil = om.MScriptUtil()
pb = False
VERBOSE = 0
DEBUG = 0
viewer = None
host = "chimera"
def __init__(self, master=None):
hostHelper.Helper.__init__(self)
# we can define here some function alias
self.updateAppli = self.update
self.Cube = self.box
self.Box = self.box
# if type(master) is dict :
# self.viewer = master["master"]
# else :
# self.viewer = master
# if not isinstance(self.viewer,Viewer) or self.viewer is None :
# self.viewer = Viewer(master)
# self.getCurrentScene = c4d.documents.GetActiveDocument
# self.Polylines = Polylines
# self.Spheres = Spheres
# self.Cylinders =Cylinders
# self.Points = Points
# self.Geom = Geom
# self.Labels = Labels
self.viewer = chimera.openModels
# self.IndexedPolygons = IndexedPolygons
if self.viewer is not None:
self.AddObject = self.viewer.AddObject
def setViewer(self, vi):
self.viewer = vi
self.AddObject = self.viewer.AddObject
self.Labels = self.viewer.Labels
def getCurrentScene(self):
# actually return the Viewer instance
return self.viewer
def progressBar(self, progress=None, label=None):
"""update the progress bar status by progress value and label string
@type progress: Int/Float
@param progress: the new progress
@type label: string
@param label: the new message to put in the progress status
""" # resetProgressBar
        self.update()
def resetProgressBar(self):
"""reset the Progress Bar, using value"""
        self.update()
def update(self):
pass
def getType(self, object):
return object.__module__
def getMesh(self, m):
return None
def getName(self, o):
return o
def getObject(self, name):
        obj = self.viewer.get(name)
        return obj
def getChilds(self, obj):
return None
def deleteObject(self, obj):
return None
def newEmpty(
self, name, location=None, parentCenter=None, display=1, visible=0, **kw
):
return None
def newInstance(self, name, object, location=None, c4dmatrice=None, matrice=None):
return None
def setObjectMatrix(self, object, matrice, c4dmatrice=None):
return None
def concatObjectMatrix(self, object, matrice, c4dmatrice=None, local=True):
# #local or global?
# cmg = object.GetMg()
# cml = object.GetMl()
# if c4dmatrice !=None :
# #type of matrice
# if local :
# object.SetMl(cml*c4dmatrice)
# else :
# object.SetMg(cmg*c4dmatrice)
# else :
# mx = self.matrix2c4dMat(matrice,transpose=False)
# if local :
# object.SetMl(cml*mx)
# else :
# object.SetMg(cmg*mx)
pass
def GetAbsPosUntilRoot(self, obj):
# stop = False
# parent = obj.GetUp()
# pos=self.FromVec((0.,0.,0.))
# while not stop :
# pos = pos + parent.GetAbsPos()
# parent = parent.GetUp()
# if parent is None :
# stop = True
return [0, 0.0, 0.0]
def addObjectToScene(self, doc, obj, parent=None, centerRoot=True, rePos=None):
# doc.start_undo()
return None
def addCameraToScene(self, name, Type, focal, center, sc):
pass
def addLampToScene(self, name, Type, rgb, dist, energy, soft, shadow, center, sc):
pass
def reParent(self, obj, parent):
return None
def setInstance(self, name, object, location=None, c4dmatrice=None, matrice=None):
# instance = c4d.BaseObject(c4d.Oinstance)
# instance[1001]=object
# instance.SetName(name)#.replace(":","_")
# if location != None :
# instance.SetAbsPos(self.FromVec(location))
# if c4dmatrice !=None :
# #type of matre
# instance.SetMg(c4dmatrice)
# if matrice != None:
# mx = self.matrix2c4dMat(matrice)
# instance.SetMl(mx)
# p = instance.GetAbsPos()
# instance.SetAbsPos(c4d.Vector(p.y,p.z,p.x))
# return instance
return None
def getTranslation(self, name):
return None
def setTranslation(self, name, pos=[0.0, 0.0, 0.0]):
return None
def IndexedPolygons(self, name, vertices=None, faces=None, normals=None, **kw):
# should I build the geom->VRML->read it ?
return None
# def translateObj(self,obj,position,use_parent=True):
# if len(position) == 1 : c = position[0]
# else : c = position
# #print "upadteObj"
# newPos=self.FromVec(c)
#
# if use_parent :
# parentPos = self.GetAbsPosUntilRoot(obj)#parent.GetAbsPos()
# newPos = newPos - parentPos
# obj.ConcatTranslation(newPos)
#
# def scaleObj(self,obj,sc):
# if type(sc) is float :
# sc = [sc,sc,sc]
# #obj.scale = sc #SetScale()?
## obj.SetScale(numpy.array(sc))
# obj.Set(scale=numpy.array(sc))
#
# def rotateObj(self,obj,rot):
# #take radians, give degrees
# obj.rotation = rot
#
# def getTransformation(self,geom):
# geom = self.getObject(geom)
# return geom.GetMatrix(geom.LastParentBeforeRoot())
#
# def toggleDisplay(self,obj,display):
# obj = self.getObject(obj)
# if obj is None :
# return
# obj.Set(visible = display)
#
# def getVisibility(self,obj,editor=True, render=False, active=False):
# #0 off, 1#on, 2 undef
# display = {0:True,1:False,2:True}
# if type (obj) == str :
# obj = self.getObject(obj)
# if editor and not render and not active:
# return display[obj.GetEditorMode()]
# elif not editor and render and not active:
# return display[obj.GetRenderMode()]
# elif not editor and not render and active:
# return bool(obj[906])
# else :
# return display[obj.GetEditorMode()],display[obj.GetRenderMode()],bool(obj[906])
#
#
# def getCurrentSelection(self,):
# """
# Return the current/active selected object in the document or scene
# DejaVu support only one object at a time.
# @rtype: liste
# @return: the list of selected object
# """
# return [self.getCurrentScene().currentObject]
#
# #####################MATERIALS FUNCTION########################
# def addMaterial(self,name,color):
# pass
#
# def createTexturedMaterial(self,name,filename,normal=False,mat=None):
# footex = Texture()
# im = Image.open(filename)
# footex.Set(enable=1, image=im)
# return footex
#
# def assignMaterial(self,object,mat,texture= False):
# if texture :
# object.Set(texture=mat)
# else :
# object.Set(materials=[mat,])
#
# def changeObjColorMat(self,obj,color):
# doc = self.getCurrentScene()
# obj.Set(materials=[color])
#
# def getMaterialObject(self,o):
# pass
# return None
#
# def getMaterial(self,mat):
# return None
#
# def getAllMaterials(self):
# return None
#
# def getMaterialName(self,mat):
# return None
#
# def ObjectsSelection(self,listeObjects,typeSel="new"):
# """
# Modify the current object selection.
#
# @type listeObjects: list
#        @param listeObjects: list of objects to join
#        @type typeSel: string
#        @param typeSel: type of modification: new, add, ...
#
# """
# pass
## dic={"add":c4d.SELECTION_ADD,"new":c4d.SELECTION_NEW}
## sc = self.getCurrentScene()
## [sc.SetSelection(x,dic[typeSel]) for x in listeObjects]
#
#
# def oneCylinder(self,name,head,tail,radius=None,instance=None,material=None,
# parent = None,color=None):
# #laenge,mx=self.getTubeProperties(head,tail)
# lenght = self.measure_distance(head,tail)
# if True:#instance is None:
# stick = self.getObject(name)
# if stick is None :
# v = numpy.array([tail,head])
# f = numpy.arange(len(v))
# f.shape=(-1,2)
# stick = Cylinders(name,inheritMaterial = False,
# vertices=v,faces=f,
# radii=[1])
# #stick = self.Cylinder(name,length=lenght,pos =head)
# self.addObjectToScene(self.getCurrentScene(),stick,parent=parent)
# else :
# pos = numpy.array(head)
# v = numpy.array([tail,head])
# f = numpy.arange(len(v))
# f.shape=(-1,2)
# stick.Set(vertices=v,faces=f,redo=1)
# else :
# stick = instance
# v = instance.vertexSet.vertices.array
# i = len(v)
## v = numpy.concatenate((v,numpy.array([head,tail])))
# instance.vertexSet.vertices.AddValues([head,tail])
# instance.faceSet.faces.AddValues([i,i+1])
# r = instance.vertexSet.radii.array[0]
# instance.vertexSet.radii.AddValues(r)
# instance.Set(redo=1)
# return stick
#
# def Cylinder(self,name,radius=1.,length=1.,res=0, pos = [0.,0.,0.],parent=None):
## QualitySph={"0":16,"1":3,"2":4,"3":8,"4":16,"5":32}
# pos = numpy.array(pos)
# v = numpy.array([pos,pos + numpy.array([0.,length,0.])])
# f = numpy.arange(len(v))
# f.shape=(-1,2)
# baseCyl = Cylinders(name,inheritMaterial = False,quality=res,
# vertices=v,faces=f,
# radii=[radius])#, visible=1)
# #if str(res) not in QualitySph.keys():
# self.addObjectToScene(self.getCurrentScene(),baseCyl,parent=parent)
# return [baseCyl,baseCyl]
#
# def updateTubeMesh(self,mesh,cradius=1.0,quality=0,**kw):
# #change the radius to cradius
# mesh=self.getMesh(mesh)
# if type(mesh) is list :
# mesh= mesh[0]
## mesh=geom.mesh.GetDown()#should be the cylinder
# #mesh[5000]=cradius
## cradius = cradius*1/0.2
# #should used current Y scale too
# mesh.Set(radii=[cradius],quality=quality)
#
# def Sphere(self,name,radius=1.0,res=0,parent=None,color=None,mat=None,pos=None):
# QualitySph={"0":6,"1":4,"2":5,"3":6,"4":8,"5":16}
# baseSphere = self.Spheres(name,radii=[radius,],centers=[[0.,0.,0.]],
# quality=res,inheritMaterial = False, visible=1)
# if mat is not None :
# mat = self.getMaterial(mat)
# self.assignMaterial(mat, baseSphere)
# else :
#            if color == None :
# color = [1.,1.,0.]
# baseSphere.Set(materials=[color,])
## mat = self.addMaterial(name,color)
## self.assignMaterial(mat, baseSphere)
# self.addObjectToScene(self.getCurrentScene(),baseSphere,parent=parent)
# if pos != None :
# self.setTranslation(baseSphere,pos)
# return [baseSphere,baseSphere]
#
## def updateSphereMesh(self,mesh,verts=None,faces=None,basemesh=None,
## scale=1.):
## mesh=self.getMesh(mesh)
## mesh[905]=self.FromVec([scale,scale,scale])
## mesh.Message(c4d.MSG_UPDATE)
##
## def updateSphereObj(self,obj,coord):
## self.updateObjectPos(obj,coord)
##
## def updateObjectPos(self,object,position):
## if len(position) == 1 : c = position[0]
## else : c = position
## #print "upadteObj"
## newPos=self.FromVec(c)
## parentPos = self.GetAbsPosUntilRoot(object)#parent.GetAbsPos()
## object.SetAbsPos(newPos-parentPos)
##
### def clonesAtomsSphere(self,name,x,iMe,doc,mat=None,scale=1.0,
### Res=32,R=None,join=0):
### spher=[]
### k=0
### n='S'
### AtmRadi = {"A":1.7,"N":1.54,"C":1.7,"P":1.7,"O":1.52,"S":1.85,"H":1.2}
###
### if scale == 0.0 : scale = 1.0
### if mat == None : mat=create_Atoms_materials()
### if name.find('balls') != (-1) : n='B'
### for j in range(len(x)): spher.append(None)
### for j in range(len(x)):
### #at=res.atoms[j]
### at=x[j]
### atN=at.name
### #print atN
### fullname = at.full_name()
### #print fullname
### atC=at._coords[0]
### spher[j] = iMe[atN[0]].GetClone()
### spher[j].SetName(n+"_"+fullname)#.replace(":","_"))
### spher[j].SetAbsPos(c4d.Vector(float(atC[2]),float(atC[1]),float(atC[0])))
### spher[j][905]=c4d.Vector(float(scale),float(scale),float(scale))
### #
### #print atN[0]
### #print mat[atN[0]]
### texture = spher[j].MakeTag(c4d.Ttexture)
### texture[1010] = mat[atN[0]]
### k=k+1
### return spher
##
# def instancesSphere(self,name,centers,radii,meshsphere,
# colors,scene,parent=None):
# sphs=[]
# vertices = []
# for i in range(len(centers)):
# vertices.append(centers[i])
# meshsphere.Set(vertices=vertices,materials=colors,radii=radii)
# return meshsphere
#
### def spheresMesh(self,name,x,mat=None,scale=1.0,Res=32,R=None,join=0):
### if scale == 0.0 : scale =1.
### scale = scale *2.
### spher=[]
### if Res == 0 : Res = 10.
### else : Res = Res *5.
### k=0
### if mat == None : mat=self.create_Atoms_materials()
### #print len(x)
### for j in range(len(x)): spher.append(None)
### for j in range(len(x)):
### #at=res.atoms[j]
### at=x[j]
### atN=at.name
### #print atN
### fullname = at.full_name()
### #print fullname
### atC=at._coords[0]
### #if R !=None : rad=R
### #elif AtmRadi.has_key(atN[0]) : rad=AtmRadi[atN[0]]
### #else : rad=AtmRadi['H']
### #print at.vdwRadius
### rad=at.vdwRadius
### #print rad
### spher[j] = c4d.BaseObject(c4d.Osphere)
### spher[j].SetName(fullname.replace(":","_"))
### spher[j][PRIM_SPHERE_RAD] = float(rad)*float(scale)
### spher[j].SetAbsPos(c4d.Vector(float(atC[0]),float(atC[1]),float(atC[2])))
### spher[j].MakeTag(c4d.Tphong)
### # create a texture tag on the PDBgeometry object
### #texture = spher[j].MakeTag(c4d.Ttexture)
### #create the dedicayed material
### #print mat[atN[0]]
### #texture[1010] = mat[atN[0]]
### #spher.append(me)
### k=k+1
### return spher
##
# def instancesCylinder(self,name,points,faces,radii,
# mesh,colors,scene,parent=None):
# mesh.Set(vertices=points,faces=faces,radii=radii,materials=colors)
# return mesh
#
## def updateTubeMesh(self,mesh,cradius=1.0,quality=0,**kw):
## mesh=self.getMesh(mesh)
### mesh=geom.mesh.GetDown()#should be the cylinder
## #mesh[5000]=cradius
### cradius = cradius*1/0.2
## mesh[905]=c4d.Vector(float(cradius),1.,float(cradius))
## mesh.Message(c4d.MSG_UPDATE)
## #pass
##
##    def updateTubeObj(self,o,coord1,coord2,bicyl=False):
## laenge,mx=self.getTubeProperties(coord1,coord2)
## o.SetMl(mx)
## o[905,1001]=float(laenge)
## parentPos = self.GetAbsPosUntilRoot(o)#parent.GetAbsPos()
## currentPos = o.GetAbsPos()
## o.SetAbsPos(currentPos - parentPos)
##
### def oldTube(set,atms,points,faces,doc,mat=None,res=32,size=0.25,sc=1.,join=0,instance=None,hiera = 'perRes'):
### bonds, atnobnd = set.bonds
### backbone = ['N', 'CA', 'C', 'O']
### stick=[]
### tube=[]
### #size=size*2.
### #coord1=x[0].atms[x[0].atms.CApos()].xyz() #x.xyz()[i].split()
### #coord2=x[1].atms[x[1].atms.CApos()].xyz() #x.xyz()[i+1].split()
### #print len(points)
### #print len(faces)
### #print len(atms)
### atm1=bonds[0].atom1#[faces[0][0]]
### atm2=bonds[0].atom2#[faces[0][1]]
### #name="T_"+atm1.name+str(atm1.number)+"_"+atm2.name+str(atm2.number)
### name="T_"+atm1.full_name()+"_"+atm2.name
### mol=atm1.getParentOfType(Protein)
### laenge,mx=getStickProperties(points[faces[0][0]],points[faces[0][1]])
### if mat == None : mat=create_sticks_materials()
### if instance == None :
### stick.append(c4d.BaseObject(CYLINDER))#(res, size, laenge/sc) #1. CAtrace, 0.25 regular |sc=1 CATrace, 2 regular
### stick[0].SetMg(mx)
### stick[0][5005]=laenge/sc#size
### stick[0][5000]=size#radius
### stick[0][5008]=res#resolution
### stick[0][5006]=2#heght segment
### else :
### stick.append(c4d.BaseObject(INSTANCE))
### stick[0][1001]=instance
### stick[0].SetMg(mx)
### stick[0][905,1001]=float(laenge)
### texture=stick[0].MakeTag(c4d.Ttexture)
### #print atms[faces[0][0]].name[0]+atms[faces[0][1]].name[0]
### name1=atms[faces[0][0]].name[0]
### name2=atms[faces[0][1]].name[0]
### if name1 not in AtmRadi.keys(): name1="A"
### if name2 not in AtmRadi.keys(): name2="A"
### texture[1010]=mat[name1+name2]
### stick[0].SetName(name)
### #stick[0].SetAbsPos(c4d.Vector(float(z1+z2)/2,float(y1+y2)/2,float(x1+x2)/2))
### #stick[0].set_rot(c4d.Vector(float(wz),float(0),float(wsz)))
### #stick[0][904,1000] = wz #RY/RH
### #stick[0][904,1002] = wsz #RZ/RB
### stick[0].MakeTag(c4d.Tphong)
### hierarchy=parseObjectName("B_"+atm1.full_name())
### #parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
### if hiera == 'perRes' :
### parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
### elif hiera == 'perAtom' :
### if atm1.name in backbone :
### parent = getObject(atm1.full_name()+"_bond")
### else :
### parent = getObject(atm1.full_name()+"_sbond")
### else :
### parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
### addObjectToScene(doc,stick[0],parent=parent)
### for i in range(1,len(faces)):
### atm1=bonds[i].atom1#[faces[i][0]]
### atm2=bonds[i].atom2#[faces[i][1]]
### #name="T_"+atm1.name+str(atm1.number)+"_"+atm2.name+str(atm2.number)
### name="T_"+atm1.full_name()+"_"+atm2.name
### laenge,mx=getStickProperties(points[faces[i][0]],points[faces[i][1]])
### if instance == None :
### stick.append(c4d.BaseObject(CYLINDER))#(res, size, laenge/sc) #1. CAtrace, 0.25 regular |sc=1 CATrace, 2 regular
### stick[i].SetMl(mx)
### stick[i][5005]=laenge/sc#radius
### stick[i][5000]=size#height/size
### stick[i][5008]=res#resolution rotation segment
### stick[i][5006]=2#heght segment
### else :
### stick.append(c4d.BaseObject(INSTANCE))
### stick[i][1001]=instance
### stick[i].SetMl(mx)
### stick[i][905,1001]=float(laenge)
### texture=stick[i].MakeTag(c4d.Ttexture)
### #print i,i+1
### name1=atms[faces[i][0]].name[0]
### name2=atms[faces[i][1]].name[0]
### if name1 not in AtmRadi.keys(): name1="A"
### if name2 not in AtmRadi.keys(): name2="A"
###
### if i < len(atms) :
### #print name1+name2
### texture[1010]=mat[name1+name2]
### else :
### texture[1010]=mat[name1+name2]
### stick[i].SetName(name)
### #stick[i].SetAbsPos(c4d.Vector(float(z1+z2)/2,float(y1+y2)/2,float(x1+x2)/2))
### #stick[i].set_rot(c4d.Vector(float(wz),float(0.),float(wsz)))
### stick[i].SetMl(mx)
### stick[i].MakeTag(c4d.Tphong)
### hierarchy=parseObjectName("B_"+atm1.full_name())
### #parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
### if hiera == 'perRes' :
### parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
### elif hiera == 'perAtom' :
### if atm1.name in backbone :
### parent = getObject(atm1.full_name()+"_bond")
### else :
### parent = getObject(atm1.full_name()+"_sbond")
### else :
### parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
###
### addObjectToScene(doc,stick[i],parent=parent)
###
### #if join==1 :
### # stick[0].join(stick[1:])
### # for ind in range(1,len(stick)):
### #obj[0].join([obj[ind]])
### # scn.unlink(stick[ind])
### #obj[0].setName(name)
### return [stick]
###
###
# def FromVec(self,points,pos=True):
# return numpy.array(points)#numpy.array(float(points[0]),float(points[1]),float(points[2]))
##
# def ToVec(self,v):
# return v
##
## def getCoordinateMatrix(self,pos,direction):
## offset=pos
## v_2=direction
## v_2.Normalize()
## v_1=c4d.Vector(float(1.),float(0.),float(0.))
## v_3=c4d.Vector.Cross(v_1,v_2)
## v_3.Normalize()
## v_1=c4d.Vector.Cross(v_2,v_3)
## v_1.Normalize()
## #from mglutil.math import rotax
## #pmx=rotax.rotVectToVect([1.,0.,0.], [float(z1-z2),float(y1-y2),float(x1-x2)], i=None)
## return c4d.Matrix(offset,v_1, v_2, v_3)
##
## def getCoordinateMatrixBis(self,pos,v1,v2):
## offset=self.FromVec(pos)
## v_2=self.FromVec(v2)
## v_1=self.FromVec(v1)
## v_3=c4d.Vector.Cross(v_1,v_2)
## v_3.Normalize()
## #from mglutil.math import rotax
## #pmx=rotax.rotVectToVect([1.,0.,0.], [float(z1-z2),float(y1-y2),float(x1-x2)], i=None)
## return c4d.Matrix(offset,v_1, v_2, v_3)
##
## def loftnurbs(self,name,mat=None):
## loft=c4d.BaseObject(self.LOFTNURBS)
## loft[1008]=0 #adaptive UV false
## loft.SetName(name)
## loft.MakeTag(c4d.Tphong)
## texture = loft.MakeTag(c4d.Ttexture)
## texture[1004]=6 #UVW Mapping
## #create the dedicayed material
## if mat is not None :
## texture[1010] = mat
## return loft
##
## def sweepnurbs(self,name,mat=None):
## loft=c4d.BaseObject(c4d.Osweep)
## loft.SetName(name)
## loft.MakeTag(c4d.Tphong)
## #create the dedicayed material
### if mat == None :
### texture[1010] = self.create_loft_material(name='mat_'+name)
### else : texture[1010] = mat
## if mat is not None :
## texture = loft.MakeTag(c4d.Ttexture)
## texture[1010] = mat
## return loft
##
## def addShapeToNurb(self,loft,shape,position=-1):
## list_shape=loft.GetChilds()
## shape.insert_after(list_shape[position])
##
## #def createShapes2D()
## # sh=c4d.BaseObject(dshape)
##
# def spline(self,name, points,close=0,type=1,scene=None,parent=None):
#        f=[[x,x+1] for x in range(len(points)-1)]
# spline=self.Polylines(name, vertices=points,faces=f)
# self.AddObject(spline, parent=parent)
# return spline,None
#
# def update_spline(self,name,new_points):
# spline=self.getObject(name)
# if spline is None :
# return False
#        f=[[i,i+1] for i in range(len(new_points)-1)]
# spline.Set(vertices=new_points,faces=f)
# return True
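#        # e.g. helper.update_spline("myChainSpline",[[0.,0.,0.],[1.,1.,0.],[2.,0.,0.]])
#        # relinks consecutive points pairwise ("myChainSpline" is a made-up name).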
##
## def createShapes2Dspline(self,doc=None,parent=None):
## circle=c4d.BaseObject(self.CIRCLE)
## circle[2012]=float(0.3)
## circle[2300]=1
## if doc : addObjectToScene(doc,circle,parent=parent )
## rectangle=c4d.BaseObject(self.RECTANGLE)
## rectangle[2060]=float(2.2)
## rectangle[2061]=float(0.7)
## rectangle[2300]=1
## if doc : addObjectToScene(doc,rectangle,parent=parent )
## fourside=c4d.BaseObject(self.FOURSIDE)
## fourside[2121]=float(2.5)
## fourside[2122]=float(0.9)
## fourside[2300]=1
## if doc : addObjectToScene(doc,fourside,parent=parent )
## shape2D={}
## pts=[[0,0,0],[0,1,0],[0,1,1],[0,0,1]]
## #helixshape
## helixshape=fourside.get_real_spline()#spline('helix',pts,close=1,type=2)#AKIMA
## helixshape.SetName('helix')
## shape2D['Heli']=helixshape
## #sheetshape
## sheetshape=rectangle.get_real_spline()#spline('sheet',pts,close=1,type=0)#LINEAR
## sheetshape.SetName('sheet')
## shape2D['Shee']=sheetshape
## #strandshape
## strandshape=sheetshape.GetClone()
## strandshape.SetName('strand')
## shape2D['Stra']=strandshape
## #coilshape
## coilshape=circle.get_real_spline()#spline('coil',pts,close=1,type=4)#BEZIER
## coilshape.SetName('coil')
## shape2D['Coil']=coilshape
## #turnshape
## turnshape=coilshape.GetClone()
## turnshape.SetName('turn')
## shape2D['Turn']=turnshape
## if doc :
## for o in shape2D.values() :
## self.addObjectToScene(doc,o,parent=parent )
## return shape2D,[circle,rectangle,fourside,helixshape,sheetshape,strandshape,coilshape,turnshape]
##
##
## def constraintLookAt(self,object):
## """
##        Constrain a host object to look at the camera.
##
##        @type object: Hostobject
##        @param object: object to constrain
## """
## self.getObject(object)
## object.MakeTag(self.LOOKATCAM)
##
## def updateText(self,text,string="",parent=None,size=None,pos=None,font=None):
## text = self.getObject(text)
## if text is None :
## return
## if string : text[c4d.PRIM_TEXT_TEXT] = string
## if size is not None : text[c4d.PRIM_TEXT_HEIGHT]= size
## if pos is not None : self.setTranslation(text,pos)
## if parent is not None : self.reParent(text,parent)
##
## def Text(self,name="",string="",parent=None,size=5.,
##pos=None,font=None,lookAt=False):
## text = c4d.BaseObject(self.TEXT)
## text.SetName(name)
## text[c4d.PRIM_TEXT_TEXT] = string #Text
## text[c4d.PRIM_TEXT_HEIGHT]= size
## text[c4d.ID_BASEOBJECT_REL_ROTATION,c4d.VECTOR_X] = 3.14 #inverse
### if font is not None:
### text[c4d.PRIM_TEXT_FONT]
## if pos is not None :
## self.setTranslation(text,pos)
## if parent is not None:
## self.addObjectToScene(self.getCurrentScene(),text,parent=parent)
## if lookAt:
## self.constraintLookAt(text)
## return text
##
## def Circle(self,name, rad=1.):
## circle=c4d.BaseObject(c4d.Osplinecircle)
## circle.SetName(name)
## circle[2012]=float(rad)
## circle[2300]=0
## return circle
##
## def createShapes2D(self,doc=None,parent=None):
## if doc is None :
## doc = self.getCurrentScene()
## shape2D={}
## circle=c4d.BaseObject(self.CIRCLE)
## circle[2012]=float(0.3)
## circle[2300]=0
## circle.SetName('Circle1')
## circle2=circle.GetClone()
## circle2.SetName('Circle2')
##
## coil=c4d.BaseObject(c4d.Onull)
## coil.SetName('coil')
## turn=c4d.BaseObject(c4d.Onull)
## turn.SetName('turn')
## shape2D['Coil']=coil
## shape2D['Turn']=turn
##
## self.addObjectToScene(doc,coil,parent=parent )
## self.addObjectToScene(doc,circle,parent=coil )
## self.addObjectToScene(doc,turn,parent=parent )
## self.addObjectToScene(doc,circle2,parent=turn )
##
## rectangle=c4d.BaseObject(RECTANGLE)
## rectangle[2060]=float(2.2)
## rectangle[2061]=float(0.7)
## rectangle[2300]=0
## rectangle.SetName('Rectangle1')
## rectangle2=rectangle.GetClone()
## rectangle2.SetName('Rectangle2')
##
## stra=c4d.BaseObject(c4d.Onull)
## stra.SetName('stra')
## shee=c4d.BaseObject(c4d.Onull)
## shee.SetName('shee')
## shape2D['Stra']=stra
## shape2D['Shee']=shee
##
## self.addObjectToScene(doc,stra,parent=parent )
## self.addObjectToScene(doc,rectangle,parent=stra )
## self.addObjectToScene(doc,shee,parent=parent )
## self.addObjectToScene(doc,rectangle2,parent=shee )
##
## fourside=c4d.BaseObject(FOURSIDE)
## fourside[2121]=float(2.5)
## fourside[2122]=float(0.9)
## fourside[2300]=0
## heli=c4d.BaseObject(c4d.Onull)
## heli.SetName('heli')
## shape2D['Heli']=heli
##
## self.addObjectToScene(doc,heli,parent=parent )
## self.addObjectToScene(doc,fourside,parent=heli)
##
## return shape2D,[circle,rectangle,fourside]
##
## def getShapes2D(self):
## shape2D={}
## shape2D['Coil']=getObject('coil')
## shape2D['Turn']=getObject('turn')
## shape2D['Heli']=getObject('heli')
## shape2D['Stra']=getObject('stra')
## return shape2D
##
## def morph2dObject(self,name,objsrc,target):
## obj=objsrc.GetClone()
## obj.SetName(name)
## mixer=obj.MakeTag(self.POSEMIXER)
## mixer[1001]=objsrc #the default pose
## #for i,sh in enumerate(shape2D) :
## # mixer[3002,1000+int(i)]=shape2D[sh]
## mixer[3002,1000]=target#shape2D[sh] target 1
## return obj
##
## def c4dSpecialRibon(self,name,points,dshape=CIRCLE,shape2dlist=None,mat=None):
## #if loft == None : loft=loftnurbs('loft',mat=mat)
## shape=[]
## pos=c4d.Vector(float(points[0][2]),float(points[0][1]),float(points[0][0]))
## direction=c4d.Vector(float(points[0][2]-points[1][2]),float(points[0][1]-points[1][1]),float(points[0][0]-points[1][0]))
## mx=self.getCoordinateMatrix(pos,direction)
## if shape2dlist : shape.append(morph2dObject(dshape+str(0),shape2dlist[dshape],shape2dlist['Heli']))
## else :
## shape.append(c4d.BaseObject(dshape))
## if dshape == self.CIRCLE :
## shape[0][2012]=float(0.3)
## #shape[0][2300]=1
## if dshape == self.RECTANGLE :
## shape[0][2060]=float(0.3*4.)
## shape[0][2061]=float(0.3*3.)
## #shape[0][2300]=1
## if dshape == self.FOURSIDE:
## shape[0][2121]=float(0.3*4.)
## shape[0][2122]=float(0.1)
## #shape[0][2300]=0
## shape[0].SetMg(mx)
## if len(points)==2: return shape
## i=1
## while i < (len(points)-1):
## #print i
## pos=c4d.Vector(float(points[i][2]),float(points[i][1]),float(points[i][0]))
## direction=c4d.Vector(float(points[i-1][2]-points[i+1][2]),float(points[i-1][1]-points[i+1][1]),float(points[i-1][0]-points[i+1][0]))
## mx=self.getCoordinateMatrix(pos,direction)
## if shape2dlist : shape.append(morph2dObject(dshape+str(i),shape2dlist[dshape],shape2dlist['Heli']))
## else :
## shape.append(c4d.BaseObject(dshape))
## if dshape == self.CIRCLE :
## shape[i][2012]=float(0.3)
## shape[i][2300]=2
## if dshape == self.RECTANGLE :
## shape[i][2060]=float(0.3*4.)
## shape[i][2061]=float(0.3*3.)
## shape[i][2300]=2
## if dshape == self.FOURSIDE:
## shape[i][2121]=float(0.3*4.)
## shape[i][2122]=float(0.1)
## shape[i][2300]=2
## shape[i].SetMg(mx)
## i=i+1
## pos=c4d.Vector(float(points[i][2]),float(points[i][1]),float(points[i][0]))
## direction=c4d.Vector(float(points[i-1][2]-points[i][2]),float(points[i-1][1]-points[i][1]),float(points[i-1][0]-points[i][0]))
## mx=self.getCoordinateMatrix(pos,direction)
## if shape2dlist : shape.append(morph2dObject(dshape+str(i),shape2dlist[dshape],shape2dlist['Heli']))
## else :
## shape.append(c4d.BaseObject(dshape))
## if dshape == self.CIRCLE :
## shape[i][2012]=float(0.3)
## shape[i][2300]=2
## if dshape == self.RECTANGLE :
## shape[i][2060]=float(0.3*4.)
## shape[i][2061]=float(0.3*3.)
## shape[i][2300]=2
## if dshape == self.FOURSIDE:
## shape[i][2121]=float(0.3*4.)
## shape[i][2122]=float(0.1)
## shape[i][2300]=2
## shape[i].SetMg(mx)
## return shape
##
## def c4dSecondaryLofts(self,name,matrices,dshape=CIRCLE,mat=None):
## #if loft == None : loft=loftnurbs('loft',mat=mat)
## shape=[]
## i=0
## while i < (len(matrices)):
## #pos=c4d.Vector(float(points[i][2]),float(points[i][1]),float(points[i][0]))
## #direction=c4d.Vector(float(points[i-1][2]-points[i+1][2]),float(points[i-1][1]-points[i+1][1]),float(points[i-1][0]-points[i+1][0]))
## mx=self.getCoordinateMatrixBis(matrices[i][2],matrices[i][0],matrices[i][1])
## #mx=getCoordinateMatrix(pos,direction)
## shape.append(c4d.BaseObject(dshape))
## shape[i].SetMg(mx)
## if dshape == self.CIRCLE :
## shape[i][2012]=float(0.3)
## shape[i][2300]=0
## if dshape == self.RECTANGLE :
## shape[i][2060]=float(2.2)
## shape[i][2061]=float(0.7)
## shape[i][2300]=0
## if dshape == self.FOURSIDE:
## shape[i][2121]=float(2.5)
## shape[i][2122]=float(0.9)
## shape[i][2300]=0
## i=i+1
## return shape
##
## def instanceShape(self,ssname,shape2D):
## #if shape2D=None : shape2D=createShapes2D()
## shape=c4d.BaseObject(c4d.Oinstance)
## shape[1001]=shape2D[ssname[:4]]
## shape.SetName(ssname[:4])
## return shape
##
## def makeShape(self,dshape,ssname):
## shape=c4d.BaseObject(dshape)
## if dshape == self.CIRCLE :
## shape[2012]=float(0.3)
## shape[2300]=0
## shape.SetName(ssname[:4])
## if dshape == self.RECTANGLE :
## shape[2060]=float(2.2)
## shape[2061]=float(0.7)
## shape[2300]=0
## shape.SetName(ssname[:4])
## if dshape == self.FOURSIDE:
## shape[2121]=float(2.5)
## shape[2122]=float(0.9)
## shape[2300]=0
## shape.SetName(ssname[:4])
## return shape
##
## def c4dSecondaryLoftsSp(self,name,atoms,dshape=CIRCLE,mat=None,shape2dmorph=None,shapes2d=None,instance=False):
## #print "ok build loft shape"
## #if loft == None : loft=loftnurbs('loft',mat=mat)
## shape=[]
## prev=None
## ssSet=atoms[0].parent.parent.secondarystructureset
## molname=atoms[0].full_name().split(":")[0]
## chname= atoms[0].full_name().split(":")[1]
## i=0
## iK=0
## #get The pmv-extruder
## sheet=atoms[0].parent.secondarystructure.sheet2D
## matrices=sheet.matrixTransfo
## if mat == None : mat = c4d.documents.GetActiveDocument().SearchMaterial('mat_loft'+molname+'_'+chname)
## while i < (len(atoms)):
## ssname=atoms[i].parent.secondarystructure.name
## dshape=SSShapes[ssname[:4]]#ssname[:4]
## #print ssname,dshape
## #pos=c4d.Vector(float(points[i][2]),float(points[i][1]),float(points[i][0]))
## #direction=c4d.Vector(float(points[i-1][2]-points[i+1][2]),float(points[i-1][1]-points[i+1][1]),float(points[i-1][0]-points[i+1][0]))
## mx=self.getCoordinateMatrixBis(matrices[i][2],matrices[i][0],matrices[i][1])
## #mx=getCoordinateMatrix(pos,direction)
## #iK=iK+1
## if shape2dmorph :
## shape.append(self.morph2dObject(dshape+str(i),shape2dmorph[dshape],shape2dmorph['Heli']))
## shape[-1].SetMg(mx)
## else :
## #print str(prev),ssname
## if prev != None: #end of loop
## if ssname[:4] != prev[:4]:
## if not instance : shape.append(self.makeShape(SSShapes[prev[:4]],prev))
## else : shape.append(self.instanceShape(prev,shapes2d))
## shape[-1].SetMg(mx)
## if not instance : shape.append(self.makeShape(dshape,ssname))
## else : shape.append(self.instanceShape(ssname,shapes2d))
## shape[-1].SetMg(mx)
## prev=ssname
## i=i+1
## if mat != None:
## prev=None
## #i=(len(shape))
## i=0
## while i < (len(shape)):
## ssname=shape[i].GetName()
## #print ssname
## pos=1-((((i)*100.)/len(shape))/100.0)
## if pos < 0 : pos = 0.
## #print pos
## #change the material knote according ss color / cf atom color...
## #col=atoms[i].colors['secondarystructure']
## col=self.c4dColor(SSColor[ssname])
## nc=c4d.Vector(col[0],col[1],col[2])
## ncp=c4d.Vector(0,0,0)
## if prev != None :
## pcol=self.c4dColor(SSColor[prev])
## ncp=c4d.Vector(pcol[0],pcol[1],pcol[2])
## #print col
## #print ssname[:4]
## #print prev
## if ssname != prev : #new ss
## grad=mat[8000][1007]
## #iK=iK+1
## nK=grad.GetKnotCount()
## #print "knot count ",nK,iK
## if iK >= nK :
## #print "insert ",pos,nK
## #print "grad.insert_knot(c4d.Vector("+str(col[0])+str(col[1])+str(col[2])+"), 1.0, "+str(pos)+",0.5)"
## if prev != None :
## grad.InsertKnot(ncp, 1.0, pos+0.01,0.5)
## iK=iK+1
## grad.InsertKnot(nc, 1.0, pos-0.01,0.5)
## #grad.insert_knot(ncp, 1.0, pos+0.1,0.5)
## iK=iK+1
## else :
## #print "set ",iK,pos
## if prev != None :grad.SetKnot(iK-1,ncp,1.0,pos,0.5)
## grad.SetKnot(iK,nc,1.0,pos,0.5)
## mat[8000][1007]=grad
## prev=ssname
## mat.Message(c4d.MSG_UPDATE)
## i=i+1
## #mx=getCoordinateMatrixBis(matrices[i][2],matrices[i][0],matrices[i][1])
## #if shape2dlist : shape.append(morph2dObject(dshape+str(i),shape2dlist[shape],shape2dlist['Heli']))
## return shape
##
## def LoftOnSpline(self,name,chain,atoms,Spline=None,dshape=CIRCLE,mat=None,
## shape2dmorph=None,shapes2d=None,instance=False):
## #print "ok build loft/spline"
## molname = atoms[0].full_name().split(":")[0]
## chname = atoms[0].full_name().split(":")[1]
## #we first need the spline
## #if loft == None : loft=loftnurbs('loft',mat=mat)
## shape=[]
## prev=None
## #mol = atoms[0].top
## ssSet=chain.secondarystructureset#atoms[0].parent.parent.secondarystructureset
## i=0
## iK=0
## #get The pmv-extruder
## sheet=chain.residues[0].secondarystructure.sheet2D
## matrices=sheet.matrixTransfo
## ca=atoms.get('CA')
## o =atoms.get('O')
## if Spline is None :
## parent=atoms[0].parent.parent.parent.geomContainer.masterGeom.chains_obj[chname]
## Spline,ospline = spline(name+'spline',ca.coords)#
## addObjectToScene(getCurrentScene(),Spline,parent=parent)
## #loftname = 'loft'+mol.name+'_'+ch.name
## #matloftname = 'mat_loft'+mol.name+'_'+ch.name
## if mat == None :
## mat = c4d.documents.GetActiveDocument().SearchMaterial('mat_loft'+molname+'_'+chname)
## if mat is not None :
## if DEBUG : print "ok find mat"
## #if mat == None :
## # mat = create_loft_material(name='mat_loft'+molname+'_'+chname)
## if DEBUG : print "CA",len(ca)
## while i < (len(ca)):
## pos= float(((i*1.) / len(ca)))
## #print str(pos)+" %"
## #print atoms[i],atoms[i].parent,hasattr(atoms[i].parent,'secondarystructure')
## if hasattr(ca[i].parent,'secondarystructure') : ssname=ca[i].parent.secondarystructure.name
## else : ssname="Coil"
## dshape=SSShapes[ssname[:4]]#ssname[:4]
## #mx =getCoordinateMatrixBis(matrices[i][2],matrices[i][0],matrices[i][1])
## #have to place the shape on the spline
## if shape2dmorph :
## shape.append(morph2dObject(dshape+str(i),shape2dmorph[dshape],shape2dmorph['Heli']))
## path=shape[i].MakeTag(Follow_PATH)
## path[1001] = Spline
## path[1000] = 0#tangantial
## path[1003] = pos
## path[1007] = 2#1 axe
## #shape[-1].SetMg(mx)
## else :
## #print str(prev),ssname
## #if prev != None: #end of loop
## # if ssname[:4] != prev[:4]: #newSS need transition
## # if not instance : shape.append(makeShape(SSShapes[prev[:4]],prev))
## # else : shape.append(instanceShape(prev,shapes2d))
## # #shape[-1].SetMg(mx)
## # path=shape[-1].MakeTag(Follow_PATH)
## # path[1001] = Spline
## # path[1000] = 1
## # path[1003] = pos
## if not instance : shape.append(makeShape(dshape,ssname))
## else : shape.append(instanceShape(ssname,shapes2d))
## path=shape[i].MakeTag(Follow_PATH)
## path[1001] = Spline
## path[1000] = 0
## path[1003] = pos
## path[1007] = 2#1
## #shape[-1].SetMg(mx)
## if i >=1 :
## laenge,mx=getStickProperties(ca[i].coords,ca[i-1].coords)
## #if i > len(o) : laenge,mx=getStickProperties(ca[i].coords,o[i-1].coords)
## #else :laenge,mx=getStickProperties(ca[i].coords,o[i].coords)
## shape[i].SetMg(mx)
## prev=ssname
## i=i+1
## laenge,mx=getStickProperties(ca[0].coords,ca[1].coords)
## #laenge,mx=getStickProperties(ca[0].coords,o[0].coords)
## shape[0].SetMg(mx)
## if False :#(mat != None):
## prev=None
## #i=(len(shape))
## i=0
## while i < (len(shape)):
## ssname=shape[i].GetName()
## #print ssname
## pos=1-((((i)*100.)/len(shape))/100.0)
## if pos < 0 : pos = 0.
## #print pos
## #change the material knote according ss color / cf atom color...
## #col=atoms[i].colors['secondarystructure']
## col=c4dColor(SSColor[ssname])
## nc=c4d.Vector(col[0],col[1],col[2])
## ncp=c4d.Vector(0,0,0)
## if prev != None :
## pcol=c4dColor(SSColor[prev])
## ncp=c4d.Vector(pcol[0],pcol[1],pcol[2])
## #print col
## #print ssname[:4]
## #print prev
## if ssname != prev : #new ss
## grad=mat[8000][1007]
## #iK=iK+1
## nK=grad.GetKnotCount()
## #print "knot count ",nK,iK
## if iK >= nK :
## #print "insert ",pos,nK
## #print "grad.insert_knot(c4d.Vector("+str(col[0])+str(col[1])+str(col[2])+"), 1.0, "+str(pos)+",0.5)"
## if prev != None :
## grad.InsertKnot(ncp, 1.0, pos+0.01,0.5)
## iK=iK+1
## grad.InsertKnot(nc, 1.0, pos-0.01,0.5)
## #grad.insert_knot(ncp, 1.0, pos+0.1,0.5)
## iK=iK+1
## else :
## #print "set ",iK,pos
## if prev != None :grad.SetKnot(iK-1,ncp,1.0,pos,0.5)
## grad.SetKnot(iK,nc,1.0,pos,0.5)
## mat[8000][1007]=grad
## prev=ssname
## mat.Message(c4d.MSG_UPDATE)
## i=i+1
## #mx=getCoordinateMatrixBis(matrices[i][2],matrices[i][0],matrices[i][1])
## #if shape2dlist : shape.append(morph2dObject(dshape+str(i),shape2dlist[shape],shape2dlist['Heli']))
## return shape
##
## def update_2dsheet(shapes,builder,loft):
## dicSS={'C':'Coil','T' : 'Turn', 'H':'Heli','E':'Stra','P':'Coil'}
## shape2D=getShapes2D()
## for i,ss in enumerate(builder):
## if shapes[i].GetName() != dicSS[ss]:
## shapes[i][1001]=shape2D[dicSS[ss]]#ref object
## shapes[i].SetName(dicSS[ss])
##
## texture = loft.GetTags()[0]
## mat=texture[1010]
## grad=mat[8000][1007]
## grad.delete_all_knots()
## mat[8000][1007]=grad
##
## prev=None
## i = 0
## iK = 0
## while i < (len(shapes)):
## ssname=shapes[i].GetName()
## #print ssname
## pos=1-((((i)*100.)/len(shapes))/100.0)
## if pos < 0 : pos = 0.
## #print pos
## #change the material knote according ss color / cf atom color...
## #col=atoms[i].colors['secondarystructure']
## col=c4dColor(SSColor[ssname])
## nc=c4d.Vector(col[0],col[1],col[2])
## ncp=c4d.Vector(0,0,0)
## if prev != None :
## pcol=c4dColor(SSColor[prev])
## ncp=c4d.Vector(pcol[0],pcol[1],pcol[2])
## #print col
## #print ssname[:4]
## #print prev
## if ssname != prev : #new ss
## grad=mat[8000][1007]
## #iK=iK+1
## nK=grad.get_knot_count()
## #print "knot count ",nK,iK
## if iK >= nK :
## #print "insert ",pos,nK
## #print "grad.insert_knot(c4d.Vector("+str(col[0])+str(col[1])+str(col[2])+"), 1.0, "+str(pos)+",0.5)"
## if prev != None :
## grad.insert_knot(ncp, 1.0, pos+0.01,0.5)
## iK=iK+1
## grad.insert_knot(nc, 1.0, pos-0.01,0.5)
## #grad.insert_knot(ncp, 1.0, pos+0.1,0.5)
## iK=iK+1
## else :
## #print "set ",iK,pos
## if prev != None :grad.set_knot(iK-1,ncp,1.0,pos,0.5)
## grad.set_knot(iK,nc,1.0,pos,0.5)
## mat[8000][1007]=grad
## prev=ssname
## mat.Message(c4d.MSG_UPDATE)
## i=i+1
##
## def makeLines(self,name,points,faces,parent=None):
## rootLine = self.newEmpty(name)
## self.addObjectToScene(self.getCurrentScene(),rootLine,parent=parent)
## spline=c4d.BaseObject(c4d.Ospline)
## #spline[1000]=type
## #spline[1002]=close
## spline.SetName(name+'mainchain')
## spline.ResizeObject(int(len(points)))
## cd4vertices = map(self.FromVec,points)
##        map(spline.SetPoint,range(len(points)),cd4vertices)
## #for i,p in enumerate(points):
## # spline.SetPoint(i, c4dv(p))
## self.addObjectToScene(self.getCurrentScene(),spline,parent=rootLine)
## spline=c4d.BaseObject(c4d.Ospline)
## #spline[1000]=type
## #spline[1002]=close
## spline.SetName(name+'sidechain')
## spline.ResizeObject(int(len(points)))
## for i,p in enumerate(points):
## spline.SetPoint(i, self.FromVec(p))
## self.addObjectToScene(self.getCurrentScene(),spline,parent=rootLine)
##
## def updateLines(self,lines, chains=None):
## #lines = getObject(name)
## #if lines == None or chains == None:
## #print lines,chains
## #parent = getObject(chains.full_name())
## #print parent
## # bonds, atnobnd = chains.residues.atoms.bonds
## # indices = map(lambda x: (x.atom1._bndIndex_,
## # x.atom2._bndIndex_), bonds)
## # updatePoly(lines,vertices=chains.residues.atoms.coords,faces=indices)
## self.updatePoly(self,lines,vertices=chains.residues.atoms.coords)
##
### def getCoordByAtomType(chain):
### dic={}
### #extract the different atomset by type
### for i,atms in enumerate(AtomElements.keys()):
### atomset = chain.residues.atoms.get(atms)
### bonds, atnobnd = atomset.bonds
### indices = map(lambda x: (x.atom1._bndIndex_,
### x.atom2._bndIndex_), bonds)
### dic[atms] = [atomset]
###
### def stickballASmesh(molecules,atomSets):
### bsms=[]
### for mol, atms, in map(None, molecules, atomSets):
### for ch in mol.chains:
### parent = getObject(ch.full_name())
### lines = getObject(ch.full_name()+'_bsm')
### if lines == None :
### lines=newEmpty(ch.full_name()+'_bsm')
### addObjectToScene(getCurrentScene(),lines,parent=parent)
### dic = getCoordByAtomType(ch)
### for type in dic.keys():
### bsm = createsNmesh(ch.full_name()+'_bsm'+type,dic[type][0],
### None,dic[type][1])
### bsms.append(bsm)
### addObjectToScene(getCurrentScene(),bsm,parent=lines)
##
### def editLines(molecules,atomSets):
### for mol, atms, in map(None, molecules, atomSets):
### #check if line exist
### for ch in mol.chains:
### parent = getObject(ch.full_name())
### lines = getObject(ch.full_name()+'_line')
### if lines == None :
### arr = c4d.BaseObject(ATOMARRAY)
### arr.SetName(ch.full_name()+'_lineds')
### arr[1000] = 0.1 #radius cylinder
### arr[1001] = 0.1 #radius sphere
### arr[1002] = 3 #subdivision
### addObjectToScene(getCurrentScene(),arr,parent=parent)
### bonds, atnobnd = ch.residues.atoms.bonds
### indices = map(lambda x: (x.atom1._bndIndex_,
### x.atom2._bndIndex_), bonds)
###
### lines = createsNmesh(ch.full_name()+'_line',ch.residues.atoms.coords,
### None,indices)
### addObjectToScene(getCurrentScene(),lines[0] ,parent=arr)
### mol.geomContainer.geoms[ch.full_name()+'_line'] = lines
### #display using AtomArray
### else : #need to update
### updateLines(lines, chains=ch)
##
# def Points(self,name,**kw):
# #need to add the AtomArray modifier....
# parent = None
# if "parent" in kw:
# parent = kw.pop("parent")
# from DejaVu.Points import Points
# obj= Points(name,**kw)
# self.addObjectToScene(self.getCurrentScene(),obj,parent=parent)
# return obj
##
## def PolygonColorsObject(self,name,vertColors):
## obj= c4d.PolygonObject(len(vertColors), len(vertColors)/2.)
## obj.SetName(name+'_color')
## cd4vertices = map(self.FromVec,vertColors)
## map(obj.SetPoint,range(len(vertColors)),cd4vertices)
## #for k,v in enumerate(vertColors) :
## # obj.SetPoint(k, c4dv(v))
## return obj
##
# def updatePoly(self,polygon,faces=None,vertices=None):
# if type(polygon) == str:
# polygon = self.getObject(polygon)
# if polygon == None : return
# if vertices != None:
# polygon.Set(vertices=vertices)
# if faces != None:
# polygon.Set(faces=faces)
##
## def redoPoly(self,poly,vertices,faces,proxyCol=False,colors=None,parent=None,mol=None):
## doc = self.getCurrentScene()
## doc.SetActiveObject(poly)
## name=poly.GetName()
## texture = poly.GetTags()[0]
## c4d.CallCommand(100004787) #delete the obj
## obj=self.createsNmesh(name,vertices,None,faces,smooth=False,material=texture[1010],proxyCol=proxyCol)
## self.addObjectToScene(doc,obj[0],parent=parent)
## if proxyCol and colors!=None:
## pObject=self.getObject(name+"_color")
## doc.SetActiveObject(pObject)
## c4d.CallCommand(100004787) #delete the obj
## pObject=PolygonColorsObject(name,colors)
## self.addObjectToScene(doc,pObject,parent=parent)
##
## def reCreatePoly(self,poly,vertices,faces,proxyCol=False,colors=None,parent=None,mol=None):
## doc = self.getCurrentScene()
## doc.SetActiveObject(poly)
## name=poly.GetName()
## texture = poly.GetTags()[0]
## c4d.CallCommand(100004787) #delete the obj
## obj=self.createsNmesh(name,vertices,None,faces,smooth=False,material=texture[1010],proxyCol=proxyCol)
## self.addObjectToScene(doc,obj[0],parent=parent)
## if proxyCol and colors!=None:
## pObject=self.getObject(name+"_color")
## doc.SetActiveObject(pObject)
## c4d.CallCommand(100004787) #delete the obj
## pObject=self.PolygonColorsObject(name,colors)
## self.addObjectToScene(doc,pObject,parent=parent)
##
## """def UVWColorTag(obj,vertColors):
## uvw=obj.MakeTag(c4d.Tuvw)
##
## obj= c4d.PolygonObject(len(vertColors), len(vertColors)/2.)
## obj.SetName(name+'_color')
## k=0
## for v in vertColors :
## print v
## obj.SetPoint(k, c4d.Vector(float(v[0]), float(v[1]), float(v[2])))
## k=k+1
## return obj
## """
##
# def updateMesh(self,obj,vertices=None,faces = None):
# if type(obj) == str:
# obj = self.getObject(obj)
# if obj == None : return
# self.updatePoly(obj,faces=faces,vertices=vertices)
#
##    def updateMeshProxy(self,g,proxyCol=False,parent=None,mol=None):
##        doc = getCurrentScene()
##        obj = g.obj
##        doc.SetActiveObject(obj)
##        name=obj.GetName()
##        texture = obj.GetTags()[0]
## c4d.CallCommand(100004787) #delete the obj
## vertices=g.getVertices()
## faces=g.getFaces()
### if DEBUG : print len(vertices),len(faces)
## sys.stderr.write('\nnb v %d f %d\n' % (len(vertices),len(faces)))
## #if proxyCol : o=PolygonColorsObject
## obj=self.createsNmesh(name,vertices,None,faces,smooth=False,material=texture[1010],proxyCol=proxyCol)
## self.addObjectToScene(doc,obj[0],parent=parent)
## #obj.Message(c4d.MSG_UPDATE)
## return obj[0]
## # if proxyCol :
## # colors=mol.geomContainer.getGeomColor(g.name)
## # if hasattr(g,'color_obj'):
## # pObject=g.color_obj#getObject(name+"_color")
## # doc.SetActiveObject(pObject)
## # c4d.CallCommand(100004787) #delete the obj
## # pObject=PolygonColorsObject(name,colors)
## # g.color_obj=pObject
## # addObjectToScene(doc,pObject,parent=parent)
##
## def c4df(self,face,g,polygon):
## A = int(face[0])
## B = int(face[1])
## if len(face)==2 :
## C = B
## D = B
## poly=c4d.CPolygon(A, B, C)
## elif len(face)==3 :
## C = int(face[2])
## D = C
## poly=c4d.CPolygon(A, B, C)
## elif len(face)==4 :
## C = int(face[2])
## D = int(face[3])
## poly=c4d.CPolygon(A, B, C, D)
## polygon.SetPolygon(id=g, polygon=poly)
## return [A,B,C,D]
##
## def polygons(self,name,proxyCol=False,smooth=False,color=None, material=None, **kw):
## import time
## t1 = time.time()
## vertices = kw["vertices"]
## faces = kw["faces"]
## normals = kw["normals"]
## frontPolyMode='fill'
## if kw.has_key("frontPolyMode"):
## frontPolyMode = kw["frontPolyMode"]
## if kw.has_key("shading") :
## shading=kw["shading"]#'flat'
## if frontPolyMode == "line" : #wire mode
## material = self.getCurrentScene().SearchMaterial("wire")
## if material == None:
## material = self.addMaterial("wire",(0.5,0.5,0.5))
## polygon = c4d.PolygonObject(len(vertices), len(faces))
## polygon.SetName(name)
## k=0
## #map function is faster than the usual for loop
## #what about the lambda?
## cd4vertices = map(self.FromVec,vertices)
## map(polygon.SetPoint,range(len(vertices)),cd4vertices)
## #for v in vertices :
## #print v
## # polygon.SetPoint(k, c4dv(v))
## #polygon.SetPoint(k, c4d.Vector(float(v[0]), float(v[1]), float(v[2])))
## # k=k+1
## #c4dfaces = map(c4df,faces,range(len(faces)),[polygon]*len(faces))
## #map(polygon.SetPolygon,range(len(faces)),c4dfaces)
## for g in range(len(faces)):
## A = int(faces[g][0])
## B = int(faces[g][1])
## if len(faces[g])==2 :
## C = B
## D = B
## polygon.SetPolygon(id=g, polygon=c4d.CPolygon( A, B, C))
## elif len(faces[g])==3 :
## C = int(faces[g][2])
## D = C
## polygon.SetPolygon(id=g, polygon=c4d.CPolygon( A, B, C))
## elif len(faces[g])==4 :
## C = int(faces[g][2])
## D = int(faces[g][3])
## #print A
## polygon.SetPolygon(id=g, polygon=c4d.CPolygon( A, B, C, D ))
## t2=time.time()
## #print "time to create Mesh", (t2 - t1)
## #sys.stderr.write('\ntime to create Mesh %f\n' % (t2-t1))
## polygon.MakeTag(c4d.Tphong) #shading ?
## # create a texture tag on the PDBgeometry object
## if not proxyCol :
## texture = polygon.MakeTag(c4d.Ttexture)
## #create the dedicayed material
## if material == None :
## texture[1010] = self.addMaterial("mat_"+name,color[0])
## else : texture[1010] = material
## polygon.Message(c4d.MSG_UPDATE)
## return polygon
##
##
# def createsNmesh(self,name,vertices,vnormals,faces,smooth=False,
# material=None,proxyCol=False,color=[[1,0,0],],**kw):
# """
# This is the main function that create a polygonal mesh.
#
# @type name: string
#        @param name: name of the polygon mesh
# @type vertices: array
# @param vertices: list of x,y,z vertices points
# @type vnormals: array
# @param vnormals: list of x,y,z vertex normals vector
# @type faces: array
# @param faces: list of i,j,k indice of vertex by face
# @type smooth: boolean
# @param smooth: smooth the mesh
# @type material: hostApp obj
# @param material: material to apply to the mesh
#        @type proxyCol: boolean
#        @param proxyCol: whether a dedicated object is needed for per-vertex color (i.e. in C4D)
# @type color: array
# @param color: r,g,b value to color the mesh
#
# @rtype: hostApp obj
# @return: the polygon object
# """
#
# PDBgeometry = IndexedPolygons(name, vertices=vertices,
# faces=faces, vnormals=vnormals,materials=color,shading='flat',
# )
# parent = None
# if "parent" in kw :
# parent = kw["parent"]
# self.addObjectToScene(None,PDBgeometry,parent = parent)
# return [PDBgeometry,PDBgeometry]
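#        # Minimal usage sketch (hypothetical values; assumes `helper` is an
#        # instance of this class):
#        #   verts = [[0.,0.,0.],[1.,0.,0.],[0.,1.,0.]]
#        #   faces = [[0,1,2]]
#        #   geom,mesh = helper.createsNmesh("tri",verts,None,faces,
#        #                                   color=[[0.,1.,0.],])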
#
# def instancePolygon(self,name, matrices=None, mesh=None,parent=None,
# transpose= False,colors=None):
# if matrices == None : return None
# if mesh == None : return None
# instance = []
# geom = None
# if mesh is None :
# print("no mesh???")
# else:
# geom = IndexedPolygons(name, vertices=mesh.getVertices(),
# faces=mesh.getFaces(), vnormals=mesh.getVNormals()
# )
# self.addObjectToScene(None,geom,parent=parent)
# print("geom",geom)
# geom.Set(instanceMatrices=matrices, visible=1)
# if colors is not None :
# geom.Set(materials=colors, inheritMaterial=0)
# return geom
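#        # Usage sketch (placeholder values; assumes numpy is available):
#        #   instancePolygon("copies",matrices=[numpy.eye(4)]*3,mesh=geom)
#        # draws one instance of the mesh per 4x4 transform matrix.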
#
# def changeColor(self,obj,colors,perVertex=False,
# proxyObject=False,doc=None,pb=False):
# mesh=self.getMesh(obj)
# unic=False
# ncolor=None
#        faces = mesh.getFaces()
#        vertices = mesh.getVertices()
# #print len(colors),len(mesh.verts),len(mesh.faces)
# if len(colors) != len(vertices) and len(colors) == len(faces):
# perVertex=False
# elif len(colors) == len(vertices) and len(colors) != len(faces):
# perVertex=True
# else :
# if (len(colors) - len(vertices)) > (len(colors) - len(faces)) :
# perVertex=True
# else :
# perVertex=False
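#        # Colors are bound per vertex when their count matches the vertex
#        # count, per face when it matches the face count (see the checks above).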
# #print perVertex
## if len(colors)==1 :
## #print colors
## unic=True
## ncolor = self.convertColor(colors[0])#blenderColor(colors[0])
## else :
## colors = [self.convertColor(c) for c in colors]
# mesh.Set(materials = colors,inheritMaterial=False)
#
# def box(self,name,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,visible=1,
# mat = None,**kw):
# #import numpy
# box=Box(name)#, cornerPoints=bb, visible=1
# if cornerPoints != None :
# for i in range(3):
# size[i] = cornerPoints[1][i]-cornerPoints[0][i]
# center=(numpy.array(cornerPoints[0])+numpy.array(cornerPoints[1]))/2.
# box.Set(cornerPoints=list(cornerPoints))
# else :
# box.Set(center=center,xside=size[0],yside=size[1],zside=size[2])
#        #material is a list of colors, one per face.
# #aMat=addMaterial("wire")
# parent = None
# if "parent" in kw :
# parent = kw["parent"]
# self.addObjectToScene(self.getCurrentScene(),box,parent=parent)
# return box
#
# def updateBox(self,box,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,visible=1,
# mat = None):
# #import numpy
# box=self.getObject(box)
# if cornerPoints != None :
# for i in range(3):
# size[i] = cornerPoints[1][i]-cornerPoints[0][i]
# for i in range(3):
# center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
# box.Set(cornerPoints=list(cornerPoints))
# else :
# box.Set(center=center,xside=size[0],yside=size[1],zside=size[2])
#
# def getCornerPointCube(self,cube):
# if hasattr(cube,"size"):
# size = cube.side
# else :
# size = (cube.xside,cube.yside,cube.zside)
# center = cube.center
# cornerPoints=[]
# #lowCorner
#        lc = [center[0] - size[0]/2.,
#              center[1] - size[1]/2.,
#              center[2] - size[2]/2.]
#        uc = [center[0] + size[0]/2.,
#              center[1] + size[1]/2.,
#              center[2] + size[2]/2.]
#        cornerPoints=[[lc[0],lc[1],lc[2]],[uc[0],uc[1],uc[2]]]
# return cornerPoints
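#        # e.g. a cube centered at the origin with sides (2,2,2) gives
#        # cornerPoints [[-1,-1,-1],[1,1,1]].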
#
#
#
# def plane(self,name,center=[0.,0.,0.],size=[1.,1.],cornerPoints=None,visible=1,**kw):
# #plane or grid
# xres = 2
# yres = 2
# if "subdivision" in kw :
# xres = kw["subdivision"][0]
# yres = kw["subdivision"][1]
# if xres == 1 : xres = 2
# if yres == 1 : yres = 2
#
# #need to build vertices/faces for the plane
# #4corner points
# # *--*
# # |\ |
# # | \|
# # *--*
# #basic plane, no subdivision
# #what about subdivision
# vertices =[ (-0.5,0.5,0.0),
# (0.5,0.5,0.0),
# (0.5,-0.5,0.0),
# (-0.5,-0.5,0.0)]
# vnormals =[ (0.0,0.0,1.0),
# (0.0,0.0,1.0),
# (0.0,0.0,1.0),
# (0.0,0.0,1.0)]
# faces = ((2,1,0),(3,2,0))
#
# obj = IndexedPolygons(name, vertices=vertices,
# faces=faces, vnormals=None,shading='flat',
# materials=[[1,0,0],]
# )
#
# if cornerPoints != None :
#            for i in range(2):#size only stores the x and y extents
#                size[i] = cornerPoints[1][i]-cornerPoints[0][i]
# center=(numpy.array(cornerPoints[0])+numpy.array(cornerPoints[1]))/2.
# obj.translation = (float(center[0]),float(center[1]),float(center[2]))
# obj.Set(scale = (float(size[0]),float(size[1]),1.0))
#
# if "axis" in kw : #orientation
# dic = { "+X":[1.,0.,0.],"-X":[-1.,0.,0.],
# "+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
# "+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
# idic = { 0:[1.,0.,0.],1:[-1.,0.,0.],
# 2:[0.,1.,0.],3:[0.,-1.,0.],
# 4:[0.,0.,1.],5:[0.,0.,-1.]}
# if type(kw["axis"]) is str :
# axis = dic[kw["axis"]]
# else : #int
# axis = idic[kw["axis"]]
# #plane[c4d.PRIM_AXIS]=axis
# #should rotate around the axis
#
# if "material" in kw :
# if type(kw["material"]) is not bool :
#                self.assignMaterial(obj,[kw["material"],])
# else :
# self.addMaterial(name,[1.,1.,0.])
# parent = None
# if "parent" in kw :
# parent = kw["parent"]
# self.addObjectToScene(self.getCurrentScene(),obj,parent=parent)
# return obj
#
# def getFace(self,face):
# return face
#
## def triangulate(self,poly):
## #select poly
## doc = self.getCurrentScene()
## doc.SetActiveObject(poly)
## c4d.CallCommand(14048)#triangulate
##
## def makeEditable(self,object,copy=True):
## doc = self.getCurrentScene()
## #make a copy?
## if copy:
## clone = object.GetClone()
## clone.SetName("clone")
## doc.InsertObject(clone)
## doc.SetActiveObject(clone)
## c4d.CallCommand(12236)#make editable
## clone.Message(c4d.MSG_UPDATE)
## return clone
## else :
## doc.SetActiveObject(object)
## c4d.CallCommand(12236)
## return object
##
#
# def getMeshVertices(self,poly,transform=False):
# mesh = self.checkIsMesh(poly)
# return mesh.getVertices()
#
# def getMeshNormales(self,poly):
# mesh = self.checkIsMesh(poly)
# return mesh.getVNormals()
#
# def getMeshEdges(self,poly):
# mesh = self.checkIsMesh(poly)
# return None
#
# def getMeshFaces(self,poly):
# mesh = self.checkIsMesh(poly)
# return mesh.getFaces()
#
# def DecomposeMesh(self,poly,edit=True,copy=True,tri=True,transform=True):
# #get infos
# faces = poly.getFaces()
# vertices = poly.getVertices()
# vnormals = poly.getVNormals()
# if transform :
# mat = poly.GetMatrix(poly.LastParentBeforeRoot())
# vertices = self.ApplyMatrix(vertices,mat)
# return faces,vertices,vnormals
#
# def changeColorO(self,object,colors):
# object.Set(materials=colors)
#
# def setRigidBody(self,*args,**kw):
# pass
#
# def pathDeform(self,*args,**kw):
# pass
#
# def updatePathDeform(self,*args,**kw):
# pass
##
## ##############################AR METHODS#######################################
## def ARstep(mv):
## #from Pmv.hostappInterface import comput_util as C
## mv.art.beforeRedraw()
## #up(self,dialog)
## for arcontext in mv.art.arcontext :
## for pat in arcontext.patterns.values():
## if pat.isdetected:
## #print pat
## geoms_2_display = pat.geoms
## transfo_mat = pat.mat_transfo[:]
## #print transfo_mat[12:15]
## for geom in geoms_2_display :
## if hasattr(pat,'offset') : offset = pat.offset[:]
## else : offset =[0.,0.,0.]
## transfo_mat[12] = (transfo_mat[12]+offset[0])* mv.art.scaleDevice
## transfo_mat[13] = (transfo_mat[13]+offset[1])* mv.art.scaleDevice
## transfo_mat[14] = (transfo_mat[14]+offset[2])* mv.art.scaleDevice
## mat = transfo_mat.reshape(4,4)
## model = geom.obj
## # print obj.GetName()
## #r,t,s = C.Decompose4x4(Numeric.array(mat).reshape(16,))
## #print t
## #newPos = c4dv(t)
## #model.SetAbsPos(newPos)
## #model.Message(c4d.MSG_UPDATE)
## setObjectMatrix(model,mat)
## #updateAppli()
##
## def ARstepM(mv):
## #from Pmv.hostappInterface import comput_util as C
## from mglutil.math import rotax
## mv.art.beforeRedraw()
## #up(self,dialog)
## for arcontext in mv.art.arcontext :
## for pat in arcontext.patterns.values():
## if pat.isdetected:
## #print pat
## geoms_2_display = pat.geoms
##
## #m = pat.mat_transfo[:]#pat.moveMat[:]
## if mv.art.concat :
## m = pat.moveMat[:].reshape(16,)
## else :
## m = pat.mat_transfo[:].reshape(16,)
## #print transfo_mat[12:15]
## for geom in geoms_2_display :
## scale = float(mv.art.scaleObject)
## model = geom.obj
## if mv.art.patternMgr.mirror:
## #apply scale transformation GL.glScalef(-1.,1.,1)
## scaleObj(model,[-1.,1.,1.])
## if mv.art.concat :
## if hasattr(pat,'offset') : offset = pat.offset[:]
## else : offset =[0.,0.,0.]
## m[12] = (m[12]+offset[0])#* mv.art.scaleDevice
## m[13] = (m[13]+offset[1])#* mv.art.scaleDevice
## m[14] = (m[14]+offset[2])#* mv.art.scaleDevice
## newMat=rotax.interpolate3DTransform([m.reshape(4,4)], [1],
## mv.art.scaleDevice)
## concatObjectMatrix(model,newMat)
## else :
## if hasattr(pat,'offset') : offset = pat.offset[:]
## else : offset =[0.,0.,0.]
## m[12] = (m[12]+offset[0])* mv.art.scaleDevice
## m[13] = (m[13]+offset[1])* mv.art.scaleDevice
## m[14] = (m[14]+offset[2])* mv.art.scaleDevice
## #r1=m.reshape(4,4)
## #newMat=rotax.interpolate3DTransform([r1], [1],
## # mv.art.scaleDevice)
## #m[0:3][0:3]=newMat[0:3][0:3]
## setObjectMatrix(model,m.reshape(4,4))
## scaleObj(model,[scale,scale,scale])
## #updateAppli()
##
## def ARloop(mv,ar=True,im=None,ims=None,max=1000):
## count = 0
## while count < max:
## #print count
## if im is not None:
## updateImage(mv,im,scale=ims)
## if ar :
## ARstep(mv)
## update()
## count = count + 1
##
## def AR(mv,v=None,ar=True):#,im=None,ims=None,max=1000):
## count = 0
## while 1:
## #print count
## if v is not None:
## #updateBmp(mv,bmp,scale=None,show=False,viewport=v)
## updateImage(mv,viewport=v)
## if ar :
## ARstepM(mv)
## #update()
## count = count + 1
##
##
## Y=range(480)*640
## Y.sort()
##
## X=range(640)*480
##
##
## #import StringIO
## #im = Image.open(StringIO.StringIO(buffer))
## #helper.updateImage(self,viewport=Right,order=[1, 2, 3, 1])
## def updateImage(mv,viewport=None,order=[1, 2, 3, 1]):
## #debug image is just white...
## try :
## if viewport is not None :
## viewport[c4d.BASEDRAW_DATA_SHOWPICTURE] = bool(mv.art.AR.show_tex)
## import Image
## cam = mv.art.arcontext[0].cam
## cam.lock.acquire()
## #print "acquire"
## #arcontext = mv.art.arcontext[0]
## #array = Numeric.array(cam.im_array[:])
## #n=int(len(array)/(cam.width*cam.height))
## if mv.art.AR.debug :
## array = cam.imd_array[:]#.tostring()
## #print "debug",len(array)
## else :
## array = cam.im_array[:]#.tostring()
## #print "normal",len(array)
## #img=Numeric.array(array[:])
## #n=int(len(img)/(arcontext.cam.width*arcontext.cam.height))
## #img=img.reshape(arcontext.cam.height,arcontext.cam.width,n)
## #if n == 3 :
## # mode = "RGB"
## #else :
## # mode = "RGBA"
## #im = Image.fromarray(img, mode)#.resize((160,120),Image.NEAREST).transpose(Image.FLIP_TOP_BOTTOM)
## im = Image.fromstring("RGBA",(mv.art.video.width,mv.art.video.height),
## array.tostring() ).resize((320,240),Image.NEAREST)
## #cam.lock.release()
## #scale/resize image ?
## #print "image"
## rgba = im.split()
## new = Image.merge("RGBA", (rgba[order[0]],rgba[order[1]],rgba[order[2]],rgba[order[3]]))
## #print "save"
## if mv.art.patternMgr.mirror :
## import ImageOps
##                im=ImageOps.mirror(new)
## imf=ImageOps.flip(im)
## imf.save("/tmp/arpmv.jpg")
## else :
## new.save("/tmp/arpmv.jpg")
## if viewport is not None :
## viewport[c4d.BASEDRAW_DATA_PICTURE] = "/tmp/arpmv.jpg"
## #print "update"
## cam.lock.release()
## except:
## print "PROBLEM VIDEO"
##
##
## def updateBmp(mv,bmp,scale=None,order=[3, 2, 2, 1],show=True,viewport=None):
## #cam.lock.acquire()
## #dialog.keyModel.Set(imarray=cam.im_array.copy())
## #cam.lock.release()
## #import Image
## cam = mv.art.arcontext[0].cam
## mv.art.arcontext[0].cam.lock.acquire()
## array = Numeric.array(cam.im_array[:])
## mv.art.arcontext[0].cam.lock.release()
## n=int(len(array)/(cam.width*cam.height))
## array.shape = (-1,4)
## map( lambda x,y,v,bmp=bmp: bmp.SetPixel(x, y, v[1], v[2], v[3]),X, Y, array)
##
## if scale != None :
## bmp.Scale(scale,256,False,False)
## if show : c4d.bitmaps.ShowBitmap(scale)
## scale.Save(name="/tmp/arpmv.jpg", format=c4d.symbols.FILTER_JPG)
## else :
## if show : c4d.bitmaps.ShowBitmap(bmp)
## bmp.Save(name="/tmp/arpmv.jpg", format=c4d.symbols.FILTER_JPG)
## if viewport is not None:
## viewport[c4d.symbols.BASEDRAW_DATA_PICTURE] = "/tmp/arpmv.jpg"
##
##
##
## def render(name,w,h):
## doc = c4d.documents.GetActiveDocument()
## rd = doc.GetActiveRenderData().GetData()
## bmp = c4d.bitmaps.BaseBitmap()
## #Initialize the bitmap with the result size.
## #The resolution must match with the output size of the render settings.
## bmp.Init(x=w, y=h, depth=32)
## c4d.documents.RenderDocument(doc, rd, bmp, c4d.RENDERFLAGS_EXTERNAL)
## #bitmaps.ShowBitmap(bmp)
## bmp.Save(name,c4d.FILTER_TIF)
##
| 40.721827
| 147
| 0.519072
|
8f792e948b24ceb5a378af4d236018ea53586a80
| 877
|
py
|
Python
|
Scripts/rst2xetex.py
|
Srinath-tr/Goferbot
|
0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | 1
|
2019-04-23T21:50:08.000Z
|
2019-04-23T21:50:08.000Z
|
Scripts/rst2xetex.py
|
Srinath-tr/Goferbot
|
0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | null | null | null |
Scripts/rst2xetex.py
|
Srinath-tr/Goferbot
|
0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | 2
|
2019-02-14T08:13:33.000Z
|
2019-04-23T21:47:48.000Z
|
#!g:\python27\python.exe
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources for compilation with the Unicode-aware TeX variants '
'XeLaTeX or LuaLaTeX. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
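# Typical invocation (hypothetical file names):
#   python rst2xetex.py document.rst document.tex
#   xelatex document.tex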
| 31.321429
| 77
| 0.670468
|
01f0ae72905a0e7ab33ab0a7c00391e471f0cc24
| 1,611
|
py
|
Python
|
safeai/utils/distribution.py
|
HanbumKo/SafeAI
|
ad7e5d66abcfe82b0de260b606853bddb68e68ee
|
[
"MIT"
] | 13
|
2018-11-02T12:10:01.000Z
|
2020-05-18T17:38:25.000Z
|
safeai/utils/distribution.py
|
HanbumKo/SafeAI
|
ad7e5d66abcfe82b0de260b606853bddb68e68ee
|
[
"MIT"
] | 2
|
2018-11-15T06:16:06.000Z
|
2018-11-19T15:23:04.000Z
|
safeai/utils/distribution.py
|
HanbumKo/SafeAI
|
ad7e5d66abcfe82b0de260b606853bddb68e68ee
|
[
"MIT"
] | 4
|
2018-11-23T05:59:43.000Z
|
2020-08-28T04:21:27.000Z
|
# Copyright (c) 2018 Episys Science, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
def kld_with_uniform(target_distribution):
    # Expects (examples, classes) as shape
    num_classes = tf.cast(target_distribution.shape[1], tf.float32)
    # A uniform distribution of the same shape: every entry is 1 / num_classes.
    uniform_distribution = tf.divide(tf.ones_like(target_distribution), num_classes)
    x = tf.distributions.Categorical(probs=target_distribution)
    y = tf.distributions.Categorical(probs=uniform_distribution)
    # Per-example KL(target || uniform), scaled by the number of classes.
    return tf.distributions.kl_divergence(x, y, allow_nan_stats=False) * num_classes
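# A minimal usage sketch, assuming TensorFlow 1.x (where tf.distributions is
# available); the probabilities below are illustrative:
#   probs = tf.constant([[0.7, 0.2, 0.1],
#                        [1/3, 1/3, 1/3]])  # shape (examples, classes)
#   kld = kld_with_uniform(probs)           # one scaled KLD per example
#   with tf.Session() as sess:
#       print(sess.run(kld))                # second row is ~0: already uniform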
| 53.7
| 102
| 0.782744
|
0bf4ff856c601025337985af647f330321c4c72e
| 889
|
py
|
Python
|
008 Controle de tela/triangulo.py
|
yamadathamine/300ideiasparaprogramarPython
|
331a063bbf8bcd117ae5a34324b8176a6014fc98
|
[
"MIT"
] | null | null | null |
008 Controle de tela/triangulo.py
|
yamadathamine/300ideiasparaprogramarPython
|
331a063bbf8bcd117ae5a34324b8176a6014fc98
|
[
"MIT"
] | 4
|
2020-06-09T19:10:04.000Z
|
2020-06-17T18:23:47.000Z
|
008 Controle de tela/triangulo.py
|
yamadathamine/300ideiasparaprogramarPython
|
331a063bbf8bcd117ae5a34324b8176a6014fc98
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# using python 3
# Triangle with initials - Write a program that reads row and column values,
# plus the initials of a name (up to 3 characters), and draws a triangle
# (sketch the drawing out first) with one vertex at the given row and column
# and the initials inside.
import os
os.system('clear')
linha = int(input("Enter the row: "))
coluna = int(input("Enter the column: "))
iniciais = input("Enter your initials (up to 3 characters): ")
print("\033["+str(linha)+";"+str(coluna)+"H#")
linha += 1
coluna -= 1
print("\033["+str(linha)+";"+str(coluna)+"H# #")
linha += 1
coluna -= 1
print("\033["+str(linha)+";"+str(coluna)+"H#"+iniciais+"#")
linha += 1
coluna -= 1
print("\033["+str(linha)+";"+str(coluna)+"H# #")
linha += 1
coluna -= 1
print("\033["+str(linha)+";"+str(coluna)+"H# #")
linha += 1
coluna -= 1
print("\033["+str(linha)+";"+str(coluna)+"H###########")
| 30.655172
| 92
| 0.622047
|
1d113f6d8b42620d1d1375a9ab9f23de391627d3
| 2,981
|
py
|
Python
|
backend/logger/settings/base.py
|
ThreeDRadio/intranet
|
b8c6ab177d508816da624d5063337cbd475fee9a
|
[
"MIT"
] | null | null | null |
backend/logger/settings/base.py
|
ThreeDRadio/intranet
|
b8c6ab177d508816da624d5063337cbd475fee9a
|
[
"MIT"
] | 1
|
2016-10-31T11:17:13.000Z
|
2016-10-31T11:17:13.000Z
|
backend/logger/settings/base.py
|
ThreeDRadio/intranet
|
b8c6ab177d508816da624d5063337cbd475fee9a
|
[
"MIT"
] | null | null | null |
"""
Django settings for logger project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'quual-o8ahjb)v&0h=h@wo!_ha@r!#$trtg1pi855(^$w_!(wd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'logger.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'logger.wsgi.application'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Finder order matters (first match wins), so use a tuple rather than a set.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'djangobower.finders.BowerFinder',
)
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components')
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
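# Since this module is settings/base.py, the usual pattern is one thin module
# per environment importing from it; a hypothetical sketch (module name and
# host are illustrative):
#   # logger/settings/production.py
#   import os
#   from .base import *  # noqa: F401,F403
#   DEBUG = False
#   ALLOWED_HOSTS = ['logger.example.com']
#   SECRET_KEY = os.environ['DJANGO_SECRET_KEY']  # keep the real key out of source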
| 29.81
| 85
| 0.705468
|
d3522c39e6e5a4b7109e944275d42224bf7ce7ec
| 19,370
|
py
|
Python
|
asset/google/cloud/asset_v1beta1/gapic/asset_service_client.py
|
hzyi-google/google-cloud-python
|
aa3c3ca303b385a6b118204ce91fa803c1d001b9
|
[
"Apache-2.0"
] | 1
|
2019-12-09T11:40:28.000Z
|
2019-12-09T11:40:28.000Z
|
asset/google/cloud/asset_v1beta1/gapic/asset_service_client.py
|
hzyi-google/google-cloud-python
|
aa3c3ca303b385a6b118204ce91fa803c1d001b9
|
[
"Apache-2.0"
] | null | null | null |
asset/google/cloud/asset_v1beta1/gapic/asset_service_client.py
|
hzyi-google/google-cloud-python
|
aa3c3ca303b385a6b118204ce91fa803c1d001b9
|
[
"Apache-2.0"
] | 1
|
2021-12-27T05:31:45.000Z
|
2021-12-27T05:31:45.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.asset.v1beta1 AssetService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.path_template
import grpc
from google.cloud.asset_v1beta1.gapic import asset_service_client_config
from google.cloud.asset_v1beta1.gapic import enums
from google.cloud.asset_v1beta1.gapic.transports import asset_service_grpc_transport
from google.cloud.asset_v1beta1.proto import asset_service_pb2
from google.cloud.asset_v1beta1.proto import asset_service_pb2_grpc
from google.cloud.asset_v1beta1.proto import assets_pb2
from google.longrunning import operations_pb2
from google.protobuf import timestamp_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-asset").version
class AssetServiceClient(object):
"""Asset service definition."""
SERVICE_ADDRESS = "cloudasset.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.asset.v1beta1.AssetService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def project_path(cls, project):
"""DEPRECATED. Return a fully-qualified project string."""
warnings.warn(
"Resource name helper functions are deprecated.",
PendingDeprecationWarning,
stacklevel=1,
)
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.AssetServiceGrpcTransport,
Callable[[~.Credentials, type], ~.AssetServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = asset_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=asset_service_grpc_transport.AssetServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = asset_service_grpc_transport.AssetServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def export_assets(
self,
parent,
output_config,
read_time=None,
asset_types=None,
content_type=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Exports assets with time and resource types to a given Cloud Storage
location. The output format is newline-delimited JSON. This API
implements the ``google.longrunning.Operation`` API allowing you to keep
track of the export.
Example:
>>> from google.cloud import asset_v1beta1
>>>
>>> client = asset_v1beta1.AssetServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.export_assets(parent, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The relative name of the root asset. This can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id"), a project number (such as "projects/12345"), or
a folder number (such as "folders/123").
output_config (Union[dict, ~google.cloud.asset_v1beta1.types.OutputConfig]): Required. Output configuration indicating where the results will be output
to. All results will be in newline delimited JSON format.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.asset_v1beta1.types.OutputConfig`
read_time (Union[dict, ~google.cloud.asset_v1beta1.types.Timestamp]): Timestamp to take an asset snapshot. This can only be set to a timestamp
between 2018-10-02 UTC (inclusive) and the current time. If not specified,
the current time will be used. Due to delays in resource data collection
and indexing, there is a volatile window during which running the same
query may get different results.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.asset_v1beta1.types.Timestamp`
asset_types (list[str]): A list of asset types of which to take a snapshot for. For example:
"google.compute.Disk". If specified, only matching assets will be
returned. See `Introduction to Cloud Asset
Inventory <https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/overview>`__
for all supported asset types.
content_type (~google.cloud.asset_v1beta1.types.ContentType): Asset content type. If not specified, no content but the asset name will be
returned.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.asset_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "export_assets" not in self._inner_api_calls:
self._inner_api_calls[
"export_assets"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.export_assets,
default_retry=self._method_configs["ExportAssets"].retry,
default_timeout=self._method_configs["ExportAssets"].timeout,
client_info=self._client_info,
)
request = asset_service_pb2.ExportAssetsRequest(
parent=parent,
output_config=output_config,
read_time=read_time,
asset_types=asset_types,
content_type=content_type,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["export_assets"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
asset_service_pb2.ExportAssetsResponse,
metadata_type=asset_service_pb2.ExportAssetsRequest,
)
def batch_get_assets_history(
self,
parent,
content_type,
read_time_window,
asset_names=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
        Batch gets the update history of assets that overlap a time window. For
        RESOURCE content, this API outputs history for assets in both
        non-deleted and deleted states. For IAM\_POLICY content, this API
        outputs history only when the asset and its attached IAM POLICY both
        exist. This can create gaps in the output history.
Example:
>>> from google.cloud import asset_v1beta1
>>> from google.cloud.asset_v1beta1 import enums
>>>
>>> client = asset_v1beta1.AssetServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `content_type`:
>>> content_type = enums.ContentType.CONTENT_TYPE_UNSPECIFIED
>>>
>>> # TODO: Initialize `read_time_window`:
>>> read_time_window = {}
>>>
>>> response = client.batch_get_assets_history(parent, content_type, read_time_window)
Args:
parent (str): Required. The relative name of the root asset. It can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id")", or a project number (such as "projects/12345").
content_type (~google.cloud.asset_v1beta1.types.ContentType): Required. The content type.
read_time_window (Union[dict, ~google.cloud.asset_v1beta1.types.TimeWindow]): Optional. The time window for the asset history. Both start\_time and
end\_time are optional and if set, it must be after 2018-10-02 UTC. If
end\_time is not set, it is default to current timestamp. If start\_time
is not set, the snapshot of the assets at end\_time will be returned.
The returned results contain all temporal assets whose time window
overlap with read\_time\_window.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.asset_v1beta1.types.TimeWindow`
asset_names (list[str]): A list of the full names of the assets. For example:
``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``.
See `Resource
Names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__
for more info.
The request becomes a no-op if the asset name list is empty, and the max
size of the asset name list is 100 in one request.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.asset_v1beta1.types.BatchGetAssetsHistoryResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_get_assets_history" not in self._inner_api_calls:
self._inner_api_calls[
"batch_get_assets_history"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_get_assets_history,
default_retry=self._method_configs["BatchGetAssetsHistory"].retry,
default_timeout=self._method_configs["BatchGetAssetsHistory"].timeout,
client_info=self._client_info,
)
request = asset_service_pb2.BatchGetAssetsHistoryRequest(
parent=parent,
content_type=content_type,
read_time_window=read_time_window,
asset_names=asset_names,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["batch_get_assets_history"](
request, retry=retry, timeout=timeout, metadata=metadata
)
| 44.837963
| 163
| 0.630924
|
78778b057da7a3a6280e31daca547a104102c7c1
| 3,000
|
py
|
Python
|
libica/openapi/libgds/test/test_create_volume_response.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | null | null | null |
libica/openapi/libgds/test/test_create_volume_response.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | 4
|
2021-11-15T10:47:51.000Z
|
2022-02-22T04:43:20.000Z
|
libica/openapi/libgds/test/test_create_volume_response.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Genomic Data Store Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import libica.openapi.libgds
from libica.openapi.libgds.models.create_volume_response import CreateVolumeResponse # noqa: E501
from libica.openapi.libgds.rest import ApiException
class TestCreateVolumeResponse(unittest.TestCase):
"""CreateVolumeResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test CreateVolumeResponse
        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included """
# model = libica.openapi.libgds.models.create_volume_response.CreateVolumeResponse() # noqa: E501
        if include_optional:
return CreateVolumeResponse(
id = '0',
name = '0',
tenant_id = '0',
sub_tenant_id = '0',
urn = '0',
root_folder_id = '0',
root_key_prefix = '0',
volume_configuration_name = '0',
inherited_acl = [
'0'
],
time_created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
created_by = '0',
time_modified = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
modified_by = '0',
job_status = 'None',
metadata = None,
import_session_id = '0',
object_store_access = libica.openapi.libgds.models.object_store_access.ObjectStoreAccess(
session_id = '0',
aws_s3_temporary_upload_credentials = libica.openapi.libgds.models.aws_s3_temporary_upload_credentials.AwsS3TemporaryUploadCredentials(
access_key_id = '0',
secret_access_key = '0',
session_token = '0',
region = '0',
bucket_name = '0',
key_prefix = '0',
expiration_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
service_url = '0', ), )
)
        else:
return CreateVolumeResponse(
)
def testCreateVolumeResponse(self):
"""Test CreateVolumeResponse"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 37.037037
| 155
| 0.569333
|
fcc90fa61a37d13e8ec9adbb16efc035cbae4713
| 496
|
py
|
Python
|
applications/FluidTransportApplication/FluidTransportApplication.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 2
|
2019-10-25T09:28:10.000Z
|
2019-11-21T12:51:46.000Z
|
applications/FluidTransportApplication/FluidTransportApplication.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 13
|
2019-10-07T12:06:51.000Z
|
2020-02-18T08:48:33.000Z
|
applications/FluidTransportApplication/FluidTransportApplication.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | null | null | null |
# makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Application dependent names and paths
import KratosMultiphysics as KM
from KratosFluidTransportApplication import *
application = KratosFluidTransportApplication()
application_name = "KratosFluidTransportApplication"
application_folder = "FluidTransportApplication"
KM._ImportApplicationAsModule(application, application_name, application_folder, __path__)
| 45.090909
| 90
| 0.866935
|
6ddf651f2168fb6f3bc7d4498c1f43442b950ecf
| 333
|
py
|
Python
|
nlidbTranslator/api/adapters/IRNet/constants.py
|
DataManagementLab/univerSQL
|
1e37f089c1f1dfc8756c183db019a90e5a9a2c2c
|
[
"MIT"
] | null | null | null |
nlidbTranslator/api/adapters/IRNet/constants.py
|
DataManagementLab/univerSQL
|
1e37f089c1f1dfc8756c183db019a90e5a9a2c2c
|
[
"MIT"
] | null | null | null |
nlidbTranslator/api/adapters/IRNet/constants.py
|
DataManagementLab/univerSQL
|
1e37f089c1f1dfc8756c183db019a90e5a9a2c2c
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from api.paths import TRANSLATORS_DIR
CURRENT_DIR = os.path.dirname(__file__)
# Paths
IRNET_BASE_DIR = Path(TRANSLATORS_DIR) / "IRNet"
PATH_TO_CONCEPTNET = str(Path(IRNET_BASE_DIR) / "./conceptNet/")
PATH_TO_PRETRAINED_MODEL = Path(IRNET_BASE_DIR) / "saved_model" / "IRNet_pretrained.model"
| 19.588235
| 90
| 0.780781
|
c6b111132bb07cfb621f7069861877320d4b055e
| 1,499
|
py
|
Python
|
main.py
|
BurnySc2/strawberry-subscription
|
77f07de62187e1f7ac1273f18144f5183d3c294c
|
[
"MIT"
] | null | null | null |
main.py
|
BurnySc2/strawberry-subscription
|
77f07de62187e1f7ac1273f18144f5183d3c294c
|
[
"MIT"
] | null | null | null |
main.py
|
BurnySc2/strawberry-subscription
|
77f07de62187e1f7ac1273f18144f5183d3c294c
|
[
"MIT"
] | null | null | null |
from typing import AsyncGenerator, Set
import uvicorn
from fastapi import FastAPI
from loguru import logger
import strawberry
from strawberry.fastapi import GraphQLRouter
from broadcaster import Broadcast, Subscriber, BroadcastEvent
active_users: Set[str] = set()
broadcast = Broadcast()
@strawberry.type
class Query:
@strawberry.field
def hello(self) -> str:
return 'Hello World'
@strawberry.type
class Mutation:
@strawberry.mutation
async def chat_join_room(self, username: str) -> bool:
if username in active_users:
return False
active_users.add(username)
await broadcast.publish("chatroom", username)
return True
@strawberry.type
class Subscription:
@strawberry.subscription
async def chat_user_joined(self) -> AsyncGenerator[str, None]:
subscriber: Subscriber
async with broadcast.subscribe(channel="chatroom") as subscriber:
logger.info("Subscribed")
event: BroadcastEvent
try:
async for event in subscriber:
logger.info(event)
yield event.message
finally:
logger.info("Unsubscribed")
schema = strawberry.Schema(Query, mutation=Mutation, subscription=Subscription)
graphql_app = GraphQLRouter(schema)
app = FastAPI()
app.include_router(graphql_app, prefix='/graphql')
if __name__ == '__main__':
uvicorn.run('__main__:app', host='0.0.0.0', port=8000, reload=True)
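# For illustration, the GraphQL documents a client could send to /graphql
# (strawberry exposes snake_case fields as camelCase by default):
#   query        { hello }                            # -> "Hello World"
#   mutation     { chatJoinRoom(username: "alice") }  # -> true on first join
#   subscription { chatUserJoined }                   # yields each joined username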
| 25.844828
| 79
| 0.679787
|
04946f555b0e3eaa7fdb8497fb6c5f29e5a7ad76
| 445
|
py
|
Python
|
aggregables/captures/matplotlib/timelines.py
|
nevesnunes/aggregables
|
8fb7ea1f97ceeb16c719312d0075294b19d098b3
|
[
"MIT"
] | null | null | null |
aggregables/captures/matplotlib/timelines.py
|
nevesnunes/aggregables
|
8fb7ea1f97ceeb16c719312d0075294b19d098b3
|
[
"MIT"
] | null | null | null |
aggregables/captures/matplotlib/timelines.py
|
nevesnunes/aggregables
|
8fb7ea1f97ceeb16c719312d0075294b19d098b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import sys
if __name__ == "__main__":
    filename = sys.argv[1]
    # names=True reads the first CSV row as a header; columns are accessed by name.
    data = np.genfromtxt(filename, delimiter=",", names=True)
    begin = data[data.dtype.names[0]]
    end = data[data.dtype.names[1]]
    event = ["{}".format(i) for i in range(len(begin))]
    # One horizontal bar per row, spanning [begin, end].
    plt.barh(range(len(begin)), end - begin, left=begin)
    plt.yticks(range(len(begin)), event)
    plt.show()
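# An illustrative input file (header names are arbitrary, since the two
# columns are taken by position):
#   begin,end
#   0.0,2.5
#   1.0,4.0
#   3.5,5.0
# Each row becomes one horizontal bar spanning [begin, end] on its own y tick.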
| 27.8125
| 61
| 0.653933
|
9aba9d63556da7022eadedef3e6bfb84c7458932
| 34,431
|
py
|
Python
|
env/Lib/site-packages/sqlalchemy/orm/state.py
|
m4573rn3rd/flaskaiml
|
076c4064a52e8717a80fc5e79304e1c5d889116c
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/sqlalchemy/orm/state.py
|
m4573rn3rd/flaskaiml
|
076c4064a52e8717a80fc5e79304e1c5d889116c
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/sqlalchemy/orm/state.py
|
m4573rn3rd/flaskaiml
|
076c4064a52e8717a80fc5e79304e1c5d889116c
|
[
"MIT"
] | 1
|
2021-06-20T19:28:37.000Z
|
2021-06-20T19:28:37.000Z
|
# orm/state.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from . import base
from . import exc as orm_exc
from . import interfaces
from .base import ATTR_WAS_SET
from .base import INIT_OK
from .base import NEVER_SET
from .base import NO_VALUE
from .base import PASSIVE_NO_INITIALIZE
from .base import PASSIVE_NO_RESULT
from .base import PASSIVE_OFF
from .base import SQL_OK
from .path_registry import PathRegistry
from .. import exc as sa_exc
from .. import inspection
from .. import util
# late-populated by session.py
_sessions = None
# optionally late-provided by sqlalchemy.ext.asyncio.session
_async_provider = None
@inspection._self_inspects
class InstanceState(interfaces.InspectionAttrInfo):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
in order to acquire a :class:`.InstanceState` object
is to use the :func:`_sa.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = PathRegistry.root
insert_order = None
_strong_obj = None
modified = False
expired = False
_deleted = False
_load_pending = False
_orphaned_outside_of_session = False
is_instance = True
identity_token = None
_last_known_values = ()
callables = ()
"""A namespace where a per-state loader callable can be associated.
In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
loaders that were set up via query option.
Previously, callables was used also to indicate expired attributes
by storing a link to the InstanceState itself in this dictionary.
This role is now handled by the expired_attributes set.
"""
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.committed_state = {}
self.expired_attributes = set()
expired_attributes = None
"""The set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict((key, AttributeState(self, key)) for key in self.manager)
)
@property
def transient(self):
"""Return ``True`` if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and not self._attached
@property
def pending(self):
"""Return ``True`` if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and self._attached
@property
def deleted(self):
"""Return ``True`` if the object is :term:`deleted`.
An object that is in the deleted state is guaranteed to
not be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`; however if the session's transaction is rolled
back, the object will be restored to the persistent state and
the identity map.
.. note::
The :attr:`.InstanceState.deleted` attribute refers to a specific
state of the object that occurs between the "persistent" and
"detached" states; once the object is :term:`detached`, the
:attr:`.InstanceState.deleted` attribute **no longer returns
True**; in order to detect that a state was deleted, regardless
of whether or not the object is associated with a
:class:`.Session`, use the :attr:`.InstanceState.was_deleted`
accessor.
.. versionadded: 1.1
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and self._attached and self._deleted
@property
def was_deleted(self):
"""Return True if this object is or was previously in the
"deleted" state and has not been reverted to persistent.
This flag returns True once the object was deleted in flush.
When the object is expunged from the session either explicitly
or via transaction commit and enters the "detached" state,
this flag will continue to report True.
.. versionadded:: 1.1 - added a local method form of
:func:`.orm.util.was_deleted`.
.. seealso::
:attr:`.InstanceState.deleted` - refers to the "deleted" state
:func:`.orm.util.was_deleted` - standalone function
:ref:`session_object_states`
"""
return self._deleted
@property
def persistent(self):
"""Return ``True`` if the object is :term:`persistent`.
An object that is in the persistent state is guaranteed to
be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`.
.. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
accessor no longer returns True for an object that was
"deleted" within a flush; use the :attr:`.InstanceState.deleted`
accessor to detect this state. This allows the "persistent"
state to guarantee membership in the identity map.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and self._attached and not self._deleted
@property
def detached(self):
"""Return ``True`` if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and not self._attached
@property
@util.preload_module("sqlalchemy.orm.session")
def _attached(self):
return (
self.session_id is not None
and self.session_id in util.preloaded.orm_session._sessions
)
def _track_last_known_value(self, key):
"""Track the last known value of a particular key after expiration
operations.
.. versionadded:: 1.3
"""
if key not in self._last_known_values:
self._last_known_values = dict(self._last_known_values)
self._last_known_values[key] = NO_VALUE
@property
def session(self):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available.
Note that the result here can in some cases be *different*
from that of ``obj in session``; an object that's been deleted
will report as not ``in session``, however if the transaction is
still in progress, this attribute will still refer to that session.
Only when the transaction is completed does the object become
fully detached under normal circumstances.
.. seealso::
:attr:`_orm.InstanceState.async_session`
"""
if self.session_id:
try:
return _sessions[self.session_id]
except KeyError:
pass
return None
@property
def async_session(self):
"""Return the owning :class:`_asyncio.AsyncSession` for this instance,
or ``None`` if none available.
This attribute is only non-None when the :mod:`sqlalchemy.ext.asyncio`
API is in use for this ORM object. The returned
:class:`_asyncio.AsyncSession` object will be a proxy for the
:class:`_orm.Session` object that would be returned from the
:attr:`_orm.InstanceState.session` attribute for this
:class:`_orm.InstanceState`.
.. versionadded:: 1.4.18
.. seealso::
:ref:`asyncio_toplevel`
"""
if _async_provider is None:
return None
sess = self.session
if sess is not None:
return _async_provider(sess)
else:
return None
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`_query.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is :term:`transient` or :term:`pending`
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def _empty_collections(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`_orm.Mapper` used for this mapped object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent`` or ``state.detached``.
"""
return bool(self.key)
@classmethod
def _detach_states(self, states, session, to_transient=False):
persistent_to_detached = (
session.dispatch.persistent_to_detached or None
)
deleted_to_detached = session.dispatch.deleted_to_detached or None
pending_to_transient = session.dispatch.pending_to_transient or None
persistent_to_transient = (
session.dispatch.persistent_to_transient or None
)
for state in states:
deleted = state._deleted
pending = state.key is None
persistent = not pending and not deleted
state.session_id = None
if to_transient and state.key:
del state.key
if persistent:
if to_transient:
if persistent_to_transient is not None:
persistent_to_transient(session, state)
elif persistent_to_detached is not None:
persistent_to_detached(session, state)
elif deleted and deleted_to_detached is not None:
deleted_to_detached(session, state)
elif pending and pending_to_transient is not None:
pending_to_transient(session, state)
state._strong_obj = None
def _detach(self, session=None):
if session:
InstanceState._detach_states([self], session)
else:
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
"""Weakref callback cleanup.
This callable cleans out the state when it is being garbage
collected.
this _cleanup **assumes** that there are no strong refs to us!
Will not work otherwise!
"""
# Python builtins become undefined during interpreter shutdown.
# Guard against exceptions during this phase, as the method cannot
# proceed in any case if builtins have been undefined.
if dict is None:
return
instance_dict = self._instance_dict()
if instance_dict is not None:
instance_dict._fast_discard(self)
del self._instance_dict
# we can't possibly be in instance_dict._modified
# b.c. this is weakref cleanup only, that set
# is strong referencing!
# assert self not in instance_dict._modified
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
with util.safe_reraise():
manager.dispatch.init_failure(self, args, kwargs)
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {"instance": self.obj()}
state_dict.update(
(k, self.__dict__[k])
for k in (
"committed_state",
"_pending_mutations",
"modified",
"expired",
"callables",
"key",
"parents",
"load_options",
"class_",
"expired_attributes",
"info",
)
if k in self.__dict__
)
if self.load_path:
state_dict["load_path"] = self.load_path.serialize()
state_dict["manager"] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict["instance"]
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here generally new as of 0.7.4
# due to storage of state in "parents". "class_"
# also new.
self.obj = None
self.class_ = state_dict["class_"]
self.committed_state = state_dict.get("committed_state", {})
self._pending_mutations = state_dict.get("_pending_mutations", {})
self.parents = state_dict.get("parents", {})
self.modified = state_dict.get("modified", False)
self.expired = state_dict.get("expired", False)
if "info" in state_dict:
self.info.update(state_dict["info"])
if "callables" in state_dict:
self.callables = state_dict["callables"]
try:
self.expired_attributes = state_dict["expired_attributes"]
except KeyError:
self.expired_attributes = set()
# 0.9 and earlier compat
for k in list(self.callables):
if self.callables[k] is self:
self.expired_attributes.add(k)
del self.callables[k]
else:
if "expired_attributes" in state_dict:
self.expired_attributes = state_dict["expired_attributes"]
else:
self.expired_attributes = set()
self.__dict__.update(
[
(k, state_dict[k])
for k in ("key", "load_options")
if k in state_dict
]
)
if self.key:
try:
self.identity_token = self.key[2]
except IndexError:
# 1.1 and earlier compat before identity_token
assert len(self.key) == 2
self.key = self.key + (None,)
self.identity_token = None
if "load_path" in state_dict:
self.load_path = PathRegistry.deserialize(state_dict["load_path"])
state_dict["manager"](self, inst, state_dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.expired_attributes.discard(key)
if self.callables:
self.callables.pop(key, None)
def _copy_callables(self, from_):
if "callables" in from_.__dict__:
self.callables = dict(from_.callables)
@classmethod
def _instance_level_callable_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
if "callables" not in state.__dict__:
state.callables = {}
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
if "callables" not in state.__dict__:
state.callables = {}
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.committed_state.clear()
self.modified = False
self._strong_obj = None
if "_pending_mutations" in self.__dict__:
del self.__dict__["_pending_mutations"]
if "parents" in self.__dict__:
del self.__dict__["parents"]
self.expired_attributes.update(
[impl.key for impl in self.manager._loader_impls]
)
if self.callables:
# the per state loader callables we can remove here are
# LoadDeferredColumns, which undefers a column at the instance
# level that is mapped with deferred, and LoadLazyAttribute,
# which lazy loads a relationship at the instance level that
# is mapped with "noload" or perhaps "immediateload".
# Before 1.4, only column-based
# attributes could be considered to be "expired", so here they
# were the only ones "unexpired", which means to make them deferred
# again. For the moment, as of 1.4 we also apply the same
# treatment relationships now, that is, an instance level lazy
# loader is reset in the same way as a column loader.
for k in self.expired_attributes.intersection(self.callables):
del self.callables[k]
for k in self.manager._collection_impl_keys.intersection(dict_):
collection = dict_.pop(k)
collection._sa_adapter.invalidated = True
if self._last_known_values:
self._last_known_values.update(
(k, dict_[k]) for k in self._last_known_values if k in dict_
)
for key in self.manager._all_key_set.intersection(dict_):
del dict_[key]
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names, no_loader=False):
pending = self.__dict__.get("_pending_mutations", None)
callables = self.callables
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
if no_loader and (impl.callable_ or key in callables):
continue
self.expired_attributes.add(key)
if callables and key in callables:
del callables[key]
old = dict_.pop(key, NO_VALUE)
if impl.collection and old is not NO_VALUE:
impl._invalidate_collection(old)
if (
self._last_known_values
and key in self._last_known_values
and old is not NO_VALUE
):
self._last_known_values[key] = old
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def _load_expired(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.intersection(self.unmodified)
toload = toload.difference(
attr
for attr in toload
if not self.manager[attr].impl.load_on_unexpire
)
self.manager.expired_attribute_loader(self, toload, passive)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
self.expired_attributes.clear()
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return (
set(keys)
.intersection(self.manager)
.difference(self.committed_state)
)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return (
set(self.manager)
.difference(self.committed_state)
.difference(self.dict)
)
@property
def unloaded_expirable(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return self.unloaded
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr
for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
def _instance_dict(self):
return None
def _modified_event(
self, dict_, attr, previous, collection=False, is_userland=False
):
if attr:
if not attr.send_modified_events:
return
if is_userland and attr.key not in dict_:
raise sa_exc.InvalidRequestError(
"Can't flag attribute '%s' modified; it's not present in "
"the object state" % attr.key
)
if attr.key not in self.committed_state or is_userland:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
if attr.key in self._last_known_values:
self._last_known_values[attr.key] = NO_VALUE
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) or not self.modified:
self.modified = True
instance_dict = self._instance_dict()
if instance_dict:
has_modified = bool(instance_dict._modified)
instance_dict._modified.add(self)
else:
has_modified = False
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
# if identity map already had modified objects,
# assume autobegin already occurred, else check
# for autobegin
if not has_modified:
# inline of autobegin, to ensure session transaction
# snapshot is established
try:
session = _sessions[self.session_id]
except KeyError:
pass
else:
if session._transaction is None:
session._autobegin()
if inst is None and attr:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (self.manager[attr.key], base.state_class_str(self))
)
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
self.expired_attributes.difference_update(
set(keys).intersection(dict_)
)
# the per-keys commit removes object-level callables,
# while that of commit_all does not. it's not clear
# if this behavior has a clear rationale, however tests do
# ensure this is what it does.
if self.callables:
for key in (
set(self.callables).intersection(keys).intersection(dict_)
):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers for scalar attributes loaded are removed.
- lazy load callables for objects / collections *stay*
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter_, instance_dict=None):
"""Mass / highly inlined version of commit_all()."""
for state, dict_ in iter_:
state_dict = state.__dict__
state.committed_state.clear()
if "_pending_mutations" in state_dict:
del state_dict["_pending_mutations"]
state.expired_attributes.difference_update(dict_)
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_
)
@property
def history(self):
"""Return the current **pre-flush** change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. note::
The attribute history system tracks changes on a **per flush
basis**. Each time the :class:`.Session` is flushed, the history
of each attribute is reset to empty. The :class:`.Session` by
default autoflushes each time a :class:`_query.Query` is invoked.
For
options on how to control this, see :ref:`session_flushing`.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key, PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current **pre-flush** change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. note::
The attribute history system tracks changes on a **per flush
basis**. Each time the :class:`.Session` is flushed, the history
of each attribute is reset to empty. The :class:`.Session` by
default autoflushes each time a :class:`_query.Query` is invoked.
For
options on how to control this, see :ref:`session_flushing`.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key, PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
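# A tiny illustration of the PendingCollection semantics described in its
# docstring (not part of the module): a remove followed by an append of the
# same object cancels out instead of recording both.
#   pc = PendingCollection()
#   obj = object()
#   pc.remove(obj)   # no prior append, so recorded as a pending delete
#   pc.append(obj)   # cancels the pending delete rather than recording an add
#   assert obj not in pc.deleted_items and obj not in pc.added_items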
| 33.624023
| 80
| 0.589236
|
49001d6360760fe6592bbf956a28b9904e1940e9
| 690
|
py
|
Python
|
RegisterSchedules.py
|
timmo/gardeningPI
|
9a72660087c4cf8fb60ec50c74446416c5f1b53c
|
[
"Apache-2.0"
] | null | null | null |
RegisterSchedules.py
|
timmo/gardeningPI
|
9a72660087c4cf8fb60ec50c74446416c5f1b53c
|
[
"Apache-2.0"
] | null | null | null |
RegisterSchedules.py
|
timmo/gardeningPI
|
9a72660087c4cf8fb60ec50c74446416c5f1b53c
|
[
"Apache-2.0"
] | null | null | null |
import schedule
class RegisterSchedules:
    @staticmethod
    def registerSchedules(schedules):
        for toBeScheduled in schedules:
            RegisterSchedules.registerSchedule(toBeScheduled)
    @staticmethod
    def registerSchedule(toBeScheduled):
        schedule.every(toBeScheduled.recurrenceInDays).days.at(toBeScheduled.startTime).do(toBeScheduled.sprinkler.startSprinkler)
        schedule.every(toBeScheduled.recurrenceInDays).days.at(toBeScheduled.endTime).do(toBeScheduled.sprinkler.stopSprinkler)
        print('Schedule registered', toBeScheduled.sprinkler.name, toBeScheduled.startTime, '-', toBeScheduled.endTime, 'P', toBeScheduled.recurrenceInDays, 'D')
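# These registrations only fire if something pumps the schedule library's
# queue; a minimal hypothetical driver loop (`schedules` is built elsewhere
# in the project):
#   import time
#   RegisterSchedules.registerSchedules(schedules)
#   while True:
#       schedule.run_pending()
#       time.sleep(1)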
| 38.333333
| 162
| 0.766667
|
fd09c2f5163bf6f8487898970da8ded984a0c3ea
| 6,317
|
py
|
Python
|
src/datadog_api_client/v2/model/incident_teams_response.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 32
|
2021-01-07T15:09:56.000Z
|
2022-01-30T05:49:23.000Z
|
src/datadog_api_client/v2/model/incident_teams_response.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 228
|
2020-09-03T14:03:54.000Z
|
2022-03-31T20:16:12.000Z
|
src/datadog_api_client/v2/model/incident_teams_response.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 12
|
2020-09-15T21:36:03.000Z
|
2022-03-31T17:13:17.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
from datadog_api_client.v2.model.incident_services_response_meta import IncidentServicesResponseMeta
from datadog_api_client.v2.model.incident_team_included_items import IncidentTeamIncludedItems
from datadog_api_client.v2.model.incident_team_response_data import IncidentTeamResponseData
globals()["IncidentServicesResponseMeta"] = IncidentServicesResponseMeta
globals()["IncidentTeamIncludedItems"] = IncidentTeamIncludedItems
globals()["IncidentTeamResponseData"] = IncidentTeamResponseData
class IncidentTeamsResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"data": ([IncidentTeamResponseData],), # noqa: E501
"included": ([IncidentTeamIncludedItems],), # noqa: E501
"meta": (IncidentServicesResponseMeta,), # noqa: E501
}
discriminator = None
attribute_map = {
"data": "data", # noqa: E501
"included": "included", # noqa: E501
"meta": "meta", # noqa: E501
}
read_only_vars = {
"included", # noqa: E501
}
_composed_schemas = {}
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs): # noqa: E501
"""IncidentTeamsResponse - a model defined in OpenAPI
Args:
data ([IncidentTeamResponseData]): An array of incident teams.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            included ([IncidentTeamIncludedItems]): Included related resources which the user requested. [optional]  # noqa: E501
meta (IncidentServicesResponseMeta): [optional] # noqa: E501
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.data = data
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, data, *args, **kwargs): # noqa: E501
"""Helper creating a new instance from a response."""
self = super(IncidentTeamsResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.data = data
return self
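# A minimal construction sketch (guarded so it never runs on import; assumes
# datadog-api-client is installed): `data` is the only required argument, and
# an empty list of IncidentTeamResponseData instances passes the type check.
if __name__ == "__main__":
    resp = IncidentTeamsResponse(data=[])
    print(resp.data)  # -> []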
| 41.559211
| 130
| 0.613107
|
b84cbbe1d09a0a44d5dc7db8be132b012e8065dd
| 5,323
|
py
|
Python
|
nova/api/openstack/compute/flavors.py
|
lixiaoy1/nova
|
357b8b38e88300948bb2e07d1bbaabd1e9d7b60e
|
[
"Apache-2.0"
] | 1
|
2018-12-28T06:47:39.000Z
|
2018-12-28T06:47:39.000Z
|
nova/api/openstack/compute/flavors.py
|
lixiaoy1/nova
|
357b8b38e88300948bb2e07d1bbaabd1e9d7b60e
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
nova/api/openstack/compute/flavors.py
|
lixiaoy1/nova
|
357b8b38e88300948bb2e07d1bbaabd1e9d7b60e
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import flavors as schema
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova import objects
from nova.policies import flavor_extra_specs as fes_policies
from nova import utils
ALIAS = 'flavors'
class FlavorsController(wsgi.Controller):
"""Flavor controller for the OpenStack API."""
_view_builder_class = flavors_view.ViewBuilder
@validation.query_schema(schema.index_query)
@wsgi.expected_errors(400)
def index(self, req):
"""Return all flavors in brief."""
limited_flavors = self._get_flavors(req)
return self._view_builder.index(req, limited_flavors)
@validation.query_schema(schema.index_query)
@wsgi.expected_errors(400)
def detail(self, req):
"""Return all flavors in detail."""
context = req.environ['nova.context']
limited_flavors = self._get_flavors(req)
include_extra_specs = False
if api_version_request.is_supported(
req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
include_extra_specs = context.can(
fes_policies.POLICY_ROOT % 'index', fatal=False)
return self._view_builder.detail(
req, limited_flavors, include_extra_specs=include_extra_specs)
@wsgi.expected_errors(404)
def show(self, req, id):
"""Return data about the given flavor id."""
context = req.environ['nova.context']
try:
flavor = flavors.get_flavor_by_flavor_id(id, ctxt=context)
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
include_extra_specs = False
if api_version_request.is_supported(
req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
include_extra_specs = context.can(
fes_policies.POLICY_ROOT % 'index', fatal=False)
include_description = api_version_request.is_supported(
req, flavors_view.FLAVOR_DESCRIPTION_MICROVERSION)
return self._view_builder.show(
req, flavor, include_description=include_description,
include_extra_specs=include_extra_specs)
def _parse_is_public(self, is_public):
"""Parse is_public into something usable."""
if is_public is None:
# preserve default value of showing only public flavors
return True
elif utils.is_none_string(is_public):
return None
else:
try:
return strutils.bool_from_string(is_public, strict=True)
except ValueError:
msg = _('Invalid is_public filter [%s]') % is_public
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_flavors(self, req):
"""Helper function that returns a list of flavor dicts."""
filters = {}
sort_key = req.params.get('sort_key') or 'flavorid'
sort_dir = req.params.get('sort_dir') or 'asc'
limit, marker = common.get_limit_and_marker(req)
context = req.environ['nova.context']
if context.is_admin:
# Only admin has query access to all flavor types
filters['is_public'] = self._parse_is_public(
req.params.get('is_public', None))
else:
filters['is_public'] = True
filters['disabled'] = False
if 'minRam' in req.params:
try:
filters['min_memory_mb'] = int(req.params['minRam'])
except ValueError:
msg = _('Invalid minRam filter [%s]') % req.params['minRam']
raise webob.exc.HTTPBadRequest(explanation=msg)
if 'minDisk' in req.params:
try:
filters['min_root_gb'] = int(req.params['minDisk'])
except ValueError:
msg = (_('Invalid minDisk filter [%s]') %
req.params['minDisk'])
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limited_flavors = objects.FlavorList.get_all(context,
filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
return limited_flavors
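# Standalone illustration of the tri-state is_public filter (a hypothetical
# helper mirroring _parse_is_public above, reusing the same strutils
# semantics; guarded so it never runs on import):
if __name__ == "__main__":
    def _illustrate_is_public(value):
        if value is None:
            return True        # default: show only public flavors
        if utils.is_none_string(value):
            return None        # explicit "none": disable public/private filtering
        return strutils.bool_from_string(value, strict=True)
    assert _illustrate_is_public(None) is True
    assert _illustrate_is_public('none') is None
    assert _illustrate_is_public('false') is False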
| 39.139706
| 78
| 0.656585
|
8c2314679e3d80d5f013361816aff2da47c8dcea
| 3,009
|
py
|
Python
|
linorobot2_bringup/launch/bringup.launch.py
|
robofoundry/linorobot2
|
678fad08822da891842bbdafa024ad3aefb9ac00
|
[
"Apache-2.0"
] | null | null | null |
linorobot2_bringup/launch/bringup.launch.py
|
robofoundry/linorobot2
|
678fad08822da891842bbdafa024ad3aefb9ac00
|
[
"Apache-2.0"
] | null | null | null |
linorobot2_bringup/launch/bringup.launch.py
|
robofoundry/linorobot2
|
678fad08822da891842bbdafa024ad3aefb9ac00
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 Juan Miguel Jimeno
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution, PythonExpression
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
from launch.conditions import IfCondition
def generate_launch_description():
sensors_launch_path = PathJoinSubstitution(
[FindPackageShare('linorobot2_bringup'), 'launch', 'sensors.launch.py']
)
#joy_launch_path = PathJoinSubstitution(
# [FindPackageShare('linorobot2_bringup'), 'launch', 'joy_teleop.launch.py']
#)
joy_launch_path = PathJoinSubstitution(
[FindPackageShare('linorobot2_bringup'), 'launch', 'teleop_twist.launch.py']
)
description_launch_path = PathJoinSubstitution(
[FindPackageShare('linorobot2_description'), 'launch', 'description.launch.py']
)
ekf_config_path = PathJoinSubstitution(
[FindPackageShare("linorobot2_base"), "config", "ekf.yaml"]
)
return LaunchDescription([
DeclareLaunchArgument(
name='base_serial_port',
default_value='/dev/ttyACM0',
description='Linorobot Base Serial Port'
),
DeclareLaunchArgument(
name='joy',
default_value='false',
description='Use Joystick'
),
Node(
package='micro_ros_agent',
executable='micro_ros_agent',
name='micro_ros_agent',
output='screen',
arguments=['serial', '--dev', LaunchConfiguration("base_serial_port")]
),
Node(
package='robot_localization',
executable='ekf_node',
name='ekf_filter_node',
output='screen',
parameters=[
ekf_config_path
],
remappings=[("odometry/filtered", "odom")]
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(description_launch_path)
),
# IncludeLaunchDescription(
# PythonLaunchDescriptionSource(sensors_launch_path),
# ),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(joy_launch_path),
condition=IfCondition(LaunchConfiguration("joy")),
)
])
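# Typical invocation sketch (assumes a sourced ROS 2 workspace in which this
# package has been built):
#
#     ros2 launch linorobot2_bringup bringup.launch.py joy:=true \
#         base_serial_port:=/dev/ttyACM0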
| 34.193182
| 92
| 0.674975
|
b3a7f1920715cb4f688467c72f8e6a2b5e2bff33
| 5,680
|
py
|
Python
|
tests/llvm/test_random.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/llvm/test_random.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/llvm/test_random.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import ctypes
import numpy as np
import pytest
import random
from psyneulink.core import llvm as pnlvm
SEED = 0
@pytest.mark.llvm
@pytest.mark.benchmark(group="Mersenne Twister integer PRNG")
@pytest.mark.parametrize('mode', ['numpy',
pytest.param('LLVM', marks=pytest.mark.llvm),
pytest.param('PTX', marks=pytest.mark.cuda)])
# Python uses a different algorithm, so it is skipped in this test
def test_random(benchmark, mode):
res = []
if mode == 'numpy':
# Python treats every seed as array, and numpy promotes elements to int64
state = np.random.RandomState(np.asarray([SEED]))
res += [state.randint(0xffffffff, dtype=np.int64)]
res += [state.randint(0xffffffff, dtype=np.int64)]
benchmark(state.randint, 0xffffffff, dtype=np.int64)
elif mode == 'LLVM':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init')
state = init_fun.byref_arg_types[0]()
init_fun(state, SEED)
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32')
out = ctypes.c_longlong()
gen_fun(state, out)
res += [out.value]
gen_fun(state, out)
res += [out.value]
benchmark(gen_fun, state, out)
elif mode == 'PTX':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init')
state = init_fun.byref_arg_types[0]()
gpu_state = pnlvm.jit_engine.pycuda.driver.to_device(bytearray(state))
init_fun.cuda_call(gpu_state, np.int64(SEED))
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32')
out = np.asarray([0], dtype=np.int64)
gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out)
gen_fun.cuda_call(gpu_state, gpu_out)
res += [out[0]]
gen_fun.cuda_call(gpu_state, gpu_out)
res += [out[0]]
benchmark(gen_fun.cuda_call, gpu_state, gpu_out)
assert np.allclose(res, [3626764237, 1654615998])
@pytest.mark.llvm
@pytest.mark.benchmark(group="Mersenne Twister floating point PRNG")
@pytest.mark.parametrize('mode', ['Python', 'numpy',
pytest.param('LLVM', marks=pytest.mark.llvm),
pytest.param('PTX', marks=pytest.mark.cuda)])
def test_random_float(benchmark, mode):
res = []
if mode == 'Python':
# Python treats every seed as array
state = random.Random(SEED)
res += [state.random()]
res += [state.random()]
benchmark(state.random)
elif mode == 'numpy':
# Python treats every seed as array, and numpy promotes elements to int64
state = np.random.RandomState(np.asarray([SEED]))
res += [state.random_sample()]
res += [state.random_sample()]
benchmark(state.random_sample)
elif mode == 'LLVM':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init')
state = init_fun.byref_arg_types[0]()
init_fun(state, SEED)
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double')
out = ctypes.c_double()
gen_fun(state, out)
res += [out.value]
gen_fun(state, out)
res += [out.value]
benchmark(gen_fun, state, out)
elif mode == 'PTX':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init')
state = init_fun.byref_arg_types[0]()
gpu_state = pnlvm.jit_engine.pycuda.driver.to_device(bytearray(state))
init_fun.cuda_call(gpu_state, np.int64(SEED))
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double')
out = np.asfarray([0.0], dtype=np.float64)
gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out)
gen_fun.cuda_call(gpu_state, gpu_out)
res += [out[0]]
gen_fun.cuda_call(gpu_state, gpu_out)
res += [out[0]]
benchmark(gen_fun.cuda_call, gpu_state, gpu_out)
assert np.allclose(res, [0.8444218515250481, 0.7579544029403025])
@pytest.mark.llvm
@pytest.mark.benchmark(group="Marsenne Twister Normal distribution")
@pytest.mark.parametrize('mode', ['numpy',
pytest.param('LLVM', marks=pytest.mark.llvm),
pytest.param('PTX', marks=pytest.mark.cuda)])
# Python uses a different algorithm, so it is skipped in this test
def test_random_normal(benchmark, mode):
if mode == 'numpy':
# Python treats every seed as array, and numpy promotes elements to int64
state = np.random.RandomState(np.asarray([SEED]))
res = state.normal()
benchmark(state.normal)
elif mode == 'LLVM':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init')
state = init_fun.byref_arg_types[0]()
init_fun(state, SEED)
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal')
out = ctypes.c_double()
gen_fun(state, out)
res = out.value
benchmark(gen_fun, state, out)
elif mode == 'PTX':
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init')
state = init_fun.byref_arg_types[0]()
gpu_state = pnlvm.jit_engine.pycuda.driver.to_device(bytearray(state))
init_fun.cuda_call(gpu_state, np.int64(SEED))
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal')
out = np.asfarray([0.0], dtype=np.float64)
gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out)
gen_fun.cuda_call(gpu_state, gpu_out)
res = out[0]
benchmark(gen_fun.cuda_call, gpu_state, gpu_out)
assert np.allclose(res, 0.4644982638709743)
| 40.571429
| 81
| 0.644366
|
73ac6776d0ffbaafa30acac519e1b4187bb867e1
| 8,340
|
py
|
Python
|
gamestonk_terminal/fundamental_analysis/fa_menu.py
|
sandsturm/GamestonkTerminal
|
1969ff3b251711099a448024ec71e5b4e50413f7
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/fundamental_analysis/fa_menu.py
|
sandsturm/GamestonkTerminal
|
1969ff3b251711099a448024ec71e5b4e50413f7
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/fundamental_analysis/fa_menu.py
|
sandsturm/GamestonkTerminal
|
1969ff3b251711099a448024ec71e5b4e50413f7
|
[
"MIT"
] | null | null | null |
import argparse
from gamestonk_terminal.fundamental_analysis import alpha_vantage_api as av_api
from gamestonk_terminal.fundamental_analysis import business_insider_api as bi_api
from gamestonk_terminal.fundamental_analysis import (
financial_modeling_prep_api as fmp_api,
)
from gamestonk_terminal.fundamental_analysis import finviz_api as fvz_api
from gamestonk_terminal.fundamental_analysis import market_watch_api as mw_api
from gamestonk_terminal.fundamental_analysis import yahoo_finance_api as yf_api
from gamestonk_terminal.helper_funcs import get_flair
from gamestonk_terminal.menu import session
from prompt_toolkit.completion import NestedCompleter
def print_fundamental_analysis(s_ticker, s_start, s_interval):
""" Print help """
s_intraday = (f"Intraday {s_interval}", "Daily")[s_interval == "1440min"]
if s_start:
print(f"\n{s_intraday} Stock: {s_ticker} (from {s_start.strftime('%Y-%m-%d')})")
else:
print(f"\n{s_intraday} Stock: {s_ticker}")
print("\nFundamental Analysis:") # https://github.com/JerBouma/FundamentalAnalysis
print(" help show this fundamental analysis menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print("")
print(" screener screen info about the company [Finviz]")
print(" mgmt management team of the company [Business Insider]")
print("")
print("Market Watch API")
print(" income income statement of the company")
print(" balance balance sheet of the company")
print(" cash cash flow statement of the company")
print("")
print("Yahoo Finance API")
print(" info information scope of the company")
print(" shrs shareholders of the company")
print(" sust sustainability values of the company")
print(" cal calendar earnings and estimates of the company")
print("")
print("Alpha Vantage API")
print(" overview overview of the company")
print(" alpha_income income statements of the company")
print(" alpha_balance balance sheet of the company")
print(" alpha_cash cash flow of the company")
print(" earnings earnings dates and reported EPS")
print("")
print("Financial Modeling Prep API")
print(" profile profile of the company")
print(" quote quote of the company")
print(" enterprise enterprise value of the company over time")
print(" dcf discounted cash flow of the company over time")
print(" fmp_income income statements of the company")
print(" fmp_balance balance sheet of the company")
print(" fmp_cash cash flow statement of the company")
print(" metrics key metrics of the company")
print(" ratios financial ratios of the company")
print(" growth financial statement growth of the company")
print("")
return
def key_metrics_explained(l_args):
parser = argparse.ArgumentParser(
add_help=False,
prog="info",
description="""
Provides information about main key metrics. Namely: EBITDA,
EPS, P/E, PEG, FCF, P/B, ROE, DPR, P/S, Dividend Yield Ratio, D/E, and Beta.
""",
)
try:
(_, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}")
filepath = "fundamental_analysis/key_metrics_explained.txt"
with open(filepath) as fp:
line = fp.readline()
while line:
print(f"{line.strip()}")
line = fp.readline()
print("")
except Exception as e:
print(e)
print("ERROR!\n")
return
# pylint: disable=too-many-branches
def fa_menu(s_ticker, s_start, s_interval):
# Add list of arguments that the fundamental analysis parser accepts
fa_parser = argparse.ArgumentParser(prog="fa", add_help=False)
choices = [
"help",
"q",
"quit",
"screener",
"mgmt",
"info",
"shrs",
"sust",
"cal",
"income",
"balance",
"cash",
"overview",
"key",
"alpha_income",
"alpha_balance",
"alpha_cash",
"earnings",
"profile",
"quote",
"enterprise",
"dcf",
"fmp_income",
"fmp_balance",
"fmp_cash",
"metrics",
"ratios",
"growth",
]
fa_parser.add_argument("cmd", choices=choices)
completer = NestedCompleter.from_nested_dict({c: None for c in choices})
print_fundamental_analysis(s_ticker, s_start, s_interval)
# Loop forever and ever
while True:
# Get input command from user
if session:
as_input = session.prompt(
f"{get_flair()} (fa)> ",
completer=completer,
)
else:
as_input = input(f"{get_flair()} (fa)> ")
# Parse fundamental analysis command of the list of possible commands
try:
(ns_known_args, l_args) = fa_parser.parse_known_args(as_input.split())
except SystemExit:
print("The command selected doesn't exist\n")
continue
if ns_known_args.cmd == "help":
print_fundamental_analysis(s_ticker, s_start, s_interval)
elif ns_known_args.cmd == "q":
# Just leave the FA menu
return False
elif ns_known_args.cmd == "quit":
# Abandon the program
return True
# BUSINESS INSIDER API
elif ns_known_args.cmd == "mgmt":
bi_api.management(l_args, s_ticker)
# FINVIZ API
elif ns_known_args.cmd == "screener":
fvz_api.screener(l_args, s_ticker)
# MARKET WATCH API
elif ns_known_args.cmd == "income":
mw_api.income(l_args, s_ticker)
elif ns_known_args.cmd == "balance":
mw_api.balance(l_args, s_ticker)
elif ns_known_args.cmd == "cash":
mw_api.cash(l_args, s_ticker)
# YAHOO FINANCE API
elif ns_known_args.cmd == "info":
yf_api.info(l_args, s_ticker)
elif ns_known_args.cmd == "shrs":
yf_api.shareholders(l_args, s_ticker)
elif ns_known_args.cmd == "sust":
yf_api.sustainability(l_args, s_ticker)
elif ns_known_args.cmd == "cal":
yf_api.calendar_earnings(l_args, s_ticker)
# ALPHA VANTAGE API
elif ns_known_args.cmd == "overview":
av_api.overview(l_args, s_ticker)
elif ns_known_args.cmd == "alpha_incom":
av_api.income_statement(l_args, s_ticker)
elif ns_known_args.cmd == "alpha_balance":
av_api.balance_sheet(l_args, s_ticker)
elif ns_known_args.cmd == "alpha_cash":
av_api.cash_flow(l_args, s_ticker)
elif ns_known_args.cmd == "earnings":
av_api.earnings(l_args, s_ticker)
# FINANCIAL MODELING PREP API
# Details:
elif ns_known_args.cmd == "profile":
fmp_api.profile(l_args, s_ticker)
elif ns_known_args.cmd == "quote":
fmp_api.quote(l_args, s_ticker)
elif ns_known_args.cmd == "enterprise":
fmp_api.enterprise(l_args, s_ticker)
elif ns_known_args.cmd == "dcf":
fmp_api.discounted_cash_flow(l_args, s_ticker)
# Financial statement:
elif ns_known_args.cmd == "fmp_income":
fmp_api.income_statement(l_args, s_ticker)
elif ns_known_args.cmd == "fmp_balance":
fmp_api.balance_sheet(l_args, s_ticker)
elif ns_known_args.cmd == "fmp_cash":
fmp_api.cash_flow(l_args, s_ticker)
# Ratios:
elif ns_known_args.cmd == "metrics":
fmp_api.key_metrics(l_args, s_ticker)
elif ns_known_args.cmd == "ratios":
fmp_api.financial_ratios(l_args, s_ticker)
elif ns_known_args.cmd == "growth":
fmp_api.financial_statement_growth(l_args, s_ticker)
else:
print("Command not recognized!")
| 33.36
| 88
| 0.611391
|
2f5538556fa6d304a1620ae227e2e21616ef19d7
| 13,776
|
py
|
Python
|
pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/core/reshape/tile.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/core/reshape/tile.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/core/reshape/tile.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ...
Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ...
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]),
... 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1])
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
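# Edge-adjustment sketch (guarded so it never runs on import): with an
# integer `bins`, the lowest edge is nudged by ~0.1% of the range so that
# min(x) falls inside the first bin rather than on its open edge.
if __name__ == "__main__":
    _cats, _edges = cut(np.array([1.0, 2.0, 3.0]), 2, retbins=True)
    assert _edges[0] < 1.0          # first edge extended below the minimum
    assert (_cats.codes == np.array([0, 0, 1])).all()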
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
    bins : list-like of bins
dtype : dtype of data
Raises
------
    ValueError if bins are not compatible with dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex.from_intervals(
[Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
    separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
x = np.asarray(x)
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
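# Behavior sketch (guarded so it never runs on import): when the whole part
# is 0, the digit count shifts with the magnitude of the fraction so the
# leading significant digits survive rounding.
if __name__ == "__main__":
    assert np.isclose(_round_frac(0.000123456, 3), 0.000123)  # digits = 6
    assert np.isclose(_round_frac(12.3456, 3), 12.346)        # digits = 3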
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
| 33.115385
| 79
| 0.60119
|
1562cbf064f2af3d5de088682077de2cab6dbdf7
| 3,197
|
py
|
Python
|
segment_text_line/data_conversion.py
|
hushukai/Chinese-ancient-book-recognition-HSK
|
de5b6474dc4346524d95b405223c721aae5b500b
|
[
"Apache-2.0"
] | 2
|
2020-04-12T08:33:50.000Z
|
2020-07-03T09:15:56.000Z
|
segment_text_line/data_conversion.py
|
hushukai/Chinese-ancient-book-recognition-HSK
|
de5b6474dc4346524d95b405223c721aae5b500b
|
[
"Apache-2.0"
] | null | null | null |
segment_text_line/data_conversion.py
|
hushukai/Chinese-ancient-book-recognition-HSK
|
de5b6474dc4346524d95b405223c721aae5b500b
|
[
"Apache-2.0"
] | 4
|
2020-07-03T09:15:58.000Z
|
2020-07-17T09:24:08.000Z
|
# -*- encoding: utf-8 -*-
# Author: hushukai
import os
from PIL import Image
from segment_base.data_pipeline import get_image_and_split_pos
from segment_base.data_pipeline import rotate_90_degrees
from segment_base.visualize import draw_split_lines
from util import check_or_makedirs
from config import SEGMENT_TEXT_LINE_ROOT_DIR
from config import ONE_TEXT_LINE_IMGS_H, ONE_TEXT_LINE_TAGS_FILE_H
from config import ONE_TEXT_LINE_IMGS_V, ONE_TEXT_LINE_TAGS_FILE_V
from config import ONE_TEXT_LINE_TFRECORDS_H, ONE_TEXT_LINE_TFRECORDS_V
from config import SEGMENT_TEXT_LINE_TAGS_FILE_H, SEGMENT_TEXT_LINE_TAGS_FILE_V
from config import SEGMENT_TEXT_LINE_TFRECORDS_H, SEGMENT_TEXT_LINE_TFRECORDS_V
def convert_annotation(img_sources=None, tfrecords_dir=None, dest_file=None):
assert [img_sources, tfrecords_dir].count(None) == 1
check_or_makedirs(os.path.dirname(dest_file))
with open(dest_file, "w", encoding="utf-8") as fw:
if img_sources is not None:
for src_file, root_dir in img_sources:
with open(src_file, "r", encoding="utf-8") as fr:
for line in fr:
img_name, tags_str = line.strip().split("\t")
img_path = os.path.join(root_dir, img_name)
fw.write(img_path + "\t" + tags_str + "\n")
elif tfrecords_dir is not None:
assert os.path.exists(tfrecords_dir)
for file in os.listdir(tfrecords_dir):
if file.endswith(".tfrecords"):
file_path = os.path.join(tfrecords_dir, file)
fw.write(file_path + "\n")
def check_tags(tags_file, segment_task, text_type):
with open(tags_file, "r", encoding="utf8") as fr:
lines = [line.strip() for line in fr.readlines()]
save_path = os.path.join(SEGMENT_TEXT_LINE_ROOT_DIR, "samples")
check_or_makedirs(save_path)
    text_type = text_type[0].lower()
    for i, line in enumerate(lines):
        np_img, split_pos = get_image_and_split_pos(line, segment_task=segment_task)
        if (segment_task, text_type) in (("book_page", "h"), ("double_line", "h"), ("text_line", "v"), ("mix_line", "v")):
            np_img, split_pos = rotate_90_degrees(np_img, split_pos)
np_img = draw_split_lines(np_img, split_pos)
PIL_img = Image.fromarray(np_img)
PIL_img.save(os.path.join(save_path, str(i) + ".jpg"))
def main():
# convert_annotation(img_sources=[(ONE_TEXT_LINE_TAGS_FILE_H, ONE_TEXT_LINE_IMGS_H)], dest_file=SEGMENT_TEXT_LINE_TAGS_FILE_H)
# convert_annotation(img_sources=[(ONE_TEXT_LINE_TAGS_FILE_V, ONE_TEXT_LINE_IMGS_V)], dest_file=SEGMENT_TEXT_LINE_TAGS_FILE_V)
# convert_annotation(tfrecords_dir=ONE_TEXT_LINE_TFRECORDS_H, dest_file=SEGMENT_TEXT_LINE_TFRECORDS_H)
convert_annotation(tfrecords_dir=ONE_TEXT_LINE_TFRECORDS_V, dest_file=SEGMENT_TEXT_LINE_TFRECORDS_V)
# check_tags(tags_file=SEGMENT_TEXT_LINE_TAGS_FILE_H, segment_task="text_line", text_type="horizontal")
# check_tags(tags_file=SEGMENT_TEXT_LINE_TAGS_FILE_V, segment_task="text_line", text_type="vertical")
if __name__ == '__main__':
    main()
    print("Done !")
| 44.402778
| 130
| 0.713794
|
50cfa6fec7275376c7d47d4732a442ba28baa5d4
| 906
|
py
|
Python
|
Sword.py
|
victorgscorreia/cgtrab2
|
9def410124f1a2b125a47b4716fe7714529cb292
|
[
"MIT"
] | null | null | null |
Sword.py
|
victorgscorreia/cgtrab2
|
9def410124f1a2b125a47b4716fe7714529cb292
|
[
"MIT"
] | null | null | null |
Sword.py
|
victorgscorreia/cgtrab2
|
9def410124f1a2b125a47b4716fe7714529cb292
|
[
"MIT"
] | null | null | null |
from Base import *
from Object import *
'''
This function creates a Sword object and returns it.
@PARAMETERS
id_tex_livre - first unused texture id - passed as a list of length 1
vertices_list - list of vertex coordinates
textures_coord_list - list of texture coordinates
normals_list - list of vertex normals
@RETURN
object - the created Sword object
'''
def cria_sword(id_tex_livre, vertices_list, textures_coord_list, normals_list):
    # adding the names of the textures used to a list
textures_names = []
textures_names.append("Sword/Sword_texture.png")
filename = "Sword/sword.obj"
mtl_filename = "Sword/sword.mtl"
    # creating the object
espada = Object(filename, mtl_filename, textures_names, -290, -50, -34, math.pi/2, math.pi/2, 0, 0.1, id_tex_livre, vertices_list, textures_coord_list, normals_list)
return espada
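# Usage sketch (hypothetical empty state lists; assumes the Sword/ asset
# files exist and, presumably, an active OpenGL context for Object to load
# its texture into):
#
#     id_tex_livre = [0]
#     vertices_list, textures_coord_list, normals_list = [], [], []
#     espada = cria_sword(id_tex_livre, vertices_list,
#                         textures_coord_list, normals_list)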
| 39.391304
| 169
| 0.745033
|
52da9374b4fddbc28d9c09566fce4263a379d978
| 3,774
|
py
|
Python
|
tlopu/backprop.py
|
lightonai/transfer-learning-opu
|
15b6093cb6f69cd35b4d9af168c45055800b4567
|
[
"MIT"
] | 26
|
2020-02-20T21:42:43.000Z
|
2022-01-11T03:38:05.000Z
|
tlopu/backprop.py
|
lightonai/transfer-learning-opu
|
15b6093cb6f69cd35b4d9af168c45055800b4567
|
[
"MIT"
] | 4
|
2020-06-28T01:51:20.000Z
|
2022-03-12T00:16:19.000Z
|
tlopu/backprop.py
|
lightonai/transfer-learning-opu
|
15b6093cb6f69cd35b4d9af168c45055800b4567
|
[
"MIT"
] | 3
|
2020-12-20T13:34:21.000Z
|
2021-05-04T09:42:29.000Z
|
from time import time
import torch
def train_model(model, train_loader, criterion, optimizer, device='cpu'):
"""
Trains the given model for one epoch on the given dataset.
Parameters
----------
model: Pytorch model,
neural net model.
train_loader: torch Dataloader,
contains the training images.
criterion: torch.nn.modules.loss,
criterion for the determination of the loss.
optimizer: torch.optim,
optimizer for the training.
device: string,
        device to use for the computation. Choose between 'cpu' and 'cuda:x', where
x is the GPU number. Defaults to 'cpu'.
Returns
-------
model: torch model,
model trained on the given dataset.
epoch_loss: float,
loss on the train set.
epoch_acc: float,
accuracy on the train set [%].
"""
tot_train_images = len(train_loader.dataset)
model.train()
running_loss = 0.0
running_corrects = 0
for batch_id, (images, labels) in enumerate(train_loader):
optimizer.zero_grad()
images = images.to(torch.device(device))
labels = labels.to(torch.device(device))
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
_, preds = torch.max(outputs.data, 1)
running_loss += loss.data
running_corrects += torch.sum(preds == labels.data)
torch.cuda.synchronize()
del images, outputs
epoch_loss = running_loss.item() / tot_train_images
epoch_acc = running_corrects.item() / tot_train_images * 100
return model, epoch_loss, epoch_acc
def evaluate_model(model, test_loader, criterion, dtype='float32', device='cpu'):
"""
model: Pytorch model,
neural net model.
test_loader: torch Dataloader,
contains the test images.
criterion: torch.nn.modules.loss,
        criterion for the determination of the loss.
dtype: str,
dtype for the inference. Choose between float32 and float16.
device: string,
        device to use for the computation. Choose between 'cpu' and 'cuda:x', where
x is the GPU number. Defaults to 'cpu'.
Returns
-------
test_loss: float,
loss on the test set.
test_acc: float,
accuracy on the test set [%].
inference_full: float,
inference time, including the data loading.
inference_conv_time: float,
inference time for the convolutional part only.
"""
tot_test_images = len(test_loader.dataset)
running_loss = 0.0
running_corrects = 0
inference_conv_time = 0
if dtype == 'float16':
model.half()
model.to(torch.device(device)).eval()
torch.cuda.synchronize()
full_start = time()
with torch.no_grad():
for batch_id, (images, labels) in enumerate(test_loader):
images = images.to(torch.device(device))
labels = labels.to(torch.device(device))
if dtype == 'float16':
images = images.half()
torch.cuda.synchronize()
t0 = time()
outputs = model(images)
torch.cuda.synchronize()
inference_conv_time += time() - t0
loss = criterion(outputs, labels)
_, preds = torch.max(outputs.data, 1)
running_loss += loss.data
running_corrects += torch.sum(preds == labels.data)
del images, outputs
test_loss = running_loss.item() / tot_test_images
test_acc = running_corrects.item() / tot_test_images * 100
torch.cuda.synchronize()
inference_full = time() - full_start
return test_loss, test_acc, inference_full, inference_conv_time
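# A minimal end-to-end sketch (guarded so it never runs on import; the model
# and loaders are hypothetical, and a CUDA build is assumed because both
# helpers call torch.cuda.synchronize() unconditionally):
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    model = nn.Linear(8, 2).to('cuda:0')
    data = TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,)))
    loader = DataLoader(data, batch_size=8)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    model, loss, acc = train_model(model, loader, criterion, optimizer,
                                   device='cuda:0')
    results = evaluate_model(model, loader, criterion, device='cuda:0')
    print(loss, acc, results)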
| 27.347826
| 82
| 0.627186
|
88e1dc8fe07a087aa16380c52491a1f840cad70b
| 8,253
|
py
|
Python
|
tests/shared/nlu/training_data/test_message.py
|
musa-atlihan/rasa
|
7b4d6bf783055da5afbe73aab104e2eb855a3ad9
|
[
"Apache-2.0"
] | null | null | null |
tests/shared/nlu/training_data/test_message.py
|
musa-atlihan/rasa
|
7b4d6bf783055da5afbe73aab104e2eb855a3ad9
|
[
"Apache-2.0"
] | 187
|
2020-02-25T16:07:06.000Z
|
2022-03-01T13:42:41.000Z
|
tests/shared/nlu/training_data/test_message.py
|
musa-atlihan/rasa
|
7b4d6bf783055da5afbe73aab104e2eb855a3ad9
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Text, List
import pytest
import numpy as np
import scipy.sparse
from rasa.shared.nlu.training_data.features import Features
from rasa.shared.nlu.constants import (
TEXT,
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
ACTION_TEXT,
ACTION_NAME,
INTENT,
RESPONSE,
INTENT_NAME,
)
import rasa.shared.nlu.training_data.message
from rasa.shared.nlu.training_data.message import Message
@pytest.mark.parametrize(
"features, attribute, featurizers, expected_seq_features, expected_sen_features",
[
(None, TEXT, [], None, None),
(
[Features(np.array([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "test")],
TEXT,
[],
[1, 1, 0],
None,
),
(
[
Features(np.array([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c2"),
Features(np.array([1, 2, 2]), FEATURE_TYPE_SENTENCE, TEXT, "c1"),
Features(np.array([1, 2, 1]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"),
],
TEXT,
[],
[1, 1, 0, 1, 2, 1],
[1, 2, 2],
),
(
[
Features(np.array([1, 1, 0]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"),
Features(np.array([1, 2, 1]), FEATURE_TYPE_SENTENCE, TEXT, "test"),
Features(np.array([1, 1, 1]), FEATURE_TYPE_SEQUENCE, TEXT, "test"),
],
TEXT,
["c1"],
[1, 1, 0],
None,
),
],
)
def test_get_dense_features(
features: Optional[List[Features]],
attribute: Text,
featurizers: List[Text],
expected_seq_features: Optional[List[Features]],
expected_sen_features: Optional[List[Features]],
):
message = Message(data={TEXT: "This is a test sentence."}, features=features)
actual_seq_features, actual_sen_features = message.get_dense_features(
attribute, featurizers
)
if actual_seq_features:
actual_seq_features = actual_seq_features.features
if actual_sen_features:
actual_sen_features = actual_sen_features.features
assert np.all(actual_sen_features == expected_sen_features)
assert np.all(actual_seq_features == expected_seq_features)
@pytest.mark.parametrize(
"features, attribute, featurizers, expected_seq_features, expected_sen_features",
[
(None, TEXT, [], None, None),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"test",
)
],
TEXT,
[],
[1, 1, 0],
None,
),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"c2",
),
Features(
scipy.sparse.csr_matrix([1, 2, 2]),
FEATURE_TYPE_SENTENCE,
TEXT,
"c1",
),
Features(
scipy.sparse.csr_matrix([1, 2, 1]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"c1",
),
],
TEXT,
[],
[1, 1, 0, 1, 2, 1],
[1, 2, 2],
),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"c1",
),
Features(
scipy.sparse.csr_matrix([1, 2, 1]),
FEATURE_TYPE_SENTENCE,
TEXT,
"test",
),
Features(
scipy.sparse.csr_matrix([1, 1, 1]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"test",
),
],
TEXT,
["c1"],
[1, 1, 0],
None,
),
],
)
def test_get_sparse_features(
features: Optional[List[Features]],
attribute: Text,
featurizers: List[Text],
expected_seq_features: Optional[List[Features]],
expected_sen_features: Optional[List[Features]],
):
message = Message(data={TEXT: "This is a test sentence."}, features=features)
actual_seq_features, actual_sen_features = message.get_sparse_features(
attribute, featurizers
)
if actual_seq_features:
actual_seq_features = actual_seq_features.features
if actual_sen_features:
actual_sen_features = actual_sen_features.features
if expected_seq_features is None:
assert actual_seq_features is None
else:
assert actual_seq_features is not None
assert np.all(actual_seq_features.toarray() == expected_seq_features)
if expected_sen_features is None:
assert actual_sen_features is None
else:
assert actual_sen_features is not None
assert np.all(actual_sen_features.toarray() == expected_sen_features)
@pytest.mark.parametrize(
"features, attribute, featurizers, expected",
[
(None, TEXT, [], False),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"test",
)
],
TEXT,
[],
True,
),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"c2",
),
Features(np.ndarray([1, 2, 2]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"),
],
TEXT,
[],
True,
),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"c2",
),
Features(np.ndarray([1, 2, 2]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"),
],
TEXT,
["c1"],
True,
),
(
[
Features(
scipy.sparse.csr_matrix([1, 1, 0]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"c2",
),
Features(np.ndarray([1, 2, 2]), FEATURE_TYPE_SEQUENCE, TEXT, "c1"),
],
TEXT,
["other"],
False,
),
],
)
def test_features_present(
features: Optional[List[Features]],
attribute: Text,
featurizers: List[Text],
expected: bool,
):
message = Message(data={TEXT: "This is a test sentence."}, features=features)
actual = message.features_present(attribute, featurizers)
assert actual == expected
def test_ordered():
target = {"a": [1, 3, 2], "c": "a", "b": 1}
assert rasa.shared.nlu.training_data.message.ordered(target) == [
("a", [1, 2, 3]),
("b", 1),
("c", "a"),
]
def test_build_from_action():
test_action_name = "test_action_name"
test_action_text = "test action text"
assert Message.build_from_action(
action_text=test_action_text, action_name=test_action_name
) == Message(data={ACTION_NAME: test_action_name, ACTION_TEXT: test_action_text})
@pytest.mark.parametrize(
"message, core_message",
[
(Message({INTENT: "intent", TEXT: "text"}), False),
(Message({RESPONSE: "response", TEXT: "text"}), False),
(Message({INTENT: "intent"}), True),
(Message({ACTION_TEXT: "action text", ACTION_NAME: ""}), True),
(Message({ACTION_NAME: "action"}), True),
(Message({TEXT: "text"}), True),
(Message({TEXT: None, INTENT_NAME: "affirm"}), True),
],
)
def test_is_core_message(
message: Message, core_message: bool,
):
assert core_message == message.is_core_message()
| 28.65625
| 85
| 0.491458
|
561f9e890e2ae2751b23a30346efa749b390ec9c
| 807
|
py
|
Python
|
pyprob/distributions/gamma.py
|
FrancescoPinto/pyprob
|
5288264695fe27148286ff88ecead01b7ac475c6
|
[
"BSD-2-Clause"
] | null | null | null |
pyprob/distributions/gamma.py
|
FrancescoPinto/pyprob
|
5288264695fe27148286ff88ecead01b7ac475c6
|
[
"BSD-2-Clause"
] | null | null | null |
pyprob/distributions/gamma.py
|
FrancescoPinto/pyprob
|
5288264695fe27148286ff88ecead01b7ac475c6
|
[
"BSD-2-Clause"
] | null | null | null |
import torch
from . import Distribution
from .. import util
class Gamma(Distribution):
def __init__(self, concentration, rate):
concentration = util.to_tensor(concentration)
rate = util.to_tensor(rate)
super().__init__(name='Gamma', address_suffix='Gamma', torch_dist=torch.distributions.Gamma(concentration=concentration, rate=rate))
def __repr__(self):
return 'Gamma(concentration={}, rate={})'.format(self.concentration.cpu().numpy().tolist(), self.rate.cpu().numpy().tolist())
@property
def concentration(self):
return self._torch_dist.concentration
@property
def rate(self):
return self._torch_dist.rate
def to(self, device):
return Gamma(concentration=self.concentration.to(device), rate=self.rate.to(device))
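# A minimal usage sketch (guarded so it never runs on import; assumes pyprob
# is installed and that sample/log_prob are provided by the Distribution base
# class via the wrapped torch distribution):
if __name__ == "__main__":
    d = Gamma(concentration=2.0, rate=1.0)
    x = d.sample()
    print(d, x, d.log_prob(x))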
| 31.038462
| 140
| 0.695167
|
ba85a0ee38b05189c94c980aa3ab81686d3e0bcc
| 14,355
|
py
|
Python
|
core/dbt/adapters/base/relation.py
|
jmcarp/dbt
|
46d36cd4123e3ee3cdb293e9555115905efbaf0d
|
[
"Apache-2.0"
] | null | null | null |
core/dbt/adapters/base/relation.py
|
jmcarp/dbt
|
46d36cd4123e3ee3cdb293e9555115905efbaf0d
|
[
"Apache-2.0"
] | 1
|
2021-02-01T17:57:50.000Z
|
2021-02-01T17:57:50.000Z
|
core/dbt/adapters/base/relation.py
|
jmcarp/dbt
|
46d36cd4123e3ee3cdb293e9555115905efbaf0d
|
[
"Apache-2.0"
] | 1
|
2021-02-01T17:54:24.000Z
|
2021-02-01T17:54:24.000Z
|
from collections.abc import Hashable
from dataclasses import dataclass
from typing import (
Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
)
from dbt.contracts.graph.compiled import CompiledNode
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
from dbt.contracts.relation import (
RelationType, ComponentName, HasQuoting, FakeAPIObject, Policy, Path
)
from dbt.exceptions import InternalException
from dbt.node_types import NodeType
from dbt.utils import filter_null_values, deep_merge, classproperty
import dbt.exceptions
Self = TypeVar('Self', bound='BaseRelation')
@dataclass(frozen=True, eq=False, repr=False)
class BaseRelation(FakeAPIObject, Hashable):
type: Optional[RelationType]
path: Path
quote_character: str = '"'
include_policy: Policy = Policy()
quote_policy: Policy = Policy()
dbt_created: bool = False
def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
if self.dbt_created and self.quote_policy.get_part(field) is False:
return self.path.get_lowered_part(field) == value.lower()
else:
return self.path.get_part(field) == value
@classmethod
def _get_field_named(cls, field_name):
for field, _ in cls._get_fields():
if field.name == field_name:
return field
# this should be unreachable
raise ValueError(f'BaseRelation has no {field_name} field!')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_dict() == other.to_dict()
@classmethod
def get_default_quote_policy(cls) -> Policy:
return cls._get_field_named('quote_policy').default
@classmethod
def get_default_include_policy(cls) -> Policy:
return cls._get_field_named('include_policy').default
def get(self, key, default=None):
"""Override `.get` to return a metadata object so we don't break
dbt_utils.
"""
if key == 'metadata':
return {
'type': self.__class__.__name__
}
return super().get(key, default)
def matches(
self,
database: Optional[str] = None,
schema: Optional[str] = None,
identifier: Optional[str] = None,
) -> bool:
search = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
if not search:
# nothing was passed in
raise dbt.exceptions.RuntimeException(
"Tried to match relation, but no search path was passed!")
exact_match = True
approximate_match = True
for k, v in search.items():
if not self._is_exactish_match(k, v):
exact_match = False
if self.path.get_lowered_part(k) != v.lower():
approximate_match = False
if approximate_match and not exact_match:
target = self.create(
database=database, schema=schema, identifier=identifier
)
dbt.exceptions.approximate_relation_match(target, self)
return exact_match
def replace_path(self, **kwargs):
return self.replace(path=self.path.replace(**kwargs))
def quote(
self: Self,
database: Optional[bool] = None,
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
new_quote_policy = self.quote_policy.replace_dict(policy)
return self.replace(quote_policy=new_quote_policy)
def include(
self: Self,
database: Optional[bool] = None,
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
new_include_policy = self.include_policy.replace_dict(policy)
return self.replace(include_policy=new_include_policy)
def information_schema(self, view_name=None) -> 'InformationSchema':
# some of our data comes from jinja, where things can be `Undefined`.
if not isinstance(view_name, str):
view_name = None
# Kick the user-supplied schema out of the information schema relation
# Instead address this as <database>.information_schema by default
info_schema = InformationSchema.from_relation(self, view_name)
return info_schema.incorporate(path={"schema": None})
def information_schema_only(self) -> 'InformationSchema':
return self.information_schema()
def without_identifier(self) -> 'BaseRelation':
"""Return a form of this relation that only has the database and schema
set to included. To get the appropriately-quoted form the schema out of
the result (for use as part of a query), use `.render()`. To get the
raw database or schema name, use `.database` or `.schema`.
The hash of the returned object is the result of render().
"""
return self.include(identifier=False).replace_path(identifier=None)
def _render_iterator(
self
) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
for key in ComponentName:
path_part: Optional[str] = None
if self.include_policy.get_part(key):
path_part = self.path.get_part(key)
if path_part is not None and self.quote_policy.get_part(key):
path_part = self.quoted(path_part)
yield key, path_part
def render(self) -> str:
# if there is nothing set, this will return the empty string.
return '.'.join(
part for _, part in self._render_iterator()
if part is not None
)
def quoted(self, identifier):
return '{quote_char}{identifier}{quote_char}'.format(
quote_char=self.quote_character,
identifier=identifier,
)
@classmethod
def create_from_source(
cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
) -> Self:
source_quoting = source.quoting.to_dict()
source_quoting.pop('column', None)
quote_policy = deep_merge(
cls.get_default_quote_policy().to_dict(),
source_quoting,
kwargs.get('quote_policy', {}),
)
return cls.create(
database=source.database,
schema=source.schema,
identifier=source.identifier,
quote_policy=quote_policy,
**kwargs
)
@staticmethod
def add_ephemeral_prefix(name: str):
return f'__dbt__cte__{name}'
@classmethod
def create_ephemeral_from_node(
cls: Type[Self],
config: HasQuoting,
node: Union[ParsedNode, CompiledNode],
) -> Self:
# Note that ephemeral models are based on the name.
identifier = cls.add_ephemeral_prefix(node.name)
return cls.create(
type=cls.CTE,
identifier=identifier,
).quote(identifier=False)
@classmethod
def create_from_node(
cls: Type[Self],
config: HasQuoting,
node: Union[ParsedNode, CompiledNode],
quote_policy: Optional[Dict[str, bool]] = None,
**kwargs: Any,
) -> Self:
if quote_policy is None:
quote_policy = {}
quote_policy = dbt.utils.merge(config.quoting, quote_policy)
return cls.create(
database=node.database,
schema=node.schema,
identifier=node.alias,
quote_policy=quote_policy,
**kwargs)
@classmethod
def create_from(
cls: Type[Self],
config: HasQuoting,
node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
**kwargs: Any,
) -> Self:
if node.resource_type == NodeType.Source:
if not isinstance(node, ParsedSourceDefinition):
raise InternalException(
'type mismatch, expected ParsedSourceDefinition but got {}'
.format(type(node))
)
return cls.create_from_source(node, **kwargs)
else:
if not isinstance(node, (ParsedNode, CompiledNode)):
raise InternalException(
'type mismatch, expected ParsedNode or CompiledNode but '
'got {}'.format(type(node))
)
return cls.create_from_node(config, node, **kwargs)
@classmethod
def create(
cls: Type[Self],
database: Optional[str] = None,
schema: Optional[str] = None,
identifier: Optional[str] = None,
type: Optional[RelationType] = None,
**kwargs,
) -> Self:
kwargs.update({
'path': {
'database': database,
'schema': schema,
'identifier': identifier,
},
'type': type,
})
return cls.from_dict(kwargs)
def __repr__(self) -> str:
return "<{} {}>".format(self.__class__.__name__, self.render())
def __hash__(self) -> int:
return hash(self.render())
def __str__(self) -> str:
return self.render()
@property
def database(self) -> Optional[str]:
return self.path.database
@property
def schema(self) -> Optional[str]:
return self.path.schema
@property
def identifier(self) -> Optional[str]:
return self.path.identifier
@property
def table(self) -> Optional[str]:
return self.path.identifier
# Here for compatibility with old Relation interface
@property
def name(self) -> Optional[str]:
return self.identifier
@property
def is_table(self) -> bool:
return self.type == RelationType.Table
@property
def is_cte(self) -> bool:
return self.type == RelationType.CTE
@property
def is_view(self) -> bool:
return self.type == RelationType.View
@classproperty
def Table(cls) -> str:
return str(RelationType.Table)
@classproperty
def CTE(cls) -> str:
return str(RelationType.CTE)
@classproperty
def View(cls) -> str:
return str(RelationType.View)
@classproperty
def External(cls) -> str:
return str(RelationType.External)
@classproperty
def get_relation_type(cls) -> Type[RelationType]:
return RelationType
Info = TypeVar('Info', bound='InformationSchema')
@dataclass(frozen=True, eq=False, repr=False)
class InformationSchema(BaseRelation):
information_schema_view: Optional[str] = None
def __post_init__(self):
if not isinstance(self.information_schema_view, (type(None), str)):
raise dbt.exceptions.CompilationException(
'Got an invalid name: {}'.format(self.information_schema_view)
)
@classmethod
def get_path(
cls, relation: BaseRelation, information_schema_view: Optional[str]
) -> Path:
return Path(
database=relation.database,
schema=relation.schema,
identifier='INFORMATION_SCHEMA',
)
@classmethod
def get_include_policy(
cls,
relation,
information_schema_view: Optional[str],
) -> Policy:
return relation.include_policy.replace(
database=relation.database is not None,
schema=False,
identifier=True,
)
@classmethod
def get_quote_policy(
cls,
relation,
information_schema_view: Optional[str],
) -> Policy:
return relation.quote_policy.replace(
identifier=False,
)
@classmethod
def from_relation(
cls: Type[Info],
relation: BaseRelation,
information_schema_view: Optional[str],
) -> Info:
include_policy = cls.get_include_policy(
relation, information_schema_view
)
quote_policy = cls.get_quote_policy(relation, information_schema_view)
path = cls.get_path(relation, information_schema_view)
return cls(
type=RelationType.View,
path=path,
include_policy=include_policy,
quote_policy=quote_policy,
information_schema_view=information_schema_view,
)
def _render_iterator(self):
for k, v in super()._render_iterator():
yield k, v
yield None, self.information_schema_view
class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
"""A utility class to keep track of what information_schema tables to
search for what schemas. The schema values are all lowercased to avoid
duplication.
"""
def add(self, relation: BaseRelation):
key = relation.information_schema_only()
if key not in self:
self[key] = set()
schema: Optional[str] = None
if relation.schema is not None:
schema = relation.schema.lower()
self[key].add(schema)
def search(
self
) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
for information_schema_name, schemas in self.items():
for schema in schemas:
yield information_schema_name, schema
def flatten(self):
new = self.__class__()
# make sure we don't have duplicates
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
dbt.exceptions.raise_compiler_error(str(seen))
for information_schema_name, schema in self.search():
path = {
'database': information_schema_name.database,
'schema': schema
}
new.add(information_schema_name.incorporate(
path=path,
quote_policy={'database': False},
include_policy={'database': False},
))
return new
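# Minimal usage sketch of the classes above (assumes the dbt contract
# imports at the top of this module resolve):
if __name__ == '__main__':
    rel = BaseRelation.create(
        database='analytics', schema='dbt_jdoe', identifier='orders'
    )
    # With the default quote/include policies this typically renders as
    # "analytics"."dbt_jdoe"."orders"
    print(rel.render())
    search_map = SchemaSearchMap()
    search_map.add(rel)
    for info_schema, schema in search_map.search():
        print(info_schema, schema)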
| 31.480263
| 79
| 0.61233
|
0f85bedf4ed309613e84863ae31b1adfeffec596
| 3,226
|
py
|
Python
|
modules/swar/doc/sort.py
|
brycelelbach/nt2
|
73d7e8dd390fa4c8d251c6451acdae65def70e0b
|
[
"BSL-1.0"
] | 1
|
2022-03-24T03:35:10.000Z
|
2022-03-24T03:35:10.000Z
|
modules/swar/doc/sort.py
|
brycelelbach/nt2
|
73d7e8dd390fa4c8d251c6451acdae65def70e0b
|
[
"BSL-1.0"
] | null | null | null |
modules/swar/doc/sort.py
|
brycelelbach/nt2
|
73d7e8dd390fa4c8d251c6451acdae65def70e0b
|
[
"BSL-1.0"
] | null | null | null |
[ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'special' : ['swar'],
'simd_types' : ['gt_16_'],
'type_defs' : [],
'types' : ['real_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 24/02/2011',
'included' : [],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 24/02/2011',
},
'ranges' : {
'default' : [['0', '100']],
'real_' : [['T(-100)', 'T(100)']],
'signed_int_' : [],
'unsigned_int_' : [],
},
'specific_values' : {
'default' : {
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
'real_' : {
'nt2::Inf<T>()' : {'result' : 'nt2::Inf<r_t>()','ulp_thresh' : '0',},
'nt2::Minf<T>()' : {'result' : 'nt2::Minf<r_t>()','ulp_thresh' : '0',},
'nt2::Mone<T>()' : {'result' : 'nt2::Mone<r_t>()','ulp_thresh' : '0',},
'nt2::Nan<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
'signed_int_' : {
'nt2::Mone<T>()' : {'result' : 'nt2::Mone<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
},
'verif_test' : {
'nb_rand' : {
'default' : 'NT2_NB_RANDOM_TEST',
},
'property_call' : {
'default' : ['nt2::sort(a0)'],
},
'property_value' : {
'default' : ['a0'],
},
'ulp_thresh' : {
'default' : ['0.5'],
},
'scalar_simul' :{
'default' : [
" T z[cardinal_of<r_t>::value];",
" for(uint32_t i=0; i<cardinal_of<n_t>::value; i++) z[i]=a0[i];",
" for(uint32_t i=0; i<cardinal_of<n_t>::value; i++){",
" for(uint32_t k=0; k<cardinal_of<n_t>::value; k++){",
" if (z[i]<z[k]) std::swap(z[i],z[k]);",
" }",
" }",
" for(uint32_t i=0; i<cardinal_of<n_t>::value; i++){",
" NT2_TEST_EQUAL(v[i],z[i]);",
" }",
]
},
},
},
},
]
| 40.325
| 96
| 0.322381
|
b3612692d1213150b0fcf639a1e7004dfe0aa0a4
| 252
|
py
|
Python
|
examples/confluence/confluence_page_create.py
|
Kudesnick/atlassian-python-api
|
0ff7cf4ef39d8374e948d8c7ad467944159a3b8b
|
[
"Apache-2.0"
] | 779
|
2018-10-05T02:45:01.000Z
|
2022-03-31T22:30:42.000Z
|
examples/confluence/confluence_page_create.py
|
Kudesnick/atlassian-python-api
|
0ff7cf4ef39d8374e948d8c7ad467944159a3b8b
|
[
"Apache-2.0"
] | 659
|
2018-10-05T09:58:15.000Z
|
2022-03-29T13:20:00.000Z
|
examples/confluence/confluence_page_create.py
|
Kudesnick/atlassian-python-api
|
0ff7cf4ef39d8374e948d8c7ad467944159a3b8b
|
[
"Apache-2.0"
] | 502
|
2018-10-08T16:08:32.000Z
|
2022-03-31T08:24:13.000Z
|
# coding=utf-8
from atlassian import Confluence
confluence = Confluence(url="http://localhost:8090", username="admin", password="admin")
status = confluence.create_page(space="DEMO", title="This is the title", body="This is the body")
print(status)
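# A small follow-up sketch: fetch the page back by title to confirm creation
# (get_page_by_title is part of the same Confluence client; treat the exact
# return shape as an assumption).
page = confluence.get_page_by_title(space="DEMO", title="This is the title")
print(page)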
| 28
| 97
| 0.746032
|
c32aac15632033565cf768ddd359e0aebdf174db
| 6,023
|
py
|
Python
|
tutorials/exchange-tutorial-python/exchange_tutorial.py
|
TP-Lab/enumivo
|
76d81a36d2db8cea93fb54cd95a6ec5f6c407f97
|
[
"MIT"
] | 8
|
2018-08-02T02:31:19.000Z
|
2018-08-16T03:31:02.000Z
|
tutorials/exchange-tutorial-python/exchange_tutorial.py
|
TP-Lab/enumivo
|
76d81a36d2db8cea93fb54cd95a6ec5f6c407f97
|
[
"MIT"
] | null | null | null |
tutorials/exchange-tutorial-python/exchange_tutorial.py
|
TP-Lab/enumivo
|
76d81a36d2db8cea93fb54cd95a6ec5f6c407f97
|
[
"MIT"
] | null | null | null |
import json
import pprint
import os
import sys
import subprocess
import time
from subprocess import PIPE
# This key would be different for each user.
KEY_TO_INTERNAL_ACCOUNT='12345'
DEMO_USER='scott'
def main():
try:
command = sys.argv[1]
if command == 'monitor':
setup()
while True:
monitor_exchange()
time.sleep(.1)
elif command == 'transfer':
if len(sys.argv) == 4:
transfer(sys.argv[2], sys.argv[3])
else:
print('Transfer must be called by `python exchange_tutorial.py transfer {} 1.0000`'.format(DEMO_USER))
except subprocess.CalledProcessError as e:
print(e)
print(str(e.stderr, 'utf-8'))
def monitor_exchange():
action_num = get_last_action() + 1
results = enucli('get actions tokenxchange {} 0 -j'.format(action_num))
results = json.loads(results.stdout)
action_list = results['actions']
if len(action_list) == 0:
return
action = action_list[0]
last_irreversible_block = results['last_irreversible_block']
to = action['action_trace']['act']['data']['to']
block_num = action['block_num']
if is_irreversible(block_num, last_irreversible_block):
update_balance(action, to)
set_last_action(action_num)
def update_balance(action, to):
current_balance = get_balance()
new_balance = current_balance
transfer_quantity = action['action_trace']['act']['data']['quantity'].split()[0]
transfer_quantity = float(transfer_quantity)
if to == 'tokenxchange':
if is_valid_deposit(action):
new_balance = current_balance + transfer_quantity
set_balance(new_balance)
elif is_valid_withdrawal(action):
new_balance = current_balance - transfer_quantity
set_balance(new_balance)
def transfer(to, quantity):
    if quantity[-4:] != ' ENU':  # append the token symbol only if it is missing
quantity += ' ENU'
results = enucli('transfer tokenxchange {} "{}" {} -j'.format(to, quantity, KEY_TO_INTERNAL_ACCOUNT))
transaction_info = json.loads(str(results.stdout, 'utf-8'))
transaction_id = transaction_info['transaction_id']
transaction_status = transaction_info['processed']['receipt']['status']
if transaction_status == 'hard_fail':
print('Transaction failed.')
return
add_transactions(transaction_id)
print('Initiated transfer of {} to {}. Transaction id is {}.'.format(quantity, to, transaction_id))
def is_irreversible(block_num, last_irreversible_block):
return block_num <= last_irreversible_block
def is_valid_deposit(action):
account = action['action_trace']['act']['account']
action_name = action['action_trace']['act']['name']
memo = action['action_trace']['act']['data']['memo']
receiver = action['action_trace']['receipt']['receiver']
token = action['action_trace']['act']['data']['quantity'].split()[1]
valid_user = action['action_trace']['act']['data']['to'] == 'tokenxchange'
from_user = action['action_trace']['act']['data']['from']
# Filter only to actions that notify the tokenxchange account.
if receiver != 'tokenxchange':
return False
if (account == 'enu.token' and
action_name == 'transfer' and
memo == KEY_TO_INTERNAL_ACCOUNT and
valid_user and
from_user == DEMO_USER and
token == 'ENU'):
return True
print('Invalid deposit')
return False
def is_valid_withdrawal(action):
account = action['action_trace']['act']['account']
action_name = action['action_trace']['act']['name']
memo = action['action_trace']['act']['data']['memo']
receiver = action['action_trace']['receipt']['receiver']
token = action['action_trace']['act']['data']['quantity'].split()[1]
transaction_id = action['action_trace']['trx_id']
valid_user = action['action_trace']['act']['data']['from'] == 'tokenxchange'
to_user = action['action_trace']['act']['data']['to']
# Filter only to actions that notify the exchange account.
if receiver != 'tokenxchange':
return False
if (account == 'enu.token' and
action_name == 'transfer' and
memo == KEY_TO_INTERNAL_ACCOUNT and
valid_user and
to_user == DEMO_USER and
transaction_id in get_transactions() and
token == 'ENU'):
return True
print('Invalid withdrawal')
return False
def enucli(args):
if isinstance(args, list):
command = ['enucli']
command.extend(args)
else:
command = 'enucli ' + args
results = subprocess.run(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True, check=True)
return results
def setup():
if not os.path.exists('last_action.txt'):
set_last_action(-1)
if not os.path.exists('balance.txt'):
set_balance(0)
if not os.path.exists('transactions.txt'):
with open('transactions.txt', 'w') as f:
f.write(json.dumps({"transactions": []}))
def get_transactions():
with open('transactions.txt', 'r') as f:
transactions = json.load(f)
return set(transactions['transactions'])
def add_transactions(transaction_id):
transactions = get_transactions()
transactions.add(transaction_id)
with open('transactions.txt', 'w') as f:
transactions = json.dumps({'transactions': list(transactions)})
f.write(transactions)
def get_last_action():
with open('last_action.txt', 'r') as f:
last_action = int(f.read())
return last_action
def set_last_action(action):
with open('last_action.txt', 'w') as f:
f.write(str(action))
def get_balance():
with open('balance.txt', 'r') as f:
balance = float(f.read())
return balance
def set_balance(balance):
with open('balance.txt', 'w') as f:
f.write(str(balance))
print("{}'s balance is: {}".format(DEMO_USER, balance))
if __name__ == '__main__':
main()
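# Usage sketch (assumes enucli is on PATH and a local node is reachable):
#   python exchange_tutorial.py monitor                 # poll tokenxchange actions
#   python exchange_tutorial.py transfer scott 1.0000   # transfer to DEMO_USER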
| 32.037234
| 118
| 0.637058
|
6499e3ce8411400014e23e45b84e1233a901dc58
| 1,789
|
py
|
Python
|
prepro/prepro_det.py
|
Canadalynx/NeuralBabyTalk
|
e56346c44bfe373c0550dbe506b77218f481a9e8
|
[
"MIT"
] | null | null | null |
prepro/prepro_det.py
|
Canadalynx/NeuralBabyTalk
|
e56346c44bfe373c0550dbe506b77218f481a9e8
|
[
"MIT"
] | null | null | null |
prepro/prepro_det.py
|
Canadalynx/NeuralBabyTalk
|
e56346c44bfe373c0550dbe506b77218f481a9e8
|
[
"MIT"
] | null | null | null |
import json
import pdb
import numpy as np
import h5py
dataset = 'coco'
if dataset == 'coco':
det_train = json.load(open('data/coco_noc/coco_detection_noc_train.json'))
det_val = json.load(open('data/coco_noc/coco_detection_noc_val.json'))
info = json.load(open('data/coco_noc/dic_coco.json'))
det = []
for img in det_train:
img['split'] = 'train2014'
det.append(img)
for img in det_val:
img['split'] = 'val2014'
det.append(img)
elif dataset == 'flickr30k':
det_file = json.load(open('data/flickr30k/flickr30k_detection.json'))
info = json.load(open('data/flickr30k/dic_flickr30k.json'))
det = []
for img in det_file:
det.append(img)
proposal_file = {}  # proposal_file maps image id -> image record
for img in det:
proposal_file[img['image_id']] = img
N = len(det)  # det holds N records
dets_labels = np.zeros((N, 100, 6))
dets_num = np.zeros((N))
nms_num = np.zeros((N))
for idx, img in enumerate(info['images']):
image_id = img['id']
proposal = proposal_file[image_id]
num_proposal = len(proposal['detection'])
num_nms = proposal['num_boxes']
proposals = np.zeros([num_proposal, 6])
for i in range(num_proposal):
proposals[i, :4] = proposal['detection'][i]['location']
proposals[i, 4] = proposal['detection'][i]['label']
proposals[i, 5] = proposal['detection'][i]['score']
dets_labels[idx,:num_proposal] = proposals
dets_num[idx] = num_proposal
nms_num[idx] = num_nms
if dataset == 'coco':
f = h5py.File('coco_noc_detection.h5', "w")
elif dataset == 'flickr30k':
f = h5py.File('flickr30k_detection.h5', "w")
f.create_dataset("dets_labels", data=dets_labels)
f.create_dataset("dets_num", data=dets_num)
f.create_dataset("nms_num", data=nms_num)
f.close()
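# Read-back sanity check (a minimal sketch; assumes the coco branch above
# ran and wrote coco_noc_detection.h5):
with h5py.File('coco_noc_detection.h5', 'r') as chk:
    print(chk['dets_labels'].shape)  # (N, 100, 6)
    print(chk['dets_num'][:5], chk['nms_num'][:5])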
| 28.396825
| 78
| 0.658468
|
fff2056c8775165433c9e6e61d1f58bb71a63667
| 619
|
py
|
Python
|
travis_ci/settings.py
|
ExCiteS/geokey-cartodb
|
c6a40a18a08abd1ac547c0f814f74a5a139fb82b
|
[
"MIT"
] | null | null | null |
travis_ci/settings.py
|
ExCiteS/geokey-cartodb
|
c6a40a18a08abd1ac547c0f814f74a5a139fb82b
|
[
"MIT"
] | null | null | null |
travis_ci/settings.py
|
ExCiteS/geokey-cartodb
|
c6a40a18a08abd1ac547c0f814f74a5a139fb82b
|
[
"MIT"
] | null | null | null |
"""GeoKey settings."""
from geokey.core.settings.dev import *
DEFAULT_FROM_EMAIL = 'no-reply@travis-ci.org'
ACCOUNT_EMAIL_VERIFICATION = 'optional'
SECRET_KEY = 'xxxxxxxxxxxxxxxxxxxxxxxxx'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geokey',
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
INSTALLED_APPS += (
'geokey_cartodb',
)
STATIC_URL = '/static/'
MEDIA_ROOT = normpath(join(dirname(dirname(abspath(__file__))), 'assets'))
MEDIA_URL = '/assets/'
WSGI_APPLICATION = 'wsgi.application'
| 19.34375
| 74
| 0.630048
|
dc85b59bb741084026202b4f34fd88c678d3a27d
| 14,242
|
py
|
Python
|
foundation/organisation/migrations/0019_auto__del_field_networkgroupmembership_role.py
|
Mindelirium/foundation
|
2d07e430915d696ca7376afea633692119c4d30e
|
[
"MIT"
] | null | null | null |
foundation/organisation/migrations/0019_auto__del_field_networkgroupmembership_role.py
|
Mindelirium/foundation
|
2d07e430915d696ca7376afea633692119c4d30e
|
[
"MIT"
] | null | null | null |
foundation/organisation/migrations/0019_auto__del_field_networkgroupmembership_role.py
|
Mindelirium/foundation
|
2d07e430915d696ca7376afea633692119c4d30e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'NetworkGroupMembership.role'
db.delete_column(u'organisation_networkgroupmembership', 'role')
def backwards(self, orm):
# Adding field 'NetworkGroupMembership.role'
db.add_column(u'organisation_networkgroupmembership', 'role',
self.gf('django.db.models.fields.CharField')(default=None, max_length=100),
keep_default=False)
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'organisation.board': {
'Meta': {'object_name': 'Board'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.BoardMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.boardmembership': {
'Meta': {'object_name': 'BoardMembership'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.featuredproject': {
'Meta': {'object_name': 'FeaturedProject', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['organisation.Project']"})
},
u'organisation.networkgroup': {
'Meta': {'unique_together': "(('country', 'region'),)", 'object_name': 'NetworkGroup'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'country_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_information': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.NetworkGroupMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'region_slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.networkgroupmembership': {
'Meta': {'object_name': 'NetworkGroupMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'networkgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.NetworkGroup']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'sourcecode_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Theme']", 'symmetrical': 'False', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.ProjectType']", 'symmetrical': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.projectlist': {
'Meta': {'object_name': 'ProjectList', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.ProjectType']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'})
},
u'organisation.projecttype': {
'Meta': {'object_name': 'ProjectType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.theme': {
'Meta': {'object_name': 'Theme'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unit': {
'Meta': {'ordering': "['-order', 'name']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.UnitMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unitmembership': {
'Meta': {'object_name': 'UnitMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.workinggroup': {
'Meta': {'object_name': 'WorkingGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incubation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['organisation']
| 80.011236
| 200
| 0.567336
|
7cc3c50666c10b40ad43dacfd74c8c5a0bac0523
| 8,214
|
py
|
Python
|
pyisis/engine.py
|
rodsenra/pyisis
|
f5815fd096a463902893f87f309f8117b5705621
|
[
"MIT"
] | null | null | null |
pyisis/engine.py
|
rodsenra/pyisis
|
f5815fd096a463902893f87f309f8117b5705621
|
[
"MIT"
] | null | null | null |
pyisis/engine.py
|
rodsenra/pyisis
|
f5815fd096a463902893f87f309f8117b5705621
|
[
"MIT"
] | 2
|
2019-11-08T20:51:54.000Z
|
2021-08-17T23:49:48.000Z
|
# -*- coding: utf-8 -*-
"""
Routines used to control and manage ISIS-Cell activity
"""
__updated__ = "2007-12-10"
__created__ = "2007-12-10"
__author__ = "Rodrigo Senra <rsenra@acm.org>"
import sys
from traceback import print_exc
try:
    from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import codecs
from os import getcwd
from os.path import join
from glob import glob
from logging import getLogger
from traceback import format_exc, extract_tb
import pyisis
import pyisis.session
#ISISAC.TAB
isisac_tab = [
u'\u0041',u'\u0042',u'\u0043',u'\u0044',u'\u0045',u'\u0046',u'\u0047',u'\u0048',
u'\u0049',u'\u004A',u'\u004B',u'\u004C',u'\u004D',u'\u004E',u'\u004F',u'\u0050',
u'\u0051',u'\u0052',u'\u0053',u'\u0054',u'\u0055',u'\u0056',u'\u0057',u'\u0058',
u'\u0059',u'\u005A',u'\u0061',u'\u0062',u'\u0063',u'\u0064',u'\u0065',u'\u0066',
u'\u0067',u'\u0068',u'\u0069',u'\u006A',u'\u006B',u'\u006C',u'\u006D',u'\u006E',
u'\u006F',u'\u0070',u'\u0071',u'\u0072',u'\u0073',u'\u0074',u'\u0075',u'\u0076',
u'\u0077',u'\u0078',u'\u0079',u'\u007A',u'\u0080',u'\u0081',u'\u0082',u'\u0083',
u'\u0084',u'\u0085',u'\u0086',u'\u0087',u'\u0088',u'\u0089',u'\u008A',u'\u008B',
u'\u008C',u'\u008D',u'\u008E',u'\u008F',u'\u0090',u'\u0091',u'\u0092',u'\u0093',
u'\u0094',u'\u0095',u'\u0096',u'\u0097',u'\u0098',u'\u0099',u'\u009A',u'\u00A0',
u'\u00A1',u'\u00A2',u'\u00A3',u'\u00A4',u'\u00A5',u'\u0020']
def loop(sequence, pause=True):
"""Iterate over the elements of a given sequence,
    waiting for user acknowledgment after printing each
    element."""
for e in sequence:
try:
print e
if pause:
raw_input()
except KeyboardInterrupt:
break
class Engine(object):
"""Holds global data-structures used in
console-only or in gateway mode.
"""
# Class attribute that holds a dict with collection instances
collection = {}
# Global configuration settings
config = None
@staticmethod
def setup(config):
"""Browse config.COLLECTIONS building a dictionary with
all the collection objects. The key of this dictionary is
the collection name and the value is a Collection instance.
Also sets default output encoding.
"""
# Initialize formatting language
Engine.config = config
pyisis.session.initialize(config)
Engine.collection.clear()
logger = getLogger('pyisis')
for typename, name, path in config.COLLECTIONS:
try:
CollectionType = getattr(pyisis.files, typename)
Engine.collection[name]= CollectionType(name, path)
except IOError, ex:
logger.warning(_("Failed to create collection %s: %s")%(name, ex))
except Exception, ex:
logger.error(_("Unexpected error while processing %s: %s")%(name, ex))
logger.error(format_exc())
# Try to identify local collections in current working directory
local_msts = glob("*.mst")
if local_msts:
Engine.collection['current']= pyisis.files.IsisCollection('current', [getcwd()])
#from IPython.ipapi import TryNext, get as ipget
#from IPython.genutils import dir2, Term
def logexcept(self, etype, evalue, etrace):
"""Custom traceback handler that dumps exception traces to log file.
"""
logger = getLogger('pyisis')
logger.warning(format_exc())
#def result_display(self, arg):
# """ Overrides IPython's display hook.
# Called for displaying the result to the user.
# """
# if type(arg) in (type,unicode):
# # unicode() cannot be called directly on classes (unbounded)
# print >>Term.cout, arg
# else:
# print >>Term.cout, unicode(arg).encode(Engine.config.OUTPUT_ENCODING)
#
# return None
def isis_completers(self, event):
""" This should return a list of strings with possible completions.
"""
symbol_parts = event.symbol.split('.')
base = '.'.join(symbol_parts[:-1])
oinfo = self._ofind(base)
if not oinfo['found']:
raise TryNext
obj = oinfo['obj']
types = (pyisis.files.IsisCollection, pyisis.files.MasterFile,
pyisis.records.MasterRecord,
pyisis.fields.MasterField,
pyisis.fields.MasterContainerField,
pyisis.records.XrfRecord)
if not any((isinstance(obj, i) for i in types)):
raise TryNext
attrs = dir2(obj)
attrs = [a for a in attrs if not a.startswith('__')]
# The base of the completion, so we can form the final results list
bdot = base+'.'
tcomp = [bdot+a for a in attrs]
return tcomp
def interactive(collection):
from IPython.ipapi import TryNext, get as ipget
from IPython.genutils import dir2, Term
banner = "\n"+\
_("Welcome to ISIS-NBP Cell %s Interactive Console\n") % pyisis.__version__ +\
"Python %s\n" % sys.version +\
_("Use the console to test and inspect the collections.\n\n")+\
_("Type 'collection' to see a dictionary with all available collections.\n")+\
_("Type '<Ctrl-D>' or 'exit()' followed by '<enter>' to quit.\n")
print banner.encode(Engine.config.OUTPUT_ENCODING)
try:
import readline
import rlcompleter
readline.parse_and_bind("tab: complete")
except ImportError:
pass
# Add these to the global ipshell namespace
locals().update(collection)
locals().update({'format':pyisis.session.format,
'loop':loop,
'MasterFile': pyisis.files.MasterFile,
'MasterRecord': pyisis.records.MasterRecord,
'MasterField': pyisis.fields.MasterField
})
try:
__IPYTHON__
except NameError:
argv = ['']
banner = _("The available collections are: %s\n") % (", ".join(collection.keys()))
exit_msg = _('Closing ISIS-NBP Interactive Python Console\n')
# First import the embeddable shell class
from IPython.Shell import IPShellEmbed
# redefine hooks
ipshell = IPShellEmbed(argv, banner=banner, exit_msg=exit_msg)
#ipshell.IP.set_hook('complete_command', isis_completers, re_key = '.*')
# result _display became obsolete, because encoding conversion
# is done by __str__ methods and works in the standard Python prompt
# as well!
#ipshell.IP.set_hook('result_display', result_display)
#if Engine.config.SILENT_EXCEPTIONS:
# ipshell.IP.set_custom_exc((SyntaxError, NameError, AttributeError, ValueError),
# logexcept)
# Now create the IPython shell instance.
ipshell()
def interactive_old(mf):
"""
THIS FUNCTIONS HAS BEEN REPLACED BY A MODERN IPYTHON CONSOLE.
You need to install IPython from http://ipython.scipy.org
This is the interactive prompt of operation
to test and inspect the master file data and operations.
The master file is accessible by the global mf variable.
Use mf[1] to access the first record.
Type 'q', 'quit' or 'exit' followed by '<enter>' to quit.
"""
    print interactive_old.__doc__
try:
import readline
import rlcompleter
readline.parse_and_bind("tab: complete")
except ImportError:
pass
namespace = {'mf':mf, 'format': pyisis.session.format,
#'proc': pyisis.session.proc
}
while 1:
try:
cmd = raw_input("pymx> ")
if cmd.strip() in ('exit','quit','q'):
raise SystemExit()
# create file-like string to capture output
codeOut, codeErr = StringIO(), StringIO()
try:
sys.stdout, sys.stderr = codeOut, codeErr
exec cmd in namespace
finally:
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
s = codeErr.getvalue()
if s:
print "error:\n%s\n" % s
s = codeOut.getvalue()
if s:
print "\n%s" % s
codeOut.close()
codeErr.close()
except StandardError,ex:
print_exc()
| 33.255061
| 92
| 0.619065
|
60c1e9eadc749f36b3cc03115e9ab33dbe42e4b1
| 652
|
py
|
Python
|
Working_with_Excel_Update_information.py
|
maainul/Paython
|
c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9
|
[
"DOC"
] | null | null | null |
Working_with_Excel_Update_information.py
|
maainul/Paython
|
c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9
|
[
"DOC"
] | null | null | null |
Working_with_Excel_Update_information.py
|
maainul/Paython
|
c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9
|
[
"DOC"
] | null | null | null |
#! usr/bin/env python3
# Chapter 12 Project Updating a Spreadsheet
# updateProduce.py - Corrects costs in produce sales spreadsheet.
import openpyxl,os
os.chdir('/home/mainul/Desktop/copy_files/')
print(os.getcwd())
wb = openpyxl.load_workbook('produceSales.xlsx')
sheet = wb.get_sheet_by_name('Sheet')
PRICE_UPDATES = {'Garlic': 3.07,
'Celery': 1.19,
'Lemon': 1.27}
for rowNum in range(2, sheet.max_row + 1):  # row 1 is the header row
produceName = sheet.cell(row=rowNum, column=1).value
if produceName in PRICE_UPDATES:
sheet.cell(row=rowNum, column=2).value = PRICE_UPDATES[produceName]
wb.save('updatedProductSales.xlsx')
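# Verification sketch: re-open the saved copy and print the first few rows.
wb2 = openpyxl.load_workbook('updatedProductSales.xlsx')
sheet2 = wb2.get_sheet_by_name('Sheet')
for rowNum in range(2, 6):
    print(sheet2.cell(row=rowNum, column=1).value,
          sheet2.cell(row=rowNum, column=2).value)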
| 28.347826
| 75
| 0.699387
|
6c1d8abb1c81d50e7c59d8afc23afd42a2dbe546
| 1,758
|
py
|
Python
|
chapters/appendix_c/tfx_template_example/models/features.py
|
swilliams11/building-machine-learning-pipelines
|
b7e5e722975b3138005892e4644bfadb61ada945
|
[
"MIT"
] | 465
|
2020-05-18T08:23:42.000Z
|
2022-03-28T14:51:43.000Z
|
chapters/appendix_c/tfx_template_example/models/features.py
|
swilliams11/building-machine-learning-pipelines
|
b7e5e722975b3138005892e4644bfadb61ada945
|
[
"MIT"
] | 49
|
2020-06-25T12:35:32.000Z
|
2021-11-23T22:47:30.000Z
|
chapters/appendix_c/tfx_template_example/models/features.py
|
swilliams11/building-machine-learning-pipelines
|
b7e5e722975b3138005892e4644bfadb61ada945
|
[
"MIT"
] | 163
|
2020-05-19T07:20:13.000Z
|
2022-03-24T06:05:41.000Z
|
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX complaint model model features.
Define constants here that are common across all models
including features names, label and size of vocabulary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text, List
# At least one feature is needed.
# feature name, feature dimensionality
ONE_HOT_FEATURES = {
"product": 11,
"sub_product": 45,
"company_response": 5,
"state": 60,
"issue": 90,
}
# feature name, bucket count
BUCKET_FEATURES = {"zip_code": 10}
# feature name, value is unused
TEXT_FEATURES = {"consumer_complaint_narrative": None}
# Keys
LABEL_KEY = "consumer_disputed"
def transformed_name(key: Text) -> Text:
"""Generate the name of the transformed feature from original name."""
return key + "_xf"
def vocabulary_name(key: Text) -> Text:
"""Generate the name of the vocabulary feature from original name."""
return key + "_vocab"
def transformed_names(keys: List[Text]) -> List[Text]:
"""Transform multiple feature names at once."""
return [transformed_name(key) for key in keys]
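if __name__ == "__main__":
    # Minimal self-check of the naming helpers above.
    assert transformed_name("zip_code") == "zip_code_xf"
    assert vocabulary_name("product") == "product_vocab"
    assert transformed_names(["state", "issue"]) == ["state_xf", "issue_xf"]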
| 28.819672
| 74
| 0.737201
|
54e44836e6f3eadc3d11c813292aeec040cfe482
| 1,419
|
py
|
Python
|
CodeWars/Python/Terminal Game #2.py
|
nirgn975/CodeWars
|
657924b16ffb210e620416bcfbdd2f6debc5e41b
|
[
"BSD-3-Clause"
] | 28
|
2017-01-13T11:00:21.000Z
|
2020-08-03T11:51:47.000Z
|
CodeWars/Python/Terminal Game #2.py
|
nirgn975/CodeWars
|
657924b16ffb210e620416bcfbdd2f6debc5e41b
|
[
"BSD-3-Clause"
] | 3
|
2017-08-02T07:06:04.000Z
|
2020-04-04T21:12:41.000Z
|
CodeWars/Python/Terminal Game #2.py
|
nirgn975/CodeWars
|
657924b16ffb210e620416bcfbdd2f6debc5e41b
|
[
"BSD-3-Clause"
] | 15
|
2017-08-02T16:47:00.000Z
|
2020-07-31T17:26:04.000Z
|
"""
Create the hero move method
Create a move method for your hero to move through the level.
Adjust the hero's position by changing the position attribute. The level is a grid with the following values.
00 01 02 03 04
10 11 12 13 14
20 21 22 23 24
30 31 32 33 34
40 41 42 43 44
The dir argument will be a string
```
up
down
left
right
```
If the position is not inside the level grid, the method should raise an error and not move the hero.
"""
class Hero:
def __init__(self):
self.position = '00'
self.downLimit = 4
self.upLimit = 0
self.rightLimit = 4
self.leftLimit = 0
def move(self, dir):
self.position = int(self.position)
        firstNumber = self.position // 10   # row (tens digit)
        secondNumber = self.position % 10   # column (ones digit)
if dir == 'down' and firstNumber < self.downLimit:
self.position += 10
elif dir == 'up' and firstNumber > self.upLimit:
self.position -= 10
elif dir == 'left' and secondNumber > self.leftLimit:
self.position -= 1
elif dir == 'right' and secondNumber < self.rightLimit:
self.position += 1
else:
self.position = str(self.position)
raise Exception('new position is out of bounds')
if self.position < 10:
self.position = '0{}'.format(str(self.position))
else:
self.position = str(self.position)
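# Minimal usage sketch of the Hero class above:
if __name__ == '__main__':
    hero = Hero()
    hero.move('right')    # '00' -> '01'
    hero.move('down')     # '01' -> '11'
    print(hero.position)  # 11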
| 26.277778
| 109
| 0.614517
|
204312cba60d8288e3c917c5f96deac86868c9ce
| 19,044
|
py
|
Python
|
patron/tests/unit/api/openstack/compute/test_images.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
patron/tests/unit/api/openstack/compute/test_images.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
patron/tests/unit/api/openstack/compute/test_images.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests of the new image services, both as a service layer,
and as a WSGI layer
"""
import copy
import mock
import six.moves.urllib.parse as urlparse
import webob
from patron.api.openstack.compute import images
from patron.api.openstack.compute.plugins.v3 import images as images_v21
from patron.api.openstack.compute.views import images as images_view
from patron import exception
from patron.image import glance
from patron import test
from patron.tests.unit.api.openstack import fakes
from patron.tests.unit import image_fixtures
from patron.tests.unit import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
NOW_API_FORMAT = "2010-10-11T10:30:22Z"
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
class ImagesControllerTestV21(test.NoDBTestCase):
"""Test of the OpenStack API /images application controller w/Glance.
"""
image_controller_class = images_v21.ImagesController
url_base = '/v3'
bookmark_base = ''
http_request = fakes.HTTPRequestV3
def setUp(self):
"""Run before each test."""
super(ImagesControllerTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fakes.stub_out_compute_api_backup(self.stubs)
self.controller = self.image_controller_class()
self.url_prefix = "http://localhost%s/images" % self.url_base
self.bookmark_prefix = "http://localhost%s/images" % self.bookmark_base
self.uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
self.server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
self.server_href = (
"http://localhost%s/servers/%s" % (self.url_base,
self.server_uuid))
self.server_bookmark = (
"http://localhost%s/servers/%s" % (self.bookmark_base,
self.server_uuid))
self.alternate = "%s/images/%s"
self.expected_image_123 = {
"image": {'id': '123',
'name': 'public image',
'metadata': {'key1': 'value1'},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'ACTIVE',
'minDisk': 10,
'progress': 100,
'minRam': 128,
"links": [{
"rel": "self",
"href": "%s/123" % self.url_prefix
},
{
"rel": "bookmark",
"href":
"%s/123" % self.bookmark_prefix
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": self.alternate %
(glance.generate_glance_url(),
123),
}],
},
}
self.expected_image_124 = {
"image": {'id': '124',
'name': 'queued snapshot',
'metadata': {
u'instance_uuid': self.server_uuid,
u'user_id': u'fake',
},
'updated': NOW_API_FORMAT,
'created': NOW_API_FORMAT,
'status': 'SAVING',
'progress': 25,
'minDisk': 0,
'minRam': 0,
'server': {
'id': self.server_uuid,
"links": [{
"rel": "self",
"href": self.server_href,
},
{
"rel": "bookmark",
"href": self.server_bookmark,
}],
},
"links": [{
"rel": "self",
"href": "%s/124" % self.url_prefix
},
{
"rel": "bookmark",
"href":
"%s/124" % self.bookmark_prefix
},
{
"rel": "alternate",
"type":
"application/vnd.openstack.image",
"href": self.alternate %
(glance.generate_glance_url(),
124),
}],
},
}
@mock.patch('patron.image.api.API.get', return_value=IMAGE_FIXTURES[0])
def test_get_image(self, get_mocked):
request = self.http_request.blank(self.url_base + 'images/123')
actual_image = self.controller.show(request, '123')
self.assertThat(actual_image,
matchers.DictMatches(self.expected_image_123))
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('patron.image.api.API.get', return_value=IMAGE_FIXTURES[1])
def test_get_image_with_custom_prefix(self, _get_mocked):
self.flags(osapi_compute_link_prefix='https://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
fake_req = self.http_request.blank(self.url_base + 'images/124')
actual_image = self.controller.show(fake_req, '124')
expected_image = self.expected_image_124
expected_image["image"]["links"][0]["href"] = (
"https://zoo.com:42%s/images/124" % self.url_base)
expected_image["image"]["links"][1]["href"] = (
"https://zoo.com:42%s/images/124" % self.bookmark_base)
expected_image["image"]["links"][2]["href"] = (
"http://circus.com:34/images/124")
expected_image["image"]["server"]["links"][0]["href"] = (
"https://zoo.com:42%s/servers/%s" % (self.url_base,
self.server_uuid))
expected_image["image"]["server"]["links"][1]["href"] = (
"https://zoo.com:42%s/servers/%s" % (self.bookmark_base,
self.server_uuid))
self.assertThat(actual_image, matchers.DictMatches(expected_image))
@mock.patch('patron.image.api.API.get',
side_effect=exception.ImageNotFound(image_id=''))
def test_get_image_404(self, _get_mocked):
fake_req = self.http_request.blank(self.url_base + 'images/unknown')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, fake_req, 'unknown')
@mock.patch('patron.image.api.API.get_all', return_value=IMAGE_FIXTURES)
def test_get_image_details(self, get_all_mocked):
request = self.http_request.blank(self.url_base + 'images/detail')
response = self.controller.detail(request)
get_all_mocked.assert_called_once_with(mock.ANY, filters={})
response_list = response["images"]
image_125 = copy.deepcopy(self.expected_image_124["image"])
image_125['id'] = '125'
image_125['name'] = 'saving snapshot'
image_125['progress'] = 50
image_125["links"][0]["href"] = "%s/125" % self.url_prefix
image_125["links"][1]["href"] = "%s/125" % self.bookmark_prefix
image_125["links"][2]["href"] = (
"%s/images/125" % glance.generate_glance_url())
image_126 = copy.deepcopy(self.expected_image_124["image"])
image_126['id'] = '126'
image_126['name'] = 'active snapshot'
image_126['status'] = 'ACTIVE'
image_126['progress'] = 100
image_126["links"][0]["href"] = "%s/126" % self.url_prefix
image_126["links"][1]["href"] = "%s/126" % self.bookmark_prefix
image_126["links"][2]["href"] = (
"%s/images/126" % glance.generate_glance_url())
image_127 = copy.deepcopy(self.expected_image_124["image"])
image_127['id'] = '127'
image_127['name'] = 'killed snapshot'
image_127['status'] = 'ERROR'
image_127['progress'] = 0
image_127["links"][0]["href"] = "%s/127" % self.url_prefix
image_127["links"][1]["href"] = "%s/127" % self.bookmark_prefix
image_127["links"][2]["href"] = (
"%s/images/127" % glance.generate_glance_url())
image_128 = copy.deepcopy(self.expected_image_124["image"])
image_128['id'] = '128'
image_128['name'] = 'deleted snapshot'
image_128['status'] = 'DELETED'
image_128['progress'] = 0
image_128["links"][0]["href"] = "%s/128" % self.url_prefix
image_128["links"][1]["href"] = "%s/128" % self.bookmark_prefix
image_128["links"][2]["href"] = (
"%s/images/128" % glance.generate_glance_url())
image_129 = copy.deepcopy(self.expected_image_124["image"])
image_129['id'] = '129'
image_129['name'] = 'pending_delete snapshot'
image_129['status'] = 'DELETED'
image_129['progress'] = 0
image_129["links"][0]["href"] = "%s/129" % self.url_prefix
image_129["links"][1]["href"] = "%s/129" % self.bookmark_prefix
image_129["links"][2]["href"] = (
"%s/images/129" % glance.generate_glance_url())
image_130 = copy.deepcopy(self.expected_image_123["image"])
image_130['id'] = '130'
image_130['name'] = None
image_130['metadata'] = {}
image_130['minDisk'] = 0
image_130['minRam'] = 0
image_130["links"][0]["href"] = "%s/130" % self.url_prefix
image_130["links"][1]["href"] = "%s/130" % self.bookmark_prefix
image_130["links"][2]["href"] = (
"%s/images/130" % glance.generate_glance_url())
image_131 = copy.deepcopy(self.expected_image_123["image"])
image_131['id'] = '131'
image_131['name'] = None
image_131['metadata'] = {}
image_131['minDisk'] = 0
image_131['minRam'] = 0
image_131["links"][0]["href"] = "%s/131" % self.url_prefix
image_131["links"][1]["href"] = "%s/131" % self.bookmark_prefix
image_131["links"][2]["href"] = (
"%s/images/131" % glance.generate_glance_url())
expected = [self.expected_image_123["image"],
self.expected_image_124["image"],
image_125, image_126, image_127,
image_128, image_129, image_130,
image_131]
self.assertThat(expected, matchers.DictListMatches(response_list))
@mock.patch('patron.image.api.API.get_all')
def test_get_image_details_with_limit(self, get_all_mocked):
request = self.http_request.blank(self.url_base +
'images/detail?limit=2')
self.controller.detail(request)
get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={})
@mock.patch('patron.image.api.API.get_all')
def test_get_image_details_with_limit_and_page_size(self, get_all_mocked):
request = self.http_request.blank(
self.url_base + 'images/detail?limit=2&page_size=1')
self.controller.detail(request)
get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={},
page_size=1)
@mock.patch('patron.image.api.API.get_all')
def _detail_request(self, filters, request, get_all_mocked):
self.controller.detail(request)
get_all_mocked.assert_called_once_with(mock.ANY, filters=filters)
def test_image_detail_filter_with_name(self):
filters = {'name': 'testname'}
request = self.http_request.blank(self.url_base + 'images/detail'
'?name=testname')
self._detail_request(filters, request)
def test_image_detail_filter_with_status(self):
filters = {'status': 'active'}
request = self.http_request.blank(self.url_base + 'images/detail'
'?status=ACTIVE')
self._detail_request(filters, request)
def test_image_detail_filter_with_property(self):
filters = {'property-test': '3'}
request = self.http_request.blank(self.url_base + 'images/detail'
'?property-test=3')
self._detail_request(filters, request)
def test_image_detail_filter_server_href(self):
filters = {'property-instance_uuid': self.uuid}
request = self.http_request.blank(
self.url_base + 'images/detail?server=' + self.uuid)
self._detail_request(filters, request)
def test_image_detail_filter_server_uuid(self):
filters = {'property-instance_uuid': self.uuid}
request = self.http_request.blank(
self.url_base + 'images/detail?server=' + self.uuid)
self._detail_request(filters, request)
def test_image_detail_filter_changes_since(self):
filters = {'changes-since': '2011-01-24T17:08Z'}
request = self.http_request.blank(self.url_base + 'images/detail'
'?changes-since=2011-01-24T17:08Z')
self._detail_request(filters, request)
def test_image_detail_filter_with_type(self):
filters = {'property-image_type': 'BASE'}
request = self.http_request.blank(
self.url_base + 'images/detail?type=BASE')
self._detail_request(filters, request)
def test_image_detail_filter_not_supported(self):
filters = {'status': 'active'}
request = self.http_request.blank(
self.url_base + 'images/detail?status='
'ACTIVE&UNSUPPORTEDFILTER=testname')
self._detail_request(filters, request)
def test_image_detail_no_filters(self):
filters = {}
request = self.http_request.blank(self.url_base + 'images/detail')
self._detail_request(filters, request)
@mock.patch('patron.image.api.API.get_all', side_effect=exception.Invalid)
def test_image_detail_invalid_marker(self, _get_all_mocked):
request = self.http_request.blank(self.url_base + '?marker=invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail,
request)
def test_generate_alternate_link(self):
view = images_view.ViewBuilder()
request = self.http_request.blank(self.url_base + 'images/1')
generated_url = view._get_alternate_link(request, 1)
actual_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(generated_url, actual_url)
def _check_response(self, controller_method, response, expected_code):
self.assertEqual(expected_code, controller_method.wsgi_code)
@mock.patch('patron.image.api.API.delete')
def test_delete_image(self, delete_mocked):
request = self.http_request.blank(self.url_base + 'images/124')
request.method = 'DELETE'
response = self.controller.delete(request, '124')
self._check_response(self.controller.delete, response, 204)
delete_mocked.assert_called_once_with(mock.ANY, '124')
@mock.patch('patron.image.api.API.delete',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_delete_deleted_image(self, _delete_mocked):
# If you try to delete a deleted image, you get back 403 Forbidden.
request = self.http_request.blank(self.url_base + 'images/123')
request.method = 'DELETE'
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
request, '123')
@mock.patch('patron.image.api.API.delete',
side_effect=exception.ImageNotFound(image_id='123'))
def test_delete_image_not_found(self, _delete_mocked):
request = self.http_request.blank(self.url_base + 'images/300')
request.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, request, '300')
@mock.patch('patron.image.api.API.get_all', return_value=[IMAGE_FIXTURES[0]])
def test_get_image_next_link(self, get_all_mocked):
request = self.http_request.blank(
            self.url_base + 'images?limit=1')
response = self.controller.index(request)
response_links = response['images_links']
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual(self.url_base + '/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['1'], 'marker': [IMAGE_FIXTURES[0]['id']]},
matchers.DictMatches(params))
@mock.patch('patron.image.api.API.get_all', return_value=[IMAGE_FIXTURES[0]])
def test_get_image_details_next_link(self, get_all_mocked):
request = self.http_request.blank(
self.url_base + 'images/detail?limit=1')
response = self.controller.detail(request)
response_links = response['images_links']
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual(self.url_base + '/images/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['1'], 'marker': [IMAGE_FIXTURES[0]['id']]},
matchers.DictMatches(params))
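    # Pagination contract exercised by the two tests above: the response carries
    # an 'images_links' entry whose href repeats the requested limit and sets
    # 'marker' to the id of the last returned image; a client fetches the next
    # page simply by following that href.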
class ImagesControllerTestV2(ImagesControllerTestV21):
image_controller_class = images.Controller
url_base = '/v2/fake'
bookmark_base = '/fake'
http_request = fakes.HTTPRequest
def _check_response(self, controller_method, response, expected_code):
self.assertEqual(expected_code, response.status_int)
| 45.451074
| 81
| 0.571309
|
262cf2c4077e40ed4ff29131da314669353a5a59
| 14,929
|
py
|
Python
|
CartoonGAN.py
|
manestay/CartoonGAN-4731
|
f8580a7f027d6505ce0e5c15314e2b53fe18b69e
|
[
"MIT"
] | 4
|
2019-01-23T16:00:06.000Z
|
2020-05-06T12:45:00.000Z
|
CartoonGAN.py
|
manestay/CartoonGAN-4731
|
f8580a7f027d6505ce0e5c15314e2b53fe18b69e
|
[
"MIT"
] | null | null | null |
CartoonGAN.py
|
manestay/CartoonGAN-4731
|
f8580a7f027d6505ce0e5c15314e2b53fe18b69e
|
[
"MIT"
] | 1
|
2019-12-03T13:16:15.000Z
|
2019-12-03T13:16:15.000Z
|
import os, time, pickle, argparse, networks, utils
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import transforms
from edge_promoting import edge_promoting
import random
parser = argparse.ArgumentParser()
parser.add_argument('--name', required=False, default='project_name', help='project name')
parser.add_argument('--src_data', required=False, default='src_data_path', help='src data path')
parser.add_argument('--tgt_data', required=False, default='tgt_data_path', help='tgt data path')
parser.add_argument('--vgg_model', required=False, default='pre_trained_VGG19_model_path/vgg19.pth', help='pre-trained VGG19 model path')
parser.add_argument('--in_ngc', type=int, default=3, help='input channel for generator')
parser.add_argument('--out_ngc', type=int, default=3, help='output channel for generator')
parser.add_argument('--in_ndc', type=int, default=3, help='input channel for discriminator')
parser.add_argument('--out_ndc', type=int, default=1, help='output channel for discriminator')
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=32)
parser.add_argument('--nb', type=int, default=8, help='the number of resnet block layer for generator')
parser.add_argument('--input_size_h', type=int, default=180, help='input size height')
parser.add_argument('--input_size_w', type=int, default=320, help='input size width')
parser.add_argument('--train_epoch', type=int, default=80)
parser.add_argument('--pre_train_epoch', type=int, default=10)
parser.add_argument('--lrD', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--lrG', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--con_lambda', type=float, default=10, help='lambda for content loss')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
parser.add_argument('--latest_generator_model', required=False, default='', help='the latest trained model path')
parser.add_argument('--latest_discriminator_model', required=False, default='', help='the latest trained model path')
# args for custom loss
parser.add_argument('--lambda_noise', default=10.0, type=float, help='training weight of the popping induced by noise')
parser.add_argument('--noise', default=.1, type=float, help='range of noise for popping reduction')
parser.add_argument('--noise_count', default=0, type=int, help='number of pixels to modify with noise')
args = parser.parse_args()
lambda_noise = args.lambda_noise
noise_range = args.noise
noise_count = args.noise_count
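# Sketch of the popping-reduction objective applied in the training loop when
# --noise_count > 0: the generator is penalised for mapping a frame and a
# pixel-perturbed copy of it to different cartoons,
#   L_pop = lambda_noise * MSE(G(x), G(x + eps)),  eps ~ U(-noise, noise)
# with eps non-zero at noise_count random pixel locations, re-drawn every epoch.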
random.seed(1)
print('------------ Options -------------')
for k, v in sorted(vars(args).items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.backends.cudnn.enabled:
torch.backends.cudnn.benchmark = True
# results save path
if not os.path.isdir(os.path.join(args.name + '_results', 'Reconstruction')):
os.makedirs(os.path.join(args.name + '_results', 'Reconstruction'))
if not os.path.isdir(os.path.join(args.name + '_results', 'Transfer')):
os.makedirs(os.path.join(args.name + '_results', 'Transfer'))
# edge-promoting
if not os.path.isdir(os.path.join('data', args.tgt_data, 'pair')):
print('edge-promoting start!!')
edge_promoting(os.path.join('data', args.tgt_data, 'train'), os.path.join('data', args.tgt_data, 'pair'))
else:
print('edge-promoting already done')
# data_loader
src_transform = transforms.Compose([
transforms.Resize((args.input_size_h, args.input_size_w)),
# transforms.CenterCrop((args.input_size_h, args.input_size_w)),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
tgt_transform = transforms.Compose([
transforms.Resize((args.input_size_h, args.input_size_w * 2)),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_loader_src = utils.data_load(os.path.join('data', args.src_data), 'train', src_transform, args.batch_size, shuffle=True, drop_last=True)
train_loader_tgt = utils.data_load(os.path.join('data', args.tgt_data), 'pair', tgt_transform, args.batch_size, shuffle=True, drop_last=True)
test_loader_src = utils.data_load(os.path.join('data', args.src_data), 'test', src_transform, 1, shuffle=True, drop_last=True)
# network
G = networks.generator(args.in_ngc, args.out_ngc, args.ngf, args.nb)
if args.latest_generator_model != '':
if torch.cuda.is_available():
G.load_state_dict(torch.load(args.latest_generator_model))
else:
# cpu mode
G.load_state_dict(torch.load(args.latest_generator_model, map_location=lambda storage, loc: storage))
D = networks.discriminator(args.in_ndc, args.out_ndc, args.ndf)
if args.latest_discriminator_model != '':
if torch.cuda.is_available():
D.load_state_dict(torch.load(args.latest_discriminator_model))
else:
D.load_state_dict(torch.load(args.latest_discriminator_model, map_location=lambda storage, loc: storage))
VGG = networks.VGG19(init_weights=args.vgg_model, feature_mode=True)
G.to(device)
D.to(device)
VGG.to(device)
G.train()
D.train()
VGG.eval()
print('---------- Networks initialized -------------')
utils.print_network(G)
utils.print_network(D)
utils.print_network(VGG)
print('-----------------------------------------------')
# loss
BCE_loss = nn.BCELoss().to(device)
L1_loss = nn.L1Loss().to(device)
L1_loss2 = nn.L1Loss().to(device)
MSE_loss = nn.MSELoss().to(device)
# Adam optimizer
G_optimizer = optim.Adam(G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
D_optimizer = optim.Adam(D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))
G_scheduler = optim.lr_scheduler.MultiStepLR(optimizer=G_optimizer, milestones=[args.train_epoch // 2, args.train_epoch // 4 * 3], gamma=0.1)
D_scheduler = optim.lr_scheduler.MultiStepLR(optimizer=D_optimizer, milestones=[args.train_epoch // 2, args.train_epoch // 4 * 3], gamma=0.1)
pre_train_hist = {}
pre_train_hist['Recon_loss'] = []
pre_train_hist['per_epoch_time'] = []
pre_train_hist['total_time'] = []
""" Pre-train reconstruction """
if args.latest_generator_model == '':
print('Pre-training start!')
start_time = time.time()
for epoch in range(args.pre_train_epoch):
epoch_start_time = time.time()
Recon_losses = []
for x, _ in train_loader_src:
x = x.to(device)
# train generator G
G_optimizer.zero_grad()
x_feature = VGG((x + 1) / 2)
G_ = G(x)
G_feature = VGG((G_ + 1) / 2)
Recon_loss = 10 * L1_loss(G_feature, x_feature.detach())
Recon_losses.append(Recon_loss.item())
pre_train_hist['Recon_loss'].append(Recon_loss.item())
Recon_loss.backward()
G_optimizer.step()
            break  # NOTE: only the first batch of each pre-training epoch is used here
per_epoch_time = time.time() - epoch_start_time
pre_train_hist['per_epoch_time'].append(per_epoch_time)
print('[%d/%d] - time: %.2f, Recon loss: %.3f' % ((epoch + 1), args.pre_train_epoch, per_epoch_time, torch.mean(torch.FloatTensor(Recon_losses))))
total_time = time.time() - start_time
pre_train_hist['total_time'].append(total_time)
with open(os.path.join(args.name + '_results', 'pre_train_hist.pkl'), 'wb') as f:
pickle.dump(pre_train_hist, f)
with torch.no_grad():
G.eval()
for n, (x, _) in enumerate(train_loader_src):
x = x.to(device)
G_recon = G(x)
result = torch.cat((x[0], G_recon[0]), 2)
path = os.path.join(args.name + '_results', 'Reconstruction', args.name + '_train_recon_' + str(n + 1) + '.png')
plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)
if n == 4:
break
for n, (x, _) in enumerate(test_loader_src):
x = x.to(device)
G_recon = G(x)
result = torch.cat((x[0], G_recon[0]), 2)
path = os.path.join(args.name + '_results', 'Reconstruction', args.name + '_test_recon_' + str(n + 1) + '.png')
plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)
if n == 4:
break
else:
print('Load the latest generator model, no need to pre-train')
train_hist = {}
train_hist['Disc_loss'] = []
train_hist['Gen_loss'] = []
train_hist['Con_loss'] = []
train_hist['Noise_loss'] = []
train_hist['per_epoch_time'] = []
train_hist['total_time'] = []
print('training start!')
start_time = time.time()
real = torch.ones(args.batch_size, 1, args.input_size_h // 4, args.input_size_w // 4).to(device)
fake = torch.zeros(args.batch_size, 1, args.input_size_h // 4, args.input_size_w // 4).to(device)
for epoch in range(args.train_epoch):
epoch_start_time = time.time()
G.train()
G_scheduler.step()
D_scheduler.step()
Disc_losses = []
Gen_losses = []
Con_losses = []
Noise_losses = []
if noise_count:
noiseimg = torch.zeros((args.batch_size, 3, args.input_size_h, args.input_size_w))
for ii in range(noise_count):
xx = random.randrange(args.input_size_w)
yy = random.randrange(args.input_size_h)
for img_num in range(noiseimg.shape[0]):
noiseimg[img_num][0][yy][xx] += random.uniform(-noise_range, noise_range)
noiseimg[img_num][1][yy][xx] += random.uniform(-noise_range, noise_range)
noiseimg[img_num][2][yy][xx] += random.uniform(-noise_range, noise_range)
for (x, _), (y, _) in zip(train_loader_src, train_loader_tgt):
#prepare a noise image
if noise_count:
noisy_x = x.clone()
noisy_x = noisy_x + noiseimg
noisy_x = noisy_x.to(device)
e = y[:, :, :, args.input_size_w:]
y = y[:, :, :, :args.input_size_w]
x, y, e = x.to(device), y.to(device), e.to(device)
# train D
D_optimizer.zero_grad()
D_real = D(y)
D_real_loss = BCE_loss(D_real, real)
G_ = G(x)
D_fake = D(G_)
D_fake_loss = BCE_loss(D_fake, fake)
D_edge = D(e)
D_edge_loss = BCE_loss(D_edge, fake)
Disc_loss = D_real_loss + D_fake_loss + D_edge_loss
Disc_losses.append(Disc_loss.item())
train_hist['Disc_loss'].append(Disc_loss.item())
Disc_loss.backward()
D_optimizer.step()
# train G
G_optimizer.zero_grad()
#add noise image to source image
if noise_count:
noisy_G = G(noisy_x)
G_ = G(x)
D_fake = D(G_)
D_fake_loss = BCE_loss(D_fake, real)
x_feature = VGG((x + 1) / 2)
G_feature = VGG((G_ + 1) / 2)
Con_loss = args.con_lambda * L1_loss(G_feature, x_feature.detach())
#train noise
if noise_count:
G_pop = lambda_noise * MSE_loss(G_, noisy_G)
Gen_loss = D_fake_loss + Con_loss + G_pop
Gen_losses.append(D_fake_loss.item())
train_hist['Gen_loss'].append(D_fake_loss.item())
Con_losses.append(Con_loss.item())
train_hist['Con_loss'].append(Con_loss.item())
Noise_losses.append(G_pop.item())
train_hist['Noise_loss'].append(G_pop.item())
else:
            Gen_loss = D_fake_loss + Con_loss
Gen_losses.append(D_fake_loss.item())
train_hist['Gen_loss'].append(D_fake_loss.item())
Con_losses.append(Con_loss.item())
train_hist['Con_loss'].append(Con_loss.item())
Gen_loss.backward()
G_optimizer.step()
per_epoch_time = time.time() - epoch_start_time
train_hist['per_epoch_time'].append(per_epoch_time)
if noise_count:
print(
'[%d/%d] - time: %.2f, Disc loss: %.3f, Gen loss: %.3f, Con loss: %.3f, Noise loss: %.3f' % ((epoch + 1), args.train_epoch, per_epoch_time, torch.mean(torch.FloatTensor(Disc_losses)),
torch.mean(torch.FloatTensor(Gen_losses)), torch.mean(torch.FloatTensor(Con_losses)), torch.mean(torch.FloatTensor(Noise_losses))))
else:
print(
'[%d/%d] - time: %.2f, Disc loss: %.3f, Gen loss: %.3f, Con loss: %.3f' % ((epoch + 1), args.train_epoch, per_epoch_time, torch.mean(torch.FloatTensor(Disc_losses)),
torch.mean(torch.FloatTensor(Gen_losses)), torch.mean(torch.FloatTensor(Con_losses))))
if epoch % 2 == 1 or epoch == args.train_epoch - 1:
with torch.no_grad():
G.eval()
for n, (x, _) in enumerate(train_loader_src):
x = x.to(device)
G_recon = G(x)
result = torch.cat((x[0], G_recon[0]), 2)
path = os.path.join(args.name + '_results', 'Transfer', str(epoch+1) + '_epoch_' + args.name + '_train_' + str(n + 1) + '.png')
plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)
if n == 4:
break
for n, (x, _) in enumerate(test_loader_src):
x = x.to(device)
G_recon = G(x)
result = torch.cat((x[0], G_recon[0]), 2)
path = os.path.join(args.name + '_results', 'Transfer', str(epoch+1) + '_epoch_' + args.name + '_test_' + str(n + 1) + '.png')
plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)
if n == 4:
break
torch.save(G.state_dict(), os.path.join(args.name + '_results', 'generator_{}.pkl'.format(epoch + 1)))
torch.save(D.state_dict(), os.path.join(args.name + '_results', 'discriminator_{}.pkl'.format(epoch + 1)))
total_time = time.time() - start_time
train_hist['total_time'].append(total_time)
print("Avg one epoch time: %.2f, total %d epochs time: %.2f" % (torch.mean(torch.FloatTensor(train_hist['per_epoch_time'])), args.train_epoch, total_time))
print("Training finish!... save training results")
torch.save(G.state_dict(), os.path.join(args.name + '_results', 'generator_param.pkl'))
torch.save(D.state_dict(), os.path.join(args.name + '_results', 'discriminator_param.pkl'))
with open(os.path.join(args.name + '_results', 'train_hist.pkl'), 'wb') as f:
pickle.dump(train_hist, f)
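# Example invocation (directory names under ./data and the VGG weight path are
# placeholders for your own setup):
#   python CartoonGAN.py --name shinkai --src_data photos --tgt_data shinkai \
#       --vgg_model pre_trained_VGG19_model_path/vgg19.pth \
#       --noise_count 100 --noise 0.1 --lambda_noise 10.0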
| 44.966867
| 200
| 0.627571
|
c73d81c7983cc4edb8ca59160e18c34386bb46ea
| 7,841
|
py
|
Python
|
src/clients/python/examples/simple_grpc_shm_client.py
|
szalpal/server
|
85bf86813bce30a6b8e9f66bde057e2145530b7e
|
[
"BSD-3-Clause"
] | 1
|
2020-12-25T02:01:38.000Z
|
2020-12-25T02:01:38.000Z
|
src/clients/python/examples/simple_grpc_shm_client.py
|
szalpal/server
|
85bf86813bce30a6b8e9f66bde057e2145530b7e
|
[
"BSD-3-Clause"
] | null | null | null |
src/clients/python/examples/simple_grpc_shm_client.py
|
szalpal/server
|
85bf86813bce30a6b8e9f66bde057e2145530b7e
|
[
"BSD-3-Clause"
] | 1
|
2021-12-17T03:07:54.000Z
|
2021-12-17T03:07:54.000Z
|
#!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import numpy as np
import os
import sys
from builtins import range
from ctypes import *
import tritonclient.grpc as grpcclient
from tritonclient import utils
import tritonclient.utils.shared_memory as shm
FLAGS = None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-u',
'--url',
type=str,
required=False,
default='localhost:8001',
help='Inference server URL. Default is localhost:8001.')
FLAGS = parser.parse_args()
try:
triton_client = grpcclient.InferenceServerClient(url=FLAGS.url,
verbose=FLAGS.verbose)
except Exception as e:
print("channel creation failed: " + str(e))
sys.exit(1)
# To make sure no shared memory regions are registered with the
# server.
triton_client.unregister_system_shared_memory()
triton_client.unregister_cuda_shared_memory()
# We use a simple model that takes 2 input tensors of 16 integers
# each and returns 2 output tensors of 16 integers each. One
# output tensor is the element-wise sum of the inputs and one
# output is the element-wise difference.
model_name = "simple"
model_version = ""
# Create the data for the two input tensors. Initialize the first
# to unique integers and the second to all ones.
input0_data = np.arange(start=0, stop=16, dtype=np.int32)
input1_data = np.ones(shape=16, dtype=np.int32)
input_byte_size = input0_data.size * input0_data.itemsize
output_byte_size = input_byte_size
# Create Output0 and Output1 in Shared Memory and store shared memory handles
shm_op0_handle = shm.create_shared_memory_region("output0_data",
"/output0_simple",
output_byte_size)
shm_op1_handle = shm.create_shared_memory_region("output1_data",
"/output1_simple",
output_byte_size)
# Register Output0 and Output1 shared memory with Triton Server
triton_client.register_system_shared_memory("output0_data",
"/output0_simple",
output_byte_size)
triton_client.register_system_shared_memory("output1_data",
"/output1_simple",
output_byte_size)
# Create Input0 and Input1 in Shared Memory and store shared memory handles
shm_ip0_handle = shm.create_shared_memory_region("input0_data",
"/input0_simple",
input_byte_size)
shm_ip1_handle = shm.create_shared_memory_region("input1_data",
"/input1_simple",
input_byte_size)
# Put input data values into shared memory
shm.set_shared_memory_region(shm_ip0_handle, [input0_data])
shm.set_shared_memory_region(shm_ip1_handle, [input1_data])
# Register Input0 and Input1 shared memory with Triton Server
triton_client.register_system_shared_memory("input0_data", "/input0_simple",
input_byte_size)
triton_client.register_system_shared_memory("input1_data", "/input1_simple",
input_byte_size)
# Set the parameters to use data from shared memory
inputs = []
inputs.append(grpcclient.InferInput('INPUT0', [1, 16], "INT32"))
inputs[-1].set_shared_memory("input0_data", input_byte_size)
inputs.append(grpcclient.InferInput('INPUT1', [1, 16], "INT32"))
inputs[-1].set_shared_memory("input1_data", input_byte_size)
outputs = []
outputs.append(grpcclient.InferRequestedOutput('OUTPUT0'))
outputs[-1].set_shared_memory("output0_data", output_byte_size)
outputs.append(grpcclient.InferRequestedOutput('OUTPUT1'))
outputs[-1].set_shared_memory("output1_data", output_byte_size)
results = triton_client.infer(model_name=model_name,
inputs=inputs,
outputs=outputs)
# Read results from the shared memory.
output0 = results.get_output("OUTPUT0")
if output0 is not None:
output0_data = shm.get_contents_as_numpy(
shm_op0_handle, utils.triton_to_np_dtype(output0.datatype),
output0.shape)
else:
print("OUTPUT0 is missing in the response.")
sys.exit(1)
output1 = results.get_output("OUTPUT1")
if output1 is not None:
output1_data = shm.get_contents_as_numpy(
shm_op1_handle, utils.triton_to_np_dtype(output1.datatype),
output1.shape)
else:
print("OUTPUT1 is missing in the response.")
sys.exit(1)
for i in range(16):
print(
str(input0_data[i]) + " + " + str(input1_data[i]) + " = " +
str(output0_data[0][i]))
print(
str(input0_data[i]) + " - " + str(input1_data[i]) + " = " +
str(output1_data[0][i]))
if (input0_data[i] + input1_data[i]) != output0_data[0][i]:
print("shm infer error: incorrect sum")
sys.exit(1)
if (input0_data[i] - input1_data[i]) != output1_data[0][i]:
print("shm infer error: incorrect difference")
sys.exit(1)
print(triton_client.get_system_shared_memory_status())
triton_client.unregister_system_shared_memory()
shm.destroy_shared_memory_region(shm_ip0_handle)
shm.destroy_shared_memory_region(shm_ip1_handle)
shm.destroy_shared_memory_region(shm_op0_handle)
shm.destroy_shared_memory_region(shm_op1_handle)
print('PASS: shm')
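# Usage sketch: start tritonserver with the example "simple" model loaded, then
# run this client against it (the URL flag defaults to localhost:8001):
#   python simple_grpc_shm_client.py -v -u localhost:8001
# On success the 16 sums and differences are printed, followed by "PASS: shm".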
| 43.804469
| 81
| 0.630914
|
97ad0fdee64acdfb26b729a103344595171f46ad
| 8,702
|
py
|
Python
|
change_detection_pytorch/datasets/custom.py
|
mesuga-reymond/change_detection.pytorch
|
7f501b8a52e7e47f2ed53b707cb3d6f5f3a76962
|
[
"MIT"
] | null | null | null |
change_detection_pytorch/datasets/custom.py
|
mesuga-reymond/change_detection.pytorch
|
7f501b8a52e7e47f2ed53b707cb3d6f5f3a76962
|
[
"MIT"
] | null | null | null |
change_detection_pytorch/datasets/custom.py
|
mesuga-reymond/change_detection.pytorch
|
7f501b8a52e7e47f2ed53b707cb3d6f5f3a76962
|
[
"MIT"
] | 1
|
2022-02-02T13:28:42.000Z
|
2022-02-02T13:28:42.000Z
|
import glob
import os
import os.path as osp
from collections import OrderedDict
from functools import reduce
import albumentations as A
import cv2
import numpy as np
from albumentations.pytorch import ToTensorV2
from torch.utils.data import Dataset
from .transforms.albu import ToTensorTest
class CustomDataset(Dataset):
"""Custom datasets for change detection. An example of file structure
is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── train
│ │ │ ├── img1_dir
    │   │   │   ├── img2_dir
│ │ │ ├── label_dir
│ │ ├── val
│ │ │ ├── img1_dir
    │   │   │   ├── img2_dir
│ │ │ ├── label_dir
    The imgs/gt pairs of CustomDataset should share the same filename except for the suffix.
A valid imgs/gt filename pair should be like ``xxx{img_suffix}`` and
``xxx{seg_map_suffix}`` (extension is also included in the suffix).
Args:
img_dir (str): Path to image directory.
sub_dir_1 (str): Path to the directory of the first temporal images.
e.g. 'A' in LEVIR-CD dataset (LEVIR-CD/train/A). Default: 'A'
sub_dir_2 (str): Path to the directory of the second temporal images.
e.g. 'B' in LEVIR-CD dataset (LEVIR-CD/train/B). Default: 'B'
ann_dir (str): Path to ground truth directory.
img_suffix (str): Suffix of images. Default: '.jpg'
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
        test_mode (bool): Whether to use the test mode.
        size (int): The size of input images.
        debug (bool): Whether to use debug mode, i.e. visualize augmentations.
"""
def __init__(self,
img_dir,
sub_dir_1='A',
sub_dir_2='B',
ann_dir=None,
img_suffix='.jpg',
seg_map_suffix='.png',
transform=None,
split=None,
data_root=None,
test_mode=False,
size=256,
debug=False):
self.transform = transform
self.img_dir = img_dir
self.ann_dir = ann_dir
self.img_suffix = img_suffix
self.seg_map_suffix = seg_map_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.sub_dir_1 = sub_dir_1
self.sub_dir_2 = sub_dir_2
self.size = size
self.debug = debug
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
# load annotations
self.img_infos = self.load_infos(self.img_dir, self.img_suffix,
self.seg_map_suffix, self.sub_dir_1,
self.sub_dir_2, self.ann_dir,
self.split)
# transform/augment data
if self.transform is None:
self.transform = self.get_default_transform() if not self.test_mode \
else self.get_test_transform()
# debug, visualize augmentations
if self.debug:
self.transform = A.Compose([t for t in self.transform if not isinstance(t, (A.Normalize, ToTensorV2,
ToTensorTest))])
def load_infos(self, img_dir, img_suffix, seg_map_suffix, sub_dir_1,
sub_dir_2, ann_dir, split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
            split (str|None): Split txt file. If split is specified, only
                files listed in it will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of datasets.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name)
img_info['img'] = dict(img1_path=osp.join(img_dir, sub_dir_1, img_name),
img2_path=osp.join(img_dir, sub_dir_2, img_name))
if ann_dir is not None:
seg_map_path = osp.join(ann_dir,
img_name.replace(img_suffix, seg_map_suffix))
img_info['ann'] = dict(ann_path=seg_map_path)
img_infos.append(img_info)
else:
for img in glob.glob(osp.join(img_dir, sub_dir_1, '*'+img_suffix)):
img_name = osp.basename(img)
img_info = dict(filename=img_name)
img_info['img'] = dict(img1_path=osp.join(img_dir, sub_dir_1, img_name),
img2_path=osp.join(img_dir, sub_dir_2, img_name))
if ann_dir is not None:
seg_map_path = osp.join(ann_dir,
img_name.replace(img_suffix, seg_map_suffix))
img_info['ann'] = dict(ann_path=seg_map_path)
img_infos.append(img_info)
print(f'Loaded {len(img_infos)} images')
return img_infos
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def get_default_transform(self):
"""Set the default transformation."""
default_transform = A.Compose([
A.Resize(self.size, self.size),
A.Normalize(),
ToTensorV2()
])
return default_transform
def get_test_transform(self):
"""Set the test transformation."""
pass
def get_image(self, img_info):
"""Open and read the image.
Args:
img_info (dict): a dict with img info.
Returns:
dict: image info with new keys.
"""
img1 = cv2.cvtColor(cv2.imread(img_info['img']['img1_path']), cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(cv2.imread(img_info['img']['img2_path']), cv2.COLOR_BGR2RGB)
return img1, img2
def get_gt_seg_maps(self, img_info, vis=False):
"""Open and read the ground truth.
Args:
img_info (dict): a dict with ann info.
vis (bool): Whether to use visualization (debug mode).
Returns:
dict: ann info with new keys.
"""
ann = cv2.imread(img_info['ann']['ann_path'], cv2.IMREAD_GRAYSCALE)
ann = ann / 255 if not vis else ann
return ann
def prepare_img(self, idx):
"""Get image after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Image after pipeline with new keys introduced by
pipeline.
"""
img_info = self.img_infos[idx]
img1, img2 = self.get_image(img_info)
return img1, img2, img_info['filename']
def prepare_img_ann(self, idx):
"""Get image and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Image and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
img1, img2 = self.get_image(img_info)
ann = self.get_gt_seg_maps(img_info, self.debug)
return img1, img2, ann, img_info['filename']
def format_results(self, results, **kwargs):
"""Place holder to format result to datasets specific output."""
pass
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if ann_dir is not None).
"""
raise NotImplementedError
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
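# Minimal usage sketch (dataset paths are hypothetical; __getitem__ is abstract,
# so a concrete subclass must implement it, and only the loading helpers are
# exercised here):
#   ds = CustomDataset(img_dir='LEVIR-CD/train', sub_dir_1='A', sub_dir_2='B',
#                      ann_dir='LEVIR-CD/train/label', img_suffix='.png',
#                      seg_map_suffix='.png', size=256)
#   img1, img2, ann, name = ds.prepare_img_ann(0)  # raw arrays before transform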
| 37.347639
| 112
| 0.554011
|
23a0e6295602ea6b7b22cd591010ffce2d134d01
| 2,095
|
py
|
Python
|
software/temcagt/temcagt/nodes/camera/controllers/base.py
|
htem/GridTapeStage
|
0b4764bc4ea8d64970ea481a32d6c7383d301989
|
[
"RSA-MD"
] | 2
|
2020-02-07T10:34:23.000Z
|
2021-09-24T02:28:10.000Z
|
software/temcagt/temcagt/nodes/camera/controllers/base.py
|
htem/GridTapeStage
|
0b4764bc4ea8d64970ea481a32d6c7383d301989
|
[
"RSA-MD"
] | null | null | null |
software/temcagt/temcagt/nodes/camera/controllers/base.py
|
htem/GridTapeStage
|
0b4764bc4ea8d64970ea481a32d6c7383d301989
|
[
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
import logging
import time
from .... import log
logger = log.get_logger(__name__)
#logger.addHandler(logging.StreamHandler())
#logger.setLevel(logging.DEBUG)
class NodeController(object):
def __init__(self, node):
logger.debug("NodeController[%s] __init__: %s", self, node)
self.node = node
self.state = 'init'
self.callbacks = {}
self.connect()
def connect(self):
logger.debug("NodeController[%s] connect", self)
# connect callbacks and store them in self.callbacks
# by attr (self.callbacks[attr] = [cbid0, cbid1, ...])
self.state = 'connect'
def disconnect(self):
logger.debug("NodeController[%s] disconnect", self)
self.state = 'disconnect'
if self.node is not None:
for attr in self.callbacks:
if hasattr(self.node, attr):
obj = getattr(self.node, attr)
[obj.detatch(cbid) for cbid in self.callbacks[attr]]
self.callbacks = {}
self.node = None
def until(self, test_function, timeout=0.000001):
if isinstance(test_function, (str, unicode)):
test_function = lambda state=test_function: self.state == state
logger.debug(
"NodeController[%s] until: %s", self,
getattr(test_function, '__name__', 'UNKNOWN'))
while not test_function():
#s = ""
#if hasattr(self, 'saving'):
# s = self.saving
#logger.debug(
# "%s() %s: %s", test_function.__name__, test_function(), s)
self.update(timeout=timeout)
logger.debug(
"NodeController[%s] until [done]: %s", self,
getattr(test_function, '__name__', 'UNKNOWN'))
def update(self, timeout=0.000001):
logger.debug("NodeController[%s] update", self)
raise NotImplementedError("NodeController.update is abstract")
def __del__(self):
logger.debug("NodeController[%s] __del__", self)
self.disconnect()
del self.node
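# Usage sketch (CameraController is a hypothetical subclass that implements
# update() to service node callbacks and advance self.state):
#   ctl = CameraController(node)
#   ctl.until('connect')                    # poll update() until state matches
#   ctl.until(lambda: ctl.state != 'init')  # or until any custom predicate holds
#   ctl.disconnect()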
| 32.734375
| 75
| 0.588544
|
29b3682b747c66574590de5ea70574981cc536bb
| 12,303
|
py
|
Python
|
demo/sentiment/preprocess.py
|
OleNet/Paddle
|
59271d643b13b13346889d12355611b9a2ce4e31
|
[
"Apache-2.0"
] | 1
|
2016-10-07T20:40:11.000Z
|
2016-10-07T20:40:11.000Z
|
demo/sentiment/preprocess.py
|
anuranrc/Paddle
|
21fa3eb0688459d3b71141d316e8358d31882b8d
|
[
"Apache-2.0"
] | 1
|
2017-05-26T18:33:00.000Z
|
2017-05-26T18:33:00.000Z
|
demo/sentiment/preprocess.py
|
anuranrc/Paddle
|
21fa3eb0688459d3b71141d316e8358d31882b8d
|
[
"Apache-2.0"
] | 1
|
2016-10-07T00:50:53.000Z
|
2016-10-07T00:50:53.000Z
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import random
import operator
import numpy as np
from subprocess import Popen, PIPE
from os.path import join as join_path
from optparse import OptionParser
from paddle.utils.preprocess_util import *
"""
Usage: run following command to show help message.
python preprocess.py -h
"""
def save_dict(dict, filename, is_reverse=True):
"""
Save dictionary into file.
dict: input dictionary.
filename: output file name, string.
is_reverse: True, descending order by value.
False, ascending order by value.
"""
f = open(filename, 'w')
    for k, v in sorted(dict.items(), key=operator.itemgetter(1),
                       reverse=is_reverse):
f.write('%s\t%s\n' % (k, v))
f.close()
def tokenize(sentences):
"""
Use tokenizer.perl to tokenize input sentences.
tokenizer.perl is tool of Moses.
sentences : a list of input sentences.
return: a list of processed text.
"""
dir = './data/mosesdecoder-master/scripts/tokenizer/tokenizer.perl'
tokenizer_cmd = [dir, '-l', 'en', '-q', '-']
assert isinstance(sentences, list)
text = "\n".join(sentences)
tokenizer = Popen(tokenizer_cmd, stdin=PIPE, stdout=PIPE)
tok_text, _ = tokenizer.communicate(text)
toks = tok_text.split('\n')[:-1]
return toks
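# Illustrative behaviour (requires Moses' tokenizer.perl at the path above):
#   tokenize(["It's a test."]) -> ["It 's a test ."]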
def read_lines(path):
"""
path: String, file path.
return a list of sequence.
"""
seqs = []
with open(path, 'r') as f:
for line in f.readlines():
line = line.strip()
if len(line):
seqs.append(line)
return seqs
class SentimentDataSetCreate():
"""
A class to process data for sentiment analysis task.
"""
def __init__(self,
data_path,
output_path,
                 use_tokenizer=True,
multi_lines=False):
"""
        data_path: string, training and testing dataset path
        output_path: string, output path, store processed dataset
        use_tokenizer: bool, whether to tokenize the text with tokenizer.perl
multi_lines: whether a file has multi lines.
In order to shuffle fully, it needs to read all files into
memory, then shuffle them if one file has multi lines.
"""
self.output_path = output_path
self.data_path = data_path
self.train_dir = 'train'
self.test_dir = 'test'
self.train_list = "train.list"
self.test_list = "test.list"
self.label_list = "labels.list"
self.classes_num = 0
self.batch_size = 50000
self.batch_dir = 'batches'
self.dict_file = "dict.txt"
self.dict_with_test = False
self.dict_size = 0
self.word_count = {}
        self.tokenizer = use_tokenizer
self.overwrite = False
self.multi_lines = multi_lines
self.train_dir = join_path(data_path, self.train_dir)
self.test_dir = join_path(data_path, self.test_dir)
self.train_list = join_path(output_path, self.train_list)
self.test_list = join_path(output_path, self.test_list)
self.label_list = join_path(output_path, self.label_list)
self.dict_file = join_path(output_path, self.dict_file)
def data_list(self, path):
"""
create dataset from path
path: data path
return: data list
"""
label_set = get_label_set_from_dir(path)
data = []
for lab_name in label_set.keys():
file_paths = list_files(join_path(path, lab_name))
for p in file_paths:
data.append({"label" : label_set[lab_name],\
"seq_path": p})
return data, label_set
def create_dict(self, data):
"""
create dict for input data.
data: list, [sequence, sequnce, ...]
"""
for seq in data:
for w in seq.strip().lower().split():
if w not in self.word_count:
self.word_count[w] = 1
else:
self.word_count[w] += 1
def create_dataset(self):
"""
create file batches and dictionary of train data set.
If the self.overwrite is false and train.list already exists in
self.output_path, this function will not create and save file
batches from the data set path.
return: dictionary size, class number.
"""
out_path = self.output_path
if out_path and not os.path.exists(out_path):
os.makedirs(out_path)
        # If self.overwrite is false and self.train_list already exists,
        # the dataset will not be processed again.
if not (self.overwrite or not os.path.exists(self.train_list)):
print "%s already exists." % self.train_list
return
# Preprocess train data.
train_data, train_lab_set = self.data_list(self.train_dir)
print "processing train set..."
file_lists = self.save_data(train_data, "train", self.batch_size, True,
True)
save_list(file_lists, self.train_list)
# If have test data path, preprocess test data.
if os.path.exists(self.test_dir):
test_data, test_lab_set = self.data_list(self.test_dir)
assert (train_lab_set == test_lab_set)
print "processing test set..."
file_lists = self.save_data(test_data, "test", self.batch_size,
False, self.dict_with_test)
save_list(file_lists, self.test_list)
# save labels set.
save_dict(train_lab_set, self.label_list, False)
self.classes_num = len(train_lab_set.keys())
# save dictionary.
save_dict(self.word_count, self.dict_file, True)
self.dict_size = len(self.word_count)
def save_data(self,
data,
prefix="",
batch_size=50000,
is_shuffle=False,
build_dict=False):
"""
Create batches for a Dataset object.
data: the Dataset object to process.
prefix: the prefix of each batch.
batch_size: number of data in each batch.
build_dict: whether to build dictionary for data
return: list of batch names
"""
if is_shuffle and self.multi_lines:
return self.save_data_multi_lines(data, prefix, batch_size,
build_dict)
if is_shuffle:
random.shuffle(data)
num_batches = int(math.ceil(len(data) / float(batch_size)))
batch_names = []
for i in range(num_batches):
batch_name = join_path(self.output_path,
"%s_part_%03d" % (prefix, i))
begin = i * batch_size
end = min((i + 1) * batch_size, len(data))
# read a batch of data
label_list, data_list = self.get_data_list(begin, end, data)
if build_dict:
self.create_dict(data_list)
self.save_file(label_list, data_list, batch_name)
batch_names.append(batch_name)
return batch_names
def get_data_list(self, begin, end, data):
"""
begin: int, begining index of data.
end: int, ending index of data.
data: a list of {"seq_path": seqquence path, "label": label index}
return a list of label and a list of sequence.
"""
label_list = []
data_list = []
for j in range(begin, end):
seqs = read_lines(data[j]["seq_path"])
lab = int(data[j]["label"])
            # File may have multiple lines.
for seq in seqs:
data_list.append(seq)
label_list.append(lab)
if self.tokenizer:
data_list = tokenize(data_list)
return label_list, data_list
def save_data_multi_lines(self,
data,
prefix="",
batch_size=50000,
build_dict=False):
"""
In order to shuffle fully, there is no need to load all data if
each file only contains one sample, it only needs to shuffle list
of file name. But one file contains multi lines, each line is one
sample. It needs to read all data into memory to shuffle fully.
        This interface is mainly for data containing multi lines in each
        file, which consumes more memory if there is a great amount of data.
data: the Dataset object to process.
prefix: the prefix of each batch.
batch_size: number of data in each batch.
build_dict: whether to build dictionary for data
return: list of batch names
"""
assert self.multi_lines
label_list = []
data_list = []
# read all data
label_list, data_list = self.get_data_list(0, len(data), data)
if build_dict:
self.create_dict(data_list)
length = len(label_list)
perm_list = np.array([i for i in xrange(length)])
random.shuffle(perm_list)
num_batches = int(math.ceil(length / float(batch_size)))
batch_names = []
for i in range(num_batches):
batch_name = join_path(self.output_path,
"%s_part_%03d" % (prefix, i))
begin = i * batch_size
end = min((i + 1) * batch_size, length)
sub_label = [label_list[perm_list[i]] for i in range(begin, end)]
sub_data = [data_list[perm_list[i]] for i in range(begin, end)]
self.save_file(sub_label, sub_data, batch_name)
batch_names.append(batch_name)
return batch_names
def save_file(self, label_list, data_list, filename):
"""
Save data into file.
label_list: a list of int value.
data_list: a list of sequnece.
filename: output file name.
"""
f = open(filename, 'w')
print "saving file: %s" % filename
for lab, seq in zip(label_list, data_list):
f.write('%s\t\t%s\n' % (lab, seq))
f.close()
def option_parser():
parser = OptionParser(usage="usage: python preprcoess.py "\
"-i data_dir [options]")
parser.add_option(
"-i",
"--data",
action="store",
dest="input",
help="Input data directory.")
parser.add_option(
"-o",
"--output",
action="store",
dest="output",
default=None,
help="Output directory.")
parser.add_option(
"-t",
"--tokenizer",
action="store",
dest="use_tokenizer",
default=True,
help="Whether to use tokenizer.")
parser.add_option("-m", "--multi_lines", action="store",
dest="multi_lines", default=False,
help="If input text files have multi lines and they "\
"need to be shuffled, you should set -m True,")
return parser.parse_args()
def main():
options, args = option_parser()
data_dir = options.input
output_dir = options.output
use_tokenizer = options.use_tokenizer
multi_lines = options.multi_lines
if output_dir is None:
outname = os.path.basename(options.input)
output_dir = join_path(os.path.dirname(data_dir), 'pre-' + outname)
data_creator = SentimentDataSetCreate(data_dir, output_dir, use_tokenizer,
multi_lines)
data_creator.create_dataset()
if __name__ == '__main__':
main()
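# Example invocation (dataset directory is hypothetical; when -o is omitted the
# output defaults to a sibling 'pre-<input name>' directory):
#   python preprocess.py -i ./data/aclImdb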
| 34.175
| 79
| 0.585061
|
bb89d472a2b101e8bf9151416be2a68b7e93f7c2
| 69,547
|
py
|
Python
|
tests/unit/modules/boto_vpc_test.py
|
borgstrom/salt
|
2f732b5e8cd0b2a13f133d02f70aba3ee9fc0169
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/boto_vpc_test.py
|
borgstrom/salt
|
2f732b5e8cd0b2a13f133d02f70aba3ee9fc0169
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/boto_vpc_test.py
|
borgstrom/salt
|
2f732b5e8cd0b2a13f133d02f70aba3ee9fc0169
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# TODO: Update skipped tests to expect dictionary results from the execution
# module functions.
# Import Python libs
from __future__ import absolute_import
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
# Import Salt Testing libs
from salttesting.unit import skipIf, TestCase
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import salt.config
import salt.loader
from salt.modules import boto_vpc
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.modules.boto_vpc import _maybe_set_name_tag, _maybe_set_tags
# Import 3rd-party libs
import salt.ext.six as six
# pylint: disable=import-error,no-name-in-module
try:
import boto
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import moto
from moto import mock_ec2
HAS_MOTO = True
except ImportError:
HAS_MOTO = False
    def mock_ec2(func):
        '''
        If the mock_ec2 function is not available due to import failure
        this replaces the decorated function with stub_function.
        Allows boto_vpc unit tests to use the @mock_ec2 decorator
        without a "NameError: name 'mock_ec2' is not defined" error.
        '''
        def stub_function(self):
            pass
        return stub_function
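    # With moto absent, a decorated test such as
    #   @mock_ec2
    #   def test_foo(self): ...
    # is silently replaced by a no-op stub instead of failing at import time.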
# pylint: enable=import-error,no-name-in-module
# the boto_vpc module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto_version = '2.8.0'
required_moto_version = '0.3.7'
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
cidr_block = '10.0.0.0/24'
dhcp_options_parameters = {'domain_name': 'example.com', 'domain_name_servers': ['1.2.3.4'], 'ntp_servers': ['5.6.7.8'],
'netbios_name_servers': ['10.0.0.1'], 'netbios_node_type': 2}
network_acl_entry_parameters = ('fake', 100, -1, 'allow', cidr_block)
dhcp_options_parameters.update(conn_parameters)
opts = salt.config.DEFAULT_MINION_OPTS
utils = salt.loader.utils(opts, whitelist=['boto'])
mods = salt.loader.minion_mods(opts)
boto_vpc.__utils__ = utils
boto_vpc.__init__(opts, pack=mods)
def _has_required_boto():
'''
Returns True/False boolean depending on if Boto is installed and correct
version.
'''
if not HAS_BOTO:
return False
elif LooseVersion(boto.__version__) < LooseVersion(required_boto_version):
return False
else:
return True
def _has_required_moto():
'''
Returns True/False boolean depending on if Moto is installed and correct
version.
'''
if not HAS_MOTO:
return False
else:
try:
if LooseVersion(moto.__version__) < LooseVersion(required_moto_version):
return False
except AttributeError:
import pkg_resources
from pkg_resources import DistributionNotFound
try:
if LooseVersion(pkg_resources.get_distribution('moto').version) < LooseVersion(required_moto_version):
return False
except DistributionNotFound:
return False
return True
class BotoVpcTestCaseBase(TestCase):
def setUp(self):
boto_vpc.__context__ = {}
class BotoVpcTestCaseMixin(object):
conn = None
def _create_vpc(self, name=None, tags=None):
'''
Helper function to create a test vpc
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
vpc = self.conn.create_vpc(cidr_block)
_maybe_set_name_tag(name, vpc)
_maybe_set_tags(tags, vpc)
return vpc
def _create_subnet(self, vpc_id, cidr_block='10.0.0.0/25', name=None, tags=None, availability_zone=None):
'''
Helper function to create a test subnet
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
subnet = self.conn.create_subnet(vpc_id, cidr_block, availability_zone=availability_zone)
_maybe_set_name_tag(name, subnet)
_maybe_set_tags(tags, subnet)
return subnet
def _create_internet_gateway(self, vpc_id, name=None, tags=None):
'''
Helper function to create a test internet gateway
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
igw = self.conn.create_internet_gateway(vpc_id)
_maybe_set_name_tag(name, igw)
_maybe_set_tags(tags, igw)
return igw
def _create_customer_gateway(self, vpc_id, name=None, tags=None):
'''
Helper function to create a test customer gateway
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
gw = self.conn.create_customer_gateway(vpc_id)
_maybe_set_name_tag(name, gw)
_maybe_set_tags(tags, gw)
return gw
def _create_dhcp_options(self, domain_name='example.com', domain_name_servers=None, ntp_servers=None,
netbios_name_servers=None, netbios_node_type=2):
'''
        Helper function to create test dhcp options
'''
if not netbios_name_servers:
netbios_name_servers = ['10.0.0.1']
if not ntp_servers:
ntp_servers = ['5.6.7.8']
if not domain_name_servers:
domain_name_servers = ['1.2.3.4']
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
return self.conn.create_dhcp_options(domain_name=domain_name, domain_name_servers=domain_name_servers,
ntp_servers=ntp_servers, netbios_name_servers=netbios_name_servers,
netbios_node_type=netbios_node_type)
def _create_network_acl(self, vpc_id):
'''
Helper function to create test network acl
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
return self.conn.create_network_acl(vpc_id)
def _create_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, cidr_block, egress=None,
icmp_code=None, icmp_type=None, port_range_from=None, port_range_to=None):
'''
Helper function to create test network acl entry
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
return self.conn.create_network_acl_entry(network_acl_id, rule_number, protocol, rule_action,
cidr_block,
egress=egress,
icmp_code=icmp_code, icmp_type=icmp_type,
port_range_from=port_range_from, port_range_to=port_range_to)
def _create_route_table(self, vpc_id, name=None, tags=None):
'''
Helper function to create a test route table
'''
if not self.conn:
self.conn = boto.vpc.connect_to_region(region)
rtbl = self.conn.create_route_table(vpc_id)
_maybe_set_name_tag(name, rtbl)
_maybe_set_tags(tags, rtbl)
return rtbl
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
@skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version))
class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
'''
TestCase for salt.modules.boto_vpc module
'''
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_id_and_a_vpc_exists_the_vpc_exists_method_returns_true(self):
'''
Tests checking vpc existence via id when the vpc already exists
'''
vpc = self._create_vpc()
vpc_exists_result = boto_vpc.exists(vpc_id=vpc.id, **conn_parameters)
self.assertTrue(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_id_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false(
self):
'''
Tests checking vpc existence via id when the vpc does not exist
'''
self._create_vpc() # Created to ensure that the filters are applied correctly
vpc_exists_result = boto_vpc.exists(vpc_id='fake', **conn_parameters)
self.assertFalse(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_name_and_a_vpc_exists_the_vpc_exists_method_returns_true(self):
'''
Tests checking vpc existence via name when vpc exists
'''
self._create_vpc(name='test')
vpc_exists_result = boto_vpc.exists(name='test', **conn_parameters)
self.assertTrue(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_name_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false(
self):
'''
Tests checking vpc existence via name when vpc does not exist
'''
self._create_vpc() # Created to ensure that the filters are applied correctly
vpc_exists_result = boto_vpc.exists(name='test', **conn_parameters)
self.assertFalse(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_tags_and_a_vpc_exists_the_vpc_exists_method_returns_true(self):
'''
Tests checking vpc existence via tag when vpc exists
'''
self._create_vpc(tags={'test': 'testvalue'})
vpc_exists_result = boto_vpc.exists(tags={'test': 'testvalue'}, **conn_parameters)
self.assertTrue(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_tags_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false(
self):
'''
Tests checking vpc existence via tag when vpc does not exist
'''
self._create_vpc() # Created to ensure that the filters are applied correctly
vpc_exists_result = boto_vpc.exists(tags={'test': 'testvalue'}, **conn_parameters)
self.assertFalse(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_cidr_and_a_vpc_exists_the_vpc_exists_method_returns_true(self):
'''
Tests checking vpc existence via cidr when vpc exists
'''
self._create_vpc()
vpc_exists_result = boto_vpc.exists(cidr=u'10.0.0.0/24', **conn_parameters)
self.assertTrue(vpc_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_vpc_exists_by_cidr_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false(
self):
'''
Tests checking vpc existence via cidr when vpc does not exist
'''
self._create_vpc() # Created to ensure that the filters are applied correctly
vpc_exists_result = boto_vpc.exists(cidr=u'10.10.10.10/24', **conn_parameters)
self.assertFalse(vpc_exists_result['exists'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_checking_if_a_vpc_exists_but_providing_no_filters_the_vpc_exists_method_raises_a_salt_invocation_error(self):
'''
Tests checking vpc existence when no filters are provided
'''
with self.assertRaisesRegexp(SaltInvocationError, 'At least one of the following '
'must be provided: vpc_id, vpc_name, '
'cidr or tags.'):
boto_vpc.exists(**conn_parameters)
@mock_ec2
def test_get_vpc_id_method_when_filtering_by_name(self):
'''
Tests getting vpc id when filtering by name
'''
vpc = self._create_vpc(name='test')
get_id_result = boto_vpc.get_id(name='test', **conn_parameters)
self.assertEqual(vpc.id, get_id_result['id'])
@mock_ec2
def test_get_vpc_id_method_when_filtering_by_invalid_name(self):
'''
Tests getting vpc id when filtering by invalid name
'''
self._create_vpc(name='test')
get_id_result = boto_vpc.get_id(name='test_fake', **conn_parameters)
self.assertEqual(get_id_result['id'], None)
@mock_ec2
def test_get_vpc_id_method_when_filtering_by_cidr(self):
'''
Tests getting vpc id when filtering by cidr
'''
vpc = self._create_vpc()
get_id_result = boto_vpc.get_id(cidr=u'10.0.0.0/24', **conn_parameters)
self.assertEqual(vpc.id, get_id_result['id'])
@mock_ec2
def test_get_vpc_id_method_when_filtering_by_invalid_cidr(self):
'''
Tests getting vpc id when filtering by invalid cidr
'''
self._create_vpc()
get_id_result = boto_vpc.get_id(cidr=u'10.10.10.10/24', **conn_parameters)
self.assertEqual(get_id_result['id'], None)
@mock_ec2
def test_get_vpc_id_method_when_filtering_by_tags(self):
'''
Tests getting vpc id when filtering by tags
'''
vpc = self._create_vpc(tags={'test': 'testvalue'})
get_id_result = boto_vpc.get_id(tags={'test': 'testvalue'}, **conn_parameters)
self.assertEqual(vpc.id, get_id_result['id'])
@mock_ec2
def test_get_vpc_id_method_when_filtering_by_invalid_tags(self):
'''
Tests getting vpc id when filtering by invalid tags
'''
self._create_vpc(tags={'test': 'testvalue'})
get_id_result = boto_vpc.get_id(tags={'test': 'fake-testvalue'}, **conn_parameters)
self.assertEqual(get_id_result['id'], None)
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_get_vpc_id_method_when_not_providing_filters_raises_a_salt_invocation_error(self):
'''
Tests getting vpc id but providing no filters
'''
with self.assertRaisesRegexp(SaltInvocationError, 'At least one of the following must be provided: vpc_id, vpc_name, cidr or tags.'):
boto_vpc.get_id(**conn_parameters)
@mock_ec2
def test_get_vpc_id_method_when_more_than_one_vpc_is_matched_raises_a_salt_command_execution_error(self):
'''
        Tests getting vpc id when more than one vpc matches the filters
'''
        self._create_vpc(name='vpc-test1')
        self._create_vpc(name='vpc-test2')
with self.assertRaisesRegexp(CommandExecutionError, 'Found more than one VPC matching the criteria.'):
boto_vpc.get_id(cidr=u'10.0.0.0/24', **conn_parameters)
@mock_ec2
def test_that_when_creating_a_vpc_succeeds_the_create_vpc_method_returns_true(self):
'''
        Tests that a VPC is created successfully.
'''
vpc_creation_result = boto_vpc.create(cidr_block, **conn_parameters)
self.assertTrue(vpc_creation_result)
@mock_ec2
def test_that_when_creating_a_vpc_and_specifying_a_vpc_name_succeeds_the_create_vpc_method_returns_true(self):
'''
        Tests that a VPC with a name is created successfully.
'''
vpc_creation_result = boto_vpc.create(cidr_block, vpc_name='test', **conn_parameters)
self.assertTrue(vpc_creation_result)
@mock_ec2
def test_that_when_creating_a_vpc_and_specifying_tags_succeeds_the_create_vpc_method_returns_true(self):
'''
        Tests that a VPC with tags is created successfully.
'''
vpc_creation_result = boto_vpc.create(cidr_block, tags={'test': 'value'}, **conn_parameters)
self.assertTrue(vpc_creation_result)
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_creating_a_vpc_fails_the_create_vpc_method_returns_false(self):
'''
        Tests that a failed VPC creation returns an error.
'''
with patch('moto.ec2.models.VPCBackend.create_vpc', side_effect=BotoServerError(400, 'Mocked error')):
vpc_creation_result = boto_vpc.create(cidr_block, **conn_parameters)
self.assertFalse(vpc_creation_result['created'])
self.assertTrue('error' in vpc_creation_result)
@mock_ec2
def test_that_when_deleting_an_existing_vpc_the_delete_vpc_method_returns_true(self):
'''
Tests deleting an existing vpc
'''
vpc = self._create_vpc()
vpc_deletion_result = boto_vpc.delete(vpc.id, **conn_parameters)
self.assertTrue(vpc_deletion_result)
@mock_ec2
def test_that_when_deleting_a_non_existent_vpc_the_delete_vpc_method_returns_false(self):
'''
Tests deleting a non-existent vpc
'''
delete_vpc_result = boto_vpc.delete('1234', **conn_parameters)
self.assertFalse(delete_vpc_result['deleted'])
@mock_ec2
def test_that_when_describing_vpc_by_id_it_returns_the_dict_of_properties_returns_true(self):
'''
Tests describing parameters via vpc id if vpc exist
'''
vpc = self._create_vpc(name='test', tags={'test': 'testvalue'})
describe_vpc = boto_vpc.describe(vpc_id=vpc.id, **conn_parameters)
vpc_properties = dict(id=vpc.id,
cidr_block=six.text_type(cidr_block),
is_default=None,
state=u'available',
tags={u'Name': u'test', u'test': u'testvalue'},
dhcp_options_id=u'dopt-7a8b9c2d',
instance_tenancy=u'default')
self.assertEqual(describe_vpc, {'vpc': vpc_properties})
@mock_ec2
def test_that_when_describing_vpc_by_id_it_returns_the_dict_of_properties_returns_false(self):
'''
Tests describing parameters via vpc id if vpc does not exist
'''
vpc = self._create_vpc(name='test', tags={'test': 'testvalue'})
describe_vpc = boto_vpc.describe(vpc_id='vpc-fake', **conn_parameters)
self.assertFalse(describe_vpc['vpc'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_describing_vpc_by_id_on_connection_error_it_returns_error(self):
'''
Tests describing parameters failure
'''
vpc = self._create_vpc(name='test', tags={'test': 'testvalue'})
with patch('moto.ec2.models.VPCBackend.get_all_vpcs',
side_effect=BotoServerError(400, 'Mocked error')):
describe_result = boto_vpc.describe(vpc_id=vpc.id, **conn_parameters)
self.assertTrue('error' in describe_result)
@mock_ec2
def test_that_when_describing_vpc_but_providing_no_vpc_id_the_describe_method_raises_a_salt_invocation_error(self):
'''
Tests describing vpc without vpc id
'''
with self.assertRaisesRegexp(SaltInvocationError,
'A valid vpc id or name needs to be specified.'):
boto_vpc.describe(vpc_id=None, **conn_parameters)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
@skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version))
class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
@mock_ec2
def test_get_subnet_association_single_subnet(self):
'''
        Tests that given a single subnet id the enclosing VPC ID is returned.
        The test is valuable because it passes a string to subnets as opposed
        to a list.
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
subnet_association = boto_vpc.get_subnet_association(subnets=subnet.id,
**conn_parameters)
self.assertEqual(vpc.id, subnet_association['vpc_id'])
@mock_ec2
def test_get_subnet_association_multiple_subnets_same_vpc(self):
'''
        Tests that given multiple subnet ids in the same VPC, that VPC's ID is
        returned.
'''
vpc = self._create_vpc()
subnet_a = self._create_subnet(vpc.id, '10.0.0.0/25')
subnet_b = self._create_subnet(vpc.id, '10.0.0.128/25')
subnet_association = boto_vpc.get_subnet_association([subnet_a.id, subnet_b.id],
**conn_parameters)
self.assertEqual(vpc.id, subnet_association['vpc_id'])
@mock_ec2
def test_get_subnet_association_multiple_subnets_different_vpc(self):
'''
        Tests that given subnet ids spanning different VPCs, the set of all
        matching VPC IDs is returned.
'''
vpc_a = self._create_vpc()
vpc_b = self.conn.create_vpc(cidr_block)
subnet_a = self._create_subnet(vpc_a.id, '10.0.0.0/24')
subnet_b = self._create_subnet(vpc_b.id, '10.0.0.0/24')
subnet_association = boto_vpc.get_subnet_association([subnet_a.id, subnet_b.id],
**conn_parameters)
self.assertEqual(set(subnet_association['vpc_ids']), set([vpc_a.id, vpc_b.id]))
@mock_ec2
def test_that_when_creating_a_subnet_succeeds_the_create_subnet_method_returns_true(self):
'''
Tests creating a subnet successfully
'''
vpc = self._create_vpc()
subnet_creation_result = boto_vpc.create_subnet(vpc.id, '10.0.0.0/24', **conn_parameters)
self.assertTrue(subnet_creation_result['created'])
self.assertTrue('id' in subnet_creation_result)
@mock_ec2
def test_that_when_creating_a_subnet_and_specifying_a_name_succeeds_the_create_subnet_method_returns_true(self):
'''
Tests creating a subnet successfully when specifying a name
'''
vpc = self._create_vpc()
subnet_creation_result = boto_vpc.create_subnet(vpc.id, '10.0.0.0/24', subnet_name='test', **conn_parameters)
self.assertTrue(subnet_creation_result['created'])
@mock_ec2
def test_that_when_creating_a_subnet_and_specifying_tags_succeeds_the_create_subnet_method_returns_true(self):
'''
Tests creating a subnet successfully when specifying a tag
'''
vpc = self._create_vpc()
subnet_creation_result = boto_vpc.create_subnet(vpc.id, '10.0.0.0/24', tags={'test': 'testvalue'},
**conn_parameters)
self.assertTrue(subnet_creation_result['created'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_creating_a_subnet_fails_the_create_subnet_method_returns_error(self):
'''
Tests creating a subnet failure
'''
vpc = self._create_vpc()
with patch('moto.ec2.models.SubnetBackend.create_subnet', side_effect=BotoServerError(400, 'Mocked error')):
subnet_creation_result = boto_vpc.create_subnet(vpc.id, '10.0.0.0/24', **conn_parameters)
self.assertTrue('error' in subnet_creation_result)
@mock_ec2
def test_that_when_deleting_an_existing_subnet_the_delete_subnet_method_returns_true(self):
'''
Tests deleting an existing subnet
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
subnet_deletion_result = boto_vpc.delete_subnet(subnet_id=subnet.id, **conn_parameters)
self.assertTrue(subnet_deletion_result['deleted'])
@mock_ec2
def test_that_when_deleting_a_non_existent_subnet_the_delete_vpc_method_returns_false(self):
'''
Tests deleting a subnet that doesn't exist
'''
delete_subnet_result = boto_vpc.delete_subnet(subnet_id='1234', **conn_parameters)
self.assertTrue('error' in delete_subnet_result)
@mock_ec2
def test_that_when_checking_if_a_subnet_exists_by_id_the_subnet_exists_method_returns_true(self):
'''
Tests checking if a subnet exists when it does exist
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
subnet_exists_result = boto_vpc.subnet_exists(subnet_id=subnet.id, **conn_parameters)
self.assertTrue(subnet_exists_result['exists'])
@mock_ec2
def test_that_when_a_subnet_does_not_exist_the_subnet_exists_method_returns_false(self):
'''
Tests checking if a subnet exists which doesn't exist
'''
subnet_exists_result = boto_vpc.subnet_exists('fake', **conn_parameters)
self.assertFalse(subnet_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_subnet_exists_by_name_the_subnet_exists_method_returns_true(self):
'''
Tests checking subnet existence by name
'''
vpc = self._create_vpc()
self._create_subnet(vpc.id, name='test')
subnet_exists_result = boto_vpc.subnet_exists(name='test', **conn_parameters)
self.assertTrue(subnet_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_subnet_exists_by_name_the_subnet_does_not_exist_the_subnet_method_returns_false(self):
'''
Tests checking subnet existence by name when it doesn't exist
'''
vpc = self._create_vpc()
self._create_subnet(vpc.id)
subnet_exists_result = boto_vpc.subnet_exists(name='test', **conn_parameters)
self.assertFalse(subnet_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_subnet_exists_by_tags_the_subnet_exists_method_returns_true(self):
'''
Tests checking subnet existence by tag
'''
vpc = self._create_vpc()
self._create_subnet(vpc.id, tags={'test': 'testvalue'})
subnet_exists_result = boto_vpc.subnet_exists(tags={'test': 'testvalue'}, **conn_parameters)
self.assertTrue(subnet_exists_result['exists'])
@mock_ec2
def test_that_when_checking_if_a_subnet_exists_by_tags_the_subnet_does_not_exist_the_subnet_method_returns_false(self):
'''
Tests checking subnet existence by tag when subnet doesn't exist
'''
vpc = self._create_vpc()
self._create_subnet(vpc.id)
subnet_exists_result = boto_vpc.subnet_exists(tags={'test': 'testvalue'}, **conn_parameters)
self.assertFalse(subnet_exists_result['exists'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_checking_if_a_subnet_exists_but_providing_no_filters_the_subnet_exists_method_raises_a_salt_invocation_error(self):
'''
Tests checking subnet existence without any filters
'''
with self.assertRaisesRegexp(SaltInvocationError,
'At least one of the following must be specified: subnet id, cidr, subnet_name, tags, or zones.'):
boto_vpc.subnet_exists(**conn_parameters)
@mock_ec2
def test_that_describe_subnet_by_id_for_existing_subnet_returns_correct_data(self):
'''
Tests describing a subnet by id.
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
describe_subnet_results = boto_vpc.describe_subnet(subnet_id=subnet.id)
self.assertEqual(set(describe_subnet_results['subnet'].keys()),
set(['id', 'cidr_block', 'availability_zone', 'tags']))
@mock_ec2
def test_that_describe_subnet_by_id_for_non_existent_subnet_returns_none(self):
'''
Tests describing a non-existent subnet by id.
'''
vpc = self._create_vpc()
describe_subnet_results = boto_vpc.describe_subnet(subnet_id='subnet-a1b2c3')
self.assertEqual(describe_subnet_results['subnet'], None)
@mock_ec2
def test_that_describe_subnet_by_name_for_existing_subnet_returns_correct_data(self):
'''
Tests describing a subnet by name.
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id, name='test')
describe_subnet_results = boto_vpc.describe_subnet(subnet_name='test')
self.assertEqual(set(describe_subnet_results['subnet'].keys()),
set(['id', 'cidr_block', 'availability_zone', 'tags']))
@mock_ec2
def test_that_describe_subnet_by_name_for_non_existent_subnet_returns_none(self):
'''
        Tests describing a non-existent subnet by name.
'''
vpc = self._create_vpc()
describe_subnet_results = boto_vpc.describe_subnet(subnet_name='test')
self.assertEqual(describe_subnet_results['subnet'], None)
@mock_ec2
def test_that_describe_subnets_by_id_for_existing_subnet_returns_correct_data(self):
'''
Tests describing multiple subnets by id.
'''
vpc = self._create_vpc()
subnet1 = self._create_subnet(vpc.id)
subnet2 = self._create_subnet(vpc.id)
describe_subnet_results = boto_vpc.describe_subnets(subnet_ids=[subnet1.id, subnet2.id])
self.assertEqual(len(describe_subnet_results['subnets']), 2)
self.assertEqual(set(describe_subnet_results['subnets'][0].keys()),
set(['id', 'cidr_block', 'availability_zone', 'tags']))
@mock_ec2
def test_that_describe_subnets_by_name_for_existing_subnets_returns_correct_data(self):
'''
        Tests describing multiple subnets by name.
'''
vpc = self._create_vpc()
subnet1 = self._create_subnet(vpc.id, name='subnet1')
subnet2 = self._create_subnet(vpc.id, name='subnet2')
describe_subnet_results = boto_vpc.describe_subnets(subnet_names=['subnet1', 'subnet2'])
self.assertEqual(len(describe_subnet_results['subnets']), 2)
self.assertEqual(set(describe_subnet_results['subnets'][0].keys()),
set(['id', 'cidr_block', 'availability_zone', 'tags']))
@mock_ec2
def test_create_subnet_passes_availability_zone(self):
'''
Tests that the availability_zone kwarg is passed on to _create_resource
'''
vpc = self._create_vpc()
self._create_subnet(vpc.id, name='subnet1', availability_zone='us-east-1a')
describe_subnet_results = boto_vpc.describe_subnets(subnet_names=['subnet1'])
self.assertEqual(describe_subnet_results['subnets'][0]['availability_zone'], 'us-east-1a')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
@mock_ec2
def test_that_when_creating_an_internet_gateway_the_create_internet_gateway_method_returns_true(self):
'''
Tests creating an internet gateway successfully (with no vpc id or name)
'''
igw_creation_result = boto_vpc.create_internet_gateway()
self.assertTrue(igw_creation_result.get('created'))
@mock_ec2
def test_that_when_creating_an_internet_gateway_with_non_existent_vpc_the_create_internet_gateway_method_returns_an_error(self):
'''
Tests that creating an internet gateway for a non-existent VPC fails.
'''
igw_creation_result = boto_vpc.create_internet_gateway(vpc_name='non-existent-vpc')
self.assertTrue('error' in igw_creation_result)
@mock_ec2
def test_that_when_creating_an_internet_gateway_with_vpc_name_specified_the_create_internet_gateway_method_returns_true(self):
'''
Tests creating an internet gateway with vpc name specified.
'''
self._create_vpc(name='test-vpc')
igw_creation_result = boto_vpc.create_internet_gateway(vpc_name='test-vpc')
self.assertTrue(igw_creation_result.get('created'))
@mock_ec2
def test_that_when_creating_an_internet_gateway_with_vpc_id_specified_the_create_internet_gateway_method_returns_true(self):
'''
        Tests creating an internet gateway with vpc id specified.
'''
vpc = self._create_vpc()
igw_creation_result = boto_vpc.create_internet_gateway(vpc_id=vpc.id)
self.assertTrue(igw_creation_result.get('created'))
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_customer_gateway_the_create_customer_gateway_method_returns_true(self):
'''
        Tests creating a customer gateway successfully (with no vpc id or name)
'''
gw_creation_result = boto_vpc.create_customer_gateway('ipsec.1', '10.1.1.1', None)
self.assertTrue(gw_creation_result.get('created'))
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
    def test_that_when_checking_if_a_customer_gateway_exists_by_id_the_customer_gateway_exists_method_returns_true(self):
        '''
        Tests checking if a customer gateway exists when it does exist
'''
gw_creation_result = boto_vpc.create_customer_gateway('ipsec.1', '10.1.1.1', None)
gw_exists_result = boto_vpc.customer_gateway_exists(customer_gateway_id=gw_creation_result['id'])
self.assertTrue(gw_exists_result['exists'])
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
    def test_that_when_a_customer_gateway_does_not_exist_the_customer_gateway_exists_method_returns_false(self):
        '''
        Tests checking if a customer gateway exists when it doesn't exist
'''
gw_exists_result = boto_vpc.customer_gateway_exists('fake')
self.assertFalse(gw_exists_result['exists'])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
@skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version))
class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
@mock_ec2
def test_that_when_creating_dhcp_options_succeeds_the_create_dhcp_options_method_returns_true(self):
'''
Tests creating dhcp options successfully
'''
dhcp_options_creation_result = boto_vpc.create_dhcp_options(**dhcp_options_parameters)
self.assertTrue(dhcp_options_creation_result['created'])
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_dhcp_options_and_specifying_a_name_succeeds_the_create_dhcp_options_method_returns_true(
self):
'''
        Tests creating dhcp options with a name successfully
'''
dhcp_options_creation_result = boto_vpc.create_dhcp_options(dhcp_options_name='test',
**dhcp_options_parameters)
self.assertTrue(dhcp_options_creation_result['created'])
@mock_ec2
def test_that_when_creating_dhcp_options_and_specifying_tags_succeeds_the_create_dhcp_options_method_returns_true(
self):
'''
        Tests creating dhcp options with a tag successfully
'''
dhcp_options_creation_result = boto_vpc.create_dhcp_options(tags={'test': 'testvalue'},
**dhcp_options_parameters)
self.assertTrue(dhcp_options_creation_result['created'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_creating_dhcp_options_fails_the_create_dhcp_options_method_returns_error(self):
'''
Tests creating dhcp options failure
'''
with patch('moto.ec2.models.DHCPOptionsSetBackend.create_dhcp_options',
side_effect=BotoServerError(400, 'Mocked error')):
            r = boto_vpc.create_dhcp_options(**dhcp_options_parameters)
self.assertTrue('error' in r)
@mock_ec2
def test_that_when_associating_an_existing_dhcp_options_set_to_an_existing_vpc_the_associate_dhcp_options_method_returns_true(
self):
'''
        Tests associating existing dhcp options successfully
'''
vpc = self._create_vpc()
dhcp_options = self._create_dhcp_options()
dhcp_options_association_result = boto_vpc.associate_dhcp_options_to_vpc(dhcp_options.id, vpc.id,
**conn_parameters)
self.assertTrue(dhcp_options_association_result['associated'])
@mock_ec2
def test_that_when_associating_a_non_existent_dhcp_options_set_to_an_existing_vpc_the_associate_dhcp_options_method_returns_error(
self):
'''
        Tests associating non-existent dhcp options (failure case)
'''
vpc = self._create_vpc()
dhcp_options_association_result = boto_vpc.associate_dhcp_options_to_vpc('fake', vpc.id, **conn_parameters)
self.assertTrue('error' in dhcp_options_association_result)
@mock_ec2
def test_that_when_associating_an_existing_dhcp_options_set_to_a_non_existent_vpc_the_associate_dhcp_options_method_returns_false(
self):
'''
        Tests associating existing dhcp options to a non-existent vpc
'''
dhcp_options = self._create_dhcp_options()
dhcp_options_association_result = boto_vpc.associate_dhcp_options_to_vpc(dhcp_options.id, 'fake',
**conn_parameters)
self.assertTrue('error' in dhcp_options_association_result)
@mock_ec2
def test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_succeeds_the_associate_new_dhcp_options_method_returns_true(
self):
'''
        Tests creation/association of dhcp options to an existing vpc successfully
'''
vpc = self._create_vpc()
dhcp_creation_and_association_result = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id,
**dhcp_options_parameters)
self.assertTrue(dhcp_creation_and_association_result['created'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_fails_creating_the_dhcp_options_the_associate_new_dhcp_options_method_raises_exception(
self):
'''
        Tests creation failure during creation/association of dhcp options to an existing vpc
'''
vpc = self._create_vpc()
with patch('moto.ec2.models.DHCPOptionsSetBackend.create_dhcp_options',
side_effect=BotoServerError(400, 'Mocked error')):
r = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id, **dhcp_options_parameters)
self.assertTrue('error' in r)
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_fails_associating_the_dhcp_options_the_associate_new_dhcp_options_method_raises_exception(self):
'''
        Tests association failure during creation/association of dhcp options to an existing vpc
'''
vpc = self._create_vpc()
with patch('moto.ec2.models.DHCPOptionsSetBackend.associate_dhcp_options',
side_effect=BotoServerError(400, 'Mocked error')):
r = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id, **dhcp_options_parameters)
self.assertTrue('error' in r)
@mock_ec2
def test_that_when_creating_and_associating_dhcp_options_set_to_a_non_existent_vpc_the_dhcp_options_the_associate_new_dhcp_options_method_returns_false(
self):
'''
Tests creation/association of dhcp options to non-existent vpc
'''
r = boto_vpc.associate_new_dhcp_options_to_vpc('fake', **dhcp_options_parameters)
self.assertTrue('error' in r)
@mock_ec2
def test_that_when_dhcp_options_exists_the_dhcp_options_exists_method_returns_true(self):
'''
Tests existence of dhcp options successfully
'''
dhcp_options = self._create_dhcp_options()
dhcp_options_exists_result = boto_vpc.dhcp_options_exists(dhcp_options.id, **conn_parameters)
self.assertTrue(dhcp_options_exists_result['exists'])
@mock_ec2
def test_that_when_dhcp_options_do_not_exist_the_dhcp_options_exists_method_returns_false(self):
'''
Tests existence of dhcp options failure
'''
r = boto_vpc.dhcp_options_exists('fake', **conn_parameters)
self.assertFalse(r['exists'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_checking_if_dhcp_options_exists_but_providing_no_filters_the_dhcp_options_exists_method_raises_a_salt_invocation_error(self):
'''
Tests checking dhcp option existence with no filters
'''
with self.assertRaisesRegexp(SaltInvocationError, 'At least one of the following must be provided: id, name, or tags.'):
boto_vpc.dhcp_options_exists(**conn_parameters)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_network_acl_for_an_existing_vpc_the_create_network_acl_method_returns_true(self):
'''
Tests creation of network acl with existing vpc
'''
vpc = self._create_vpc()
network_acl_creation_result = boto_vpc.create_network_acl(vpc.id, **conn_parameters)
self.assertTrue(network_acl_creation_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_network_acl_for_an_existing_vpc_and_specifying_a_name_the_create_network_acl_method_returns_true(
self):
'''
Tests creation of network acl via name with an existing vpc
'''
vpc = self._create_vpc()
network_acl_creation_result = boto_vpc.create_network_acl(vpc.id, network_acl_name='test', **conn_parameters)
self.assertTrue(network_acl_creation_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_network_acl_for_an_existing_vpc_and_specifying_tags_the_create_network_acl_method_returns_true(
self):
'''
Tests creation of network acl via tags with an existing vpc
'''
vpc = self._create_vpc()
network_acl_creation_result = boto_vpc.create_network_acl(vpc.id, tags={'test': 'testvalue'}, **conn_parameters)
self.assertTrue(network_acl_creation_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_network_acl_for_a_non_existent_vpc_the_create_network_acl_method_returns_an_error(self):
'''
Tests creation of network acl with a non-existent vpc
'''
network_acl_creation_result = boto_vpc.create_network_acl('fake', **conn_parameters)
self.assertTrue('error' in network_acl_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_network_acl_fails_the_create_network_acl_method_returns_false(self):
'''
Tests creation of network acl failure
'''
vpc = self._create_vpc()
with patch('moto.ec2.models.NetworkACLBackend.create_network_acl',
side_effect=BotoServerError(400, 'Mocked error')):
network_acl_creation_result = boto_vpc.create_network_acl(vpc.id, **conn_parameters)
self.assertFalse(network_acl_creation_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_an_existing_network_acl_the_delete_network_acl_method_returns_true(self):
'''
Tests deletion of existing network acl successfully
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
network_acl_deletion_result = boto_vpc.delete_network_acl(network_acl.id, **conn_parameters)
self.assertTrue(network_acl_deletion_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_a_non_existent_network_acl_the_delete_network_acl_method_returns_an_error(self):
'''
Tests deleting a non-existent network acl
'''
network_acl_deletion_result = boto_vpc.delete_network_acl('fake', **conn_parameters)
self.assertTrue('error' in network_acl_deletion_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_a_network_acl_exists_the_network_acl_exists_method_returns_true(self):
'''
Tests existence of network acl
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
network_acl_deletion_result = boto_vpc.network_acl_exists(network_acl.id, **conn_parameters)
self.assertTrue(network_acl_deletion_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_a_network_acl_does_not_exist_the_network_acl_exists_method_returns_false(self):
'''
Tests checking network acl does not exist
'''
network_acl_deletion_result = boto_vpc.network_acl_exists('fake', **conn_parameters)
self.assertFalse(network_acl_deletion_result['exists'])
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_checking_if_network_acl_exists_but_providing_no_filters_the_network_acl_exists_method_raises_a_salt_invocation_error(self):
'''
Tests checking existence of network acl with no filters
'''
with self.assertRaisesRegexp(
SaltInvocationError,
'At least one of the following must be provided: id, name, or tags.'
):
boto_vpc.dhcp_options_exists(**conn_parameters)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_network_acl_entry_successfully_the_create_network_acl_entry_method_returns_true(self):
'''
Tests creating network acl successfully
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
network_acl_entry_creation_result = boto_vpc.create_network_acl_entry(network_acl.id,
*network_acl_entry_parameters,
**conn_parameters)
self.assertTrue(network_acl_entry_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_network_acl_entry_for_a_non_existent_network_acl_the_create_network_acl_entry_method_returns_false(
self):
'''
Tests creating network acl entry for non-existent network acl
'''
network_acl_entry_creation_result = boto_vpc.create_network_acl_entry(*network_acl_entry_parameters,
**conn_parameters)
self.assertFalse(network_acl_entry_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_replacing_a_network_acl_entry_successfully_the_replace_network_acl_entry_method_returns_true(
self):
'''
Tests replacing network acl entry successfully
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
self._create_network_acl_entry(network_acl.id, *network_acl_entry_parameters)
network_acl_entry_creation_result = boto_vpc.replace_network_acl_entry(network_acl.id,
*network_acl_entry_parameters,
**conn_parameters)
self.assertTrue(network_acl_entry_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_replacing_a_network_acl_entry_for_a_non_existent_network_acl_the_replace_network_acl_entry_method_returns_false(
self):
'''
Tests replacing a network acl entry for a non-existent network acl
'''
        network_acl_entry_replacement_result = boto_vpc.replace_network_acl_entry(*network_acl_entry_parameters,
                                                                                  **conn_parameters)
        self.assertFalse(network_acl_entry_replacement_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_an_existing_network_acl_entry_the_delete_network_acl_entry_method_returns_true(self):
'''
Tests deleting existing network acl entry successfully
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
network_acl_entry = self._create_network_acl_entry(network_acl.id, *network_acl_entry_parameters)
network_acl_entry_deletion_result = boto_vpc.delete_network_acl_entry(network_acl_entry.id, 100,
**conn_parameters)
self.assertTrue(network_acl_entry_deletion_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_a_non_existent_network_acl_entry_the_delete_network_acl_entry_method_returns_false(
self):
'''
Tests deleting a non-existent network acl entry
'''
network_acl_entry_deletion_result = boto_vpc.delete_network_acl_entry('fake', 100,
**conn_parameters)
self.assertFalse(network_acl_entry_deletion_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_associating_an_existing_network_acl_to_an_existing_subnet_the_associate_network_acl_method_returns_true(
self):
'''
Tests association of existing network acl to existing subnet successfully
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
subnet = self._create_subnet(vpc.id)
network_acl_association_result = boto_vpc.associate_network_acl_to_subnet(network_acl.id, subnet.id,
**conn_parameters)
self.assertTrue(network_acl_association_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_associating_a_non_existent_network_acl_to_an_existing_subnet_the_associate_network_acl_method_returns_an_error(
self):
'''
Tests associating a non-existent network acl to existing subnet failure
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
network_acl_association_result = boto_vpc.associate_network_acl_to_subnet('fake', subnet.id,
**conn_parameters)
self.assertTrue('error' in network_acl_association_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_associating_an_existing_network_acl_to_a_non_existent_subnet_the_associate_network_acl_method_returns_false(
self):
'''
Tests associating an existing network acl to a non-existent subnet
'''
vpc = self._create_vpc()
network_acl = self._create_network_acl(vpc.id)
network_acl_association_result = boto_vpc.associate_network_acl_to_subnet(network_acl.id, 'fake',
**conn_parameters)
self.assertFalse(network_acl_association_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true(
self):
'''
Tests creating/associating a network acl to a subnet to a new network
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
network_acl_creation_and_association_result = boto_vpc.associate_new_network_acl_to_subnet(vpc.id, subnet.id,
**conn_parameters)
self.assertTrue(network_acl_creation_and_association_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_and_specifying_a_name_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true(
self):
'''
Tests creation/association of a network acl to subnet via name successfully
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
network_acl_creation_and_association_result = boto_vpc.associate_new_network_acl_to_subnet(vpc.id, subnet.id,
network_acl_name='test',
**conn_parameters)
self.assertTrue(network_acl_creation_and_association_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_and_specifying_tags_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true(
self):
'''
Tests creating/association of a network acl to a subnet via tag successfully
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
network_acl_creation_and_association_result = boto_vpc.associate_new_network_acl_to_subnet(vpc.id, subnet.id,
tags={
'test': 'testvalue'},
**conn_parameters)
self.assertTrue(network_acl_creation_and_association_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_and_associating_a_network_acl_to_a_non_existent_subnet_the_associate_new_network_acl_to_subnet_method_returns_false(
self):
'''
        Tests creation/association of a network acl to a non-existent subnet
'''
vpc = self._create_vpc()
network_acl_creation_and_association_result = boto_vpc.associate_new_network_acl_to_subnet(vpc.id, 'fake',
**conn_parameters)
self.assertFalse(network_acl_creation_and_association_result)
@mock_ec2
#@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_and_associating_a_network_acl_to_a_non_existent_vpc_the_associate_new_network_acl_to_subnet_method_returns_an_error(
self):
'''
        Tests creation/association of network acl to a non-existent vpc
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
network_acl_creation_and_association_result = boto_vpc.associate_new_network_acl_to_subnet('fake', subnet.id,
**conn_parameters)
self.assertTrue('error' in network_acl_creation_and_association_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_disassociating_network_acl_succeeds_the_disassociate_network_acl_method_should_return_true(self):
'''
Tests disassociation of network acl success
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
dhcp_disassociate_result = boto_vpc.disassociate_network_acl(subnet.id, vpc_id=vpc.id, **conn_parameters)
self.assertTrue(dhcp_disassociate_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_disassociating_network_acl_for_a_non_existent_vpc_the_disassociate_network_acl_method_should_return_false(
self):
'''
Tests disassociation of network acl from non-existent vpc
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
dhcp_disassociate_result = boto_vpc.disassociate_network_acl(subnet.id, vpc_id='fake', **conn_parameters)
self.assertFalse(dhcp_disassociate_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_disassociating_network_acl_for_a_non_existent_subnet_the_disassociate_network_acl_method_should_return_false(
self):
'''
Tests disassociation of network acl from non-existent subnet
'''
vpc = self._create_vpc()
dhcp_disassociate_result = boto_vpc.disassociate_network_acl('fake', vpc_id=vpc.id, **conn_parameters)
self.assertFalse(dhcp_disassociate_result)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
' or equal to version {0}'
.format(required_boto_version))
class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_route_table_succeeds_the_create_route_table_method_returns_true(self):
'''
Tests creating route table successfully
'''
vpc = self._create_vpc()
route_table_creation_result = boto_vpc.create_route_table(vpc.id, **conn_parameters)
self.assertTrue(route_table_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_route_table_on_a_non_existent_vpc_the_create_route_table_method_returns_false(self):
'''
Tests creating route table on a non-existent vpc
'''
route_table_creation_result = boto_vpc.create_route_table('fake', **conn_parameters)
        self.assertFalse(route_table_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_a_route_table_succeeds_the_delete_route_table_method_returns_true(self):
'''
Tests deleting route table successfully
'''
vpc = self._create_vpc()
route_table = self._create_route_table(vpc.id)
route_table_deletion_result = boto_vpc.delete_route_table(route_table.id, **conn_parameters)
self.assertTrue(route_table_deletion_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_a_non_existent_route_table_the_delete_route_table_method_returns_false(self):
'''
Tests deleting non-existent route table
'''
route_table_deletion_result = boto_vpc.delete_route_table('fake', **conn_parameters)
self.assertFalse(route_table_deletion_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_route_table_exists_the_route_table_exists_method_returns_true(self):
'''
Tests existence of route table success
'''
vpc = self._create_vpc()
route_table = self._create_route_table(vpc.id)
route_table_existence_result = boto_vpc.route_table_exists(route_table.id, **conn_parameters)
self.assertTrue(route_table_existence_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_route_table_does_not_exist_the_route_table_exists_method_returns_false(self):
'''
Tests existence of route table failure
'''
route_table_existence_result = boto_vpc.route_table_exists('fake', **conn_parameters)
self.assertFalse(route_table_existence_result)
@mock_ec2
@skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
def test_that_when_checking_if_a_route_table_exists_but_providing_no_filters_the_route_table_exists_method_raises_a_salt_invocation_error(self):
'''
Tests checking route table without filters
'''
with self.assertRaisesRegexp(
SaltInvocationError,
'At least one of the following must be provided: id, name, or tags.'
):
boto_vpc.dhcp_options_exists(**conn_parameters)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_associating_a_route_table_succeeds_the_associate_route_table_method_should_return_the_association_id(
self):
'''
Tests associating route table successfully
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
route_table = self._create_route_table(vpc.id)
association_id = boto_vpc.associate_route_table(route_table.id, subnet.id, **conn_parameters)
self.assertTrue(association_id)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_associating_a_route_table_with_a_non_existent_route_table_the_associate_route_table_method_should_return_false(
self):
'''
Tests associating of route table to non-existent route table
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
association_id = boto_vpc.associate_route_table('fake', subnet.id, **conn_parameters)
self.assertFalse(association_id)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_associating_a_route_table_with_a_non_existent_subnet_the_associate_route_table_method_should_return_false(
self):
'''
Tests associating of route table with non-existent subnet
'''
vpc = self._create_vpc()
route_table = self._create_route_table(vpc.id)
association_id = boto_vpc.associate_route_table(route_table.id, 'fake', **conn_parameters)
self.assertFalse(association_id)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_disassociating_a_route_table_succeeds_the_disassociate_route_table_method_should_return_true(
self):
'''
        Tests disassociating a route table successfully
'''
vpc = self._create_vpc()
subnet = self._create_subnet(vpc.id)
route_table = self._create_route_table(vpc.id)
association_id = self._associate_route_table(route_table.id, subnet.id)
dhcp_disassociate_result = boto_vpc.disassociate_route_table(association_id, **conn_parameters)
self.assertTrue(dhcp_disassociate_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_route_succeeds_the_create_route_method_should_return_true(self):
'''
Tests successful creation of a route
'''
vpc = self._create_vpc()
route_table = self._create_route_table(vpc.id)
route_creation_result = boto_vpc.create_route(route_table.id, cidr_block, **conn_parameters)
self.assertTrue(route_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_creating_a_route_with_a_non_existent_route_table_the_create_route_method_should_return_false(
self):
'''
Tests creation of route on non-existent route table
'''
route_creation_result = boto_vpc.create_route('fake', cidr_block, **conn_parameters)
self.assertFalse(route_creation_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_a_route_succeeds_the_delete_route_method_should_return_true(self):
'''
Tests deleting route from route table
'''
vpc = self._create_vpc()
route_table = self._create_route_table(vpc.id)
route_deletion_result = boto_vpc.delete_route(route_table.id, cidr_block, **conn_parameters)
self.assertTrue(route_deletion_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_deleting_a_route_with_a_non_existent_route_table_the_delete_route_method_should_return_false(
self):
'''
Tests deleting route from a non-existent route table
'''
route_deletion_result = boto_vpc.delete_route('fake', cidr_block, **conn_parameters)
self.assertFalse(route_deletion_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_replacing_a_route_succeeds_the_replace_route_method_should_return_true(self):
'''
Tests replacing route successfully
'''
vpc = self._create_vpc()
route_table = self._create_route_table(vpc.id)
route_replacing_result = boto_vpc.replace_route(route_table.id, cidr_block, **conn_parameters)
self.assertTrue(route_replacing_result)
@mock_ec2
@skipIf(True, 'Moto has not implemented this feature. Skipping for now.')
def test_that_when_replacing_a_route_with_a_non_existent_route_table_the_replace_route_method_should_return_false(
self):
'''
Tests replacing a route when the route table doesn't exist
'''
route_replacing_result = boto_vpc.replace_route('fake', cidr_block, **conn_parameters)
self.assertFalse(route_replacing_result)
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(BotoVpcTestCase, needs_daemon=False)
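# --- Illustrative aside (not part of the original file) ---------------------
# A minimal, self-contained sketch of the moto pattern the suite above relies
# on: the @mock_ec2 decorator patches boto's EC2/VPC endpoints with an
# in-memory backend, so resources can be created and queried without touching
# a real AWS account. Names and credentials below are hypothetical, and the
# versions assumed are boto2 + moto, as used by the tests above.
import boto.vpc
from moto import mock_ec2


@mock_ec2
def _vpc_roundtrip_sketch():
    conn = boto.vpc.connect_to_region('us-east-1',
                                      aws_access_key_id='fake',
                                      aws_secret_access_key='fake')
    vpc = conn.create_vpc('10.0.0.0/24')             # handled in-memory by moto
    fetched = conn.get_all_vpcs(vpc_ids=[vpc.id])[0]
    assert fetched.cidr_block == '10.0.0.0/24'


_vpc_roundtrip_sketch()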
| 41.694844
| 180
| 0.680705
|
c6929c240a0ee365d2dcca31c77757c414b29f53
| 2,573
|
py
|
Python
|
Software/Estadística/MCMC/HS/CC+SN_int1/4params/MCMC_supernovas_4params_valores_medios.py
|
matiasleize/tesis_licenciatura
|
5df6e341314583702b466b8ed7977d410f0ee457
|
[
"MIT"
] | null | null | null |
Software/Estadística/MCMC/HS/CC+SN_int1/4params/MCMC_supernovas_4params_valores_medios.py
|
matiasleize/tesis_licenciatura
|
5df6e341314583702b466b8ed7977d410f0ee457
|
[
"MIT"
] | null | null | null |
Software/Estadística/MCMC/HS/CC+SN_int1/4params/MCMC_supernovas_4params_valores_medios.py
|
matiasleize/tesis_licenciatura
|
5df6e341314583702b466b8ed7977d410f0ee457
|
[
"MIT"
] | null | null | null |
"""
Created on Wed Feb 5 13:04:17 2020
@author: matias
"""
import numpy as np
np.random.seed(42)
from scipy.optimize import minimize
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_data import leer_data_pantheon, leer_data_cronometros, leer_data_AGN
from funciones_alternativos import params_to_chi2
#PARAMETER ORDER: Mabs, omega_m, b, H_0, n
#%% Defaults:
M_true = -19.352
omega_m_true = 0.22
b_true = 0.023
H0_true = 70.87 #73.48 # Units: (km/s)/Mpc
n = 1 # fixed exponent of the Hu-Sawicki model (assumed value; the original line was a broken placeholder)
params_fijos = n
#%%
# Supernovae
os.chdir(path_git+'/Software/Estadística/Datos/Datos_pantheon/')
ds_SN = leer_data_pantheon('lcparam_full_long_zhel.txt')
# Cosmic chronometers
os.chdir(path_git+'/Software/Estadística/Datos/')
ds_CC = leer_data_cronometros('datos_cronometros.txt')
# BAO
# os.chdir(path_git+'/Software/Estadística/Datos/BAO/')
# ds_BAO = []
# archivos_BAO = ['datos_BAO_da.txt','datos_BAO_dh.txt','datos_BAO_dm.txt',
# 'datos_BAO_dv.txt','datos_BAO_H.txt']
# for i in range(5):
# aux = leer_data_BAO(archivos_BAO[i])
# ds_BAO.append(aux)
# AGN
#os.chdir(path_git+'/Software/Estadística/Datos/Datos_AGN')
#ds_AGN = leer_data_AGN('table3.dat')
#%% Parameters to fit
nll = lambda theta: params_to_chi2(theta, params_fijos, index=4,
dataset_SN = ds_SN,
dataset_CC = ds_CC,
#dataset_BAO = ds_BAO,
#dataset_AGN = ds_AGN,
#H0_Riess = True,
model = 'HS',
integrador = 1
)
initial = np.array([M_true,omega_m_true,b_true,H0_true])
soln = minimize(nll, initial, options = {'eps': 0.01}, bounds =((-25,-18),(0.1,0.5),(0, 2),(67,74)))
M_ml, omega_m_ml, b_ml, H0_ml = soln.x
print(M_ml,omega_m_ml,b_ml,H0_ml)
#%%
os.chdir(path_git + '/Software/Estadística/Resultados_simulaciones')
np.savez('valores_medios_HS_CC+SN_4params_int1.npz', sol=soln.x)
num_data_CC = len(ds_CC[0])
num_data_SN = len(ds_SN[0])
# num_data_BAO = 20
#num_data_AGN = len(ds_AGN[0])
datos_totales = num_data_CC+num_data_SN
soln.fun/(datos_totales-len(soln.x))
#%%
os.chdir(path_git+'/Software/Estadística/Resultados_simulaciones/')
with np.load('valores_medios_HS_CC+SN_4params_int1.npz') as data:
sol = data['sol']
sol
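# --- Illustrative aside (not part of the original script) -------------------
# The bare expression soln.fun/(datos_totales-len(soln.x)) evaluated above is
# the reduced chi-squared, chi2/(N - k) for N data points and k fitted
# parameters; values near 1 indicate a statistically acceptable fit. The
# numbers below are illustrative only (Pantheon has 1048 SNe; the chronometer
# count of 31 is an assumption), not results of the fit above.
chi2 = 1080.0          # hypothetical minimized chi-squared (soln.fun)
N = 1048 + 31          # supernova + cosmic-chronometer data points
k = 4                  # fitted parameters: M, omega_m, b, H0
print(chi2 / (N - k))  # ~1.005, i.e. an acceptable fit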
| 29.574713
| 100
| 0.659153
|
405c8e64b6f2b3776c46fac768f8a3c67fff8d87
| 2,551
|
py
|
Python
|
CIFAR/models/fold_bn.py
|
ppppps/SNN_Calibration
|
1aca56daa5759a28bed6ed31b207c766d745dd51
|
[
"MIT"
] | 32
|
2021-06-14T04:36:04.000Z
|
2022-03-26T19:23:13.000Z
|
CIFAR/models/fold_bn.py
|
ppppps/SNN_Calibration
|
1aca56daa5759a28bed6ed31b207c766d745dd51
|
[
"MIT"
] | 2
|
2022-01-10T15:05:35.000Z
|
2022-03-29T15:23:48.000Z
|
CIFAR/models/fold_bn.py
|
ppppps/SNN_Calibration
|
1aca56daa5759a28bed6ed31b207c766d745dd51
|
[
"MIT"
] | 8
|
2021-06-21T06:46:25.000Z
|
2022-03-22T07:35:53.000Z
|
import torch
import torch.nn as nn
from CIFAR.models.utils import StraightThrough
import torch.nn.init as init
def _fold_bn(conv_module, bn_module, avg=False):
w = conv_module.weight.data
y_mean = bn_module.running_mean
y_var = bn_module.running_var
safe_std = torch.sqrt(y_var + bn_module.eps)
w_view = (conv_module.out_channels, 1, 1, 1)
if bn_module.affine:
weight = w * (bn_module.weight / safe_std).view(w_view)
beta = bn_module.bias - bn_module.weight * y_mean / safe_std
if conv_module.bias is not None:
bias = bn_module.weight * conv_module.bias / safe_std + beta
else:
bias = beta
else:
weight = w / safe_std.view(w_view)
beta = -y_mean / safe_std
if conv_module.bias is not None:
bias = conv_module.bias / safe_std + beta
else:
bias = beta
return weight, bias
def fold_bn_into_conv(conv_module, bn_module, avg=False):
w, b = _fold_bn(conv_module, bn_module, avg)
if conv_module.bias is None:
conv_module.bias = nn.Parameter(b)
else:
conv_module.bias.data = b
conv_module.weight.data = w
# set bn running stats
bn_module.running_mean = bn_module.bias.data
bn_module.running_var = bn_module.weight.data ** 2
def reset_bn(module: nn.BatchNorm2d):
if module.track_running_stats:
module.running_mean.zero_()
module.running_var.fill_(1-module.eps)
        # we do not reset the number of tracked batches here
# self.num_batches_tracked.zero_()
if module.affine:
init.ones_(module.weight)
init.zeros_(module.bias)
def is_bn(m):
return isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)
def is_absorbing(m):
return (isinstance(m, nn.Conv2d)) or isinstance(m, nn.Linear)
def search_fold_and_remove_bn(model):
model.eval()
prev = None
for n, m in model.named_children():
if is_bn(m) and is_absorbing(prev):
fold_bn_into_conv(prev, m)
# set the bn module to straight through
setattr(model, n, StraightThrough())
elif is_absorbing(m):
prev = m
else:
prev = search_fold_and_remove_bn(m)
return prev
def search_fold_and_reset_bn(model):
model.eval()
prev = None
for n, m in model.named_children():
if is_bn(m) and is_absorbing(prev):
fold_bn_into_conv(prev, m)
# reset_bn(m)
else:
search_fold_and_reset_bn(m)
prev = m
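# --- Illustrative aside (not part of the original module) -------------------
# Per output channel, folding BN(y) = gamma*(y - mu)/sqrt(var + eps) + beta
# into the preceding conv yields W' = W*gamma/sqrt(var + eps) and
# b' = gamma*(b - mu)/sqrt(var + eps) + beta, which is what _fold_bn computes.
# A quick numerical check of that equivalence (hypothetical shapes), using the
# functions defined above:
if __name__ == '__main__':
    conv = nn.Conv2d(3, 8, 3, bias=True)
    bn = nn.BatchNorm2d(8)
    conv.eval()
    bn.eval()
    # give BN non-trivial statistics and affine parameters so the check bites
    bn.running_mean.uniform_(-1.0, 1.0)
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.data.uniform_(0.5, 1.5)
    bn.bias.data.uniform_(-0.5, 0.5)
    x = torch.randn(2, 3, 16, 16)
    ref = bn(conv(x))                 # original conv -> bn pipeline
    fold_bn_into_conv(conv, bn)
    print(torch.allclose(ref, conv(x), atol=1e-5))  # expected: True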
| 29.662791
| 73
| 0.642101
|
7f9f1faf8ace642e4b1dfa8556a388d9ce419777
| 934
|
py
|
Python
|
misc/doc/sources/apis/ja-http/example_send_misc.py
|
2naive/jasmin
|
7609a50ded4ebf5873b607cb4a500be4b1be6be1
|
[
"Apache-2.0"
] | 2
|
2020-05-14T18:27:01.000Z
|
2021-03-21T17:26:19.000Z
|
misc/doc/sources/apis/ja-http/example_send_misc.py
|
2naive/jasmin
|
7609a50ded4ebf5873b607cb4a500be4b1be6be1
|
[
"Apache-2.0"
] | null | null | null |
misc/doc/sources/apis/ja-http/example_send_misc.py
|
2naive/jasmin
|
7609a50ded4ebf5873b607cb4a500be4b1be6be1
|
[
"Apache-2.0"
] | 1
|
2020-11-24T06:48:22.000Z
|
2020-11-24T06:48:22.000Z
|
# Python example
# http://jasminsms.com
import urllib2
import urllib
baseParams = {'username':'foo', 'password':'bar', 'to':'+336222172', 'content':'Hello'}
# Sending long content (more than 160 chars):
baseParams['content'] = 'Very long message ....................................................................................................................................................................................'
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
# Sending UCS2 (UTF-16) arabic content
baseParams['content'] = '\x06\x23\x06\x31\x06\x46\x06\x28'
baseParams['coding'] = 8
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
# Sending UCS2 (UTF-16) arabic binary content
baseParams['hex-content'] = '0623063106460628'
baseParams['coding'] = 8
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
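# --- Illustrative aside (not part of the original example) ------------------
# The example above targets Python 2 (urllib2). A hypothetical Python 3 port
# of the first request, assuming the same HTTP API and endpoint:
from urllib.parse import urlencode
from urllib.request import urlopen

params = {'username': 'foo', 'password': 'bar', 'to': '+336222172',
          'content': 'Hello'}
urlopen("http://127.0.0.1:1401/send?%s" % urlencode(params)).read()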
| 44.47619
| 224
| 0.573876
|
c877a4e8944cfcb24ff1ea098fb45be174e94810
| 4,409
|
py
|
Python
|
tests/test_agenda_route.py
|
krauson/calendar
|
34f3861ec1bb69e7dd81aa10739731cc2ae6c941
|
[
"Apache-2.0"
] | null | null | null |
tests/test_agenda_route.py
|
krauson/calendar
|
34f3861ec1bb69e7dd81aa10739731cc2ae6c941
|
[
"Apache-2.0"
] | null | null | null |
tests/test_agenda_route.py
|
krauson/calendar
|
34f3861ec1bb69e7dd81aa10739731cc2ae6c941
|
[
"Apache-2.0"
] | null | null | null |
from datetime import date, datetime, timedelta
from fastapi import status
class TestAgenda:
"""In the test we are receiving event fixtures
as parameters so they will load into the database"""
AGENDA = "/agenda"
AGENDA_7_DAYS = "/agenda?days=7"
AGENDA_30_DAYS = "/agenda?days=30"
NO_EVENTS = b"No events found..."
INVALID_DATES = b"Start date is greater than end date"
today_date = datetime.today().replace(hour=0, minute=0, second=0)
@staticmethod
def test_agenda_page_no_arguments_when_no_today_events(
agenda_test_client, session):
resp = agenda_test_client.get(TestAgenda.AGENDA)
assert resp.status_code == status.HTTP_200_OK
assert TestAgenda.NO_EVENTS in resp.content
def test_agenda_page_no_arguments_when_today_events_exist(
self, agenda_test_client, session, sender, today_event,
today_event_2, yesterday_event, next_week_event,
next_month_event, old_event
):
resp = agenda_test_client.get(TestAgenda.AGENDA)
assert resp.status_code == status.HTTP_200_OK
assert b"event 1" in resp.content
assert b"event 2" in resp.content
assert b"event 3" not in resp.content
assert b"event 4" not in resp.content
assert b"event 5" not in resp.content
assert b"event 6" not in resp.content
@staticmethod
def test_agenda_per_7_days(
agenda_test_client, session, sender, today_event,
today_event_2, yesterday_event, next_week_event,
next_month_event, old_event
):
resp = agenda_test_client.get(TestAgenda.AGENDA_7_DAYS)
today = date.today().strftime("%d/%m/%Y")
assert resp.status_code == status.HTTP_200_OK
assert bytes(today, 'utf-8') in resp.content
assert b"event 1" in resp.content
assert b"event 2" in resp.content
assert b"event 3" not in resp.content
assert b"event 4" in resp.content
assert b"event 5" not in resp.content
assert b"event 6" not in resp.content
@staticmethod
def test_agenda_per_30_days(
agenda_test_client, session, sender, today_event,
today_event_2, yesterday_event, next_week_event,
next_month_event, old_event
):
resp = agenda_test_client.get(TestAgenda.AGENDA_30_DAYS)
today = date.today().strftime("%d/%m/%Y")
assert resp.status_code == status.HTTP_200_OK
assert bytes(today, 'utf-8') in resp.content
assert b"event 1" in resp.content
assert b"event 2" in resp.content
assert b"event 3" not in resp.content
assert b"event 4" in resp.content
assert b"event 5" in resp.content
assert b"event 6" not in resp.content
def test_agenda_between_two_dates(
self, agenda_test_client, session, sender, today_event,
today_event_2, yesterday_event, next_week_event,
next_month_event, old_event
):
start_date = (self.today_date + timedelta(days=8, hours=4)).date()
end_date = (self.today_date + timedelta(days=32, hours=4)).date()
resp = agenda_test_client.get(
f"/agenda?start_date={start_date}&end_date={end_date}")
assert resp.status_code == status.HTTP_200_OK
assert b"event 1" not in resp.content
assert b"event 2" not in resp.content
assert b"event 3" not in resp.content
assert b"event 4" not in resp.content
assert b"event 5" in resp.content
assert b"event 6" not in resp.content
def test_agenda_start_bigger_than_end(self, agenda_test_client):
start_date = self.today_date.date()
end_date = (self.today_date - timedelta(days=2)).date()
resp = agenda_test_client.get(
f"/agenda?start_date={start_date}&end_date={end_date}")
assert resp.status_code == status.HTTP_200_OK
assert TestAgenda.INVALID_DATES in resp.content
@staticmethod
def test_no_show_events_user_2(
agenda_test_client, session, sender, today_event,
today_event_2, yesterday_event, next_week_event,
next_month_event, old_event
):
# "user" is just a different event creator
resp = agenda_test_client.get(TestAgenda.AGENDA)
assert resp.status_code == status.HTTP_200_OK
assert b"event 7" not in resp.content
| 41.205607
| 74
| 0.668859
|
f06549010d2dba88c6e639020f4713dfdebde6de
| 791
|
py
|
Python
|
CritsAndCoffee.Auth.API/API_Auth/app.py
|
srwagsta/critsAndCoffee2.0
|
4d2042b91675c50c2b6938c1e1863f873c80d391
|
[
"Apache-1.1"
] | null | null | null |
CritsAndCoffee.Auth.API/API_Auth/app.py
|
srwagsta/critsAndCoffee2.0
|
4d2042b91675c50c2b6938c1e1863f873c80d391
|
[
"Apache-1.1"
] | 10
|
2020-07-16T23:43:12.000Z
|
2022-03-02T03:52:43.000Z
|
CritsAndCoffee.Auth.API/API_Auth/app.py
|
srwagsta/critsAndCoffee2.0
|
4d2042b91675c50c2b6938c1e1863f873c80d391
|
[
"Apache-1.1"
] | null | null | null |
from flask import Flask
from API_Auth import auth, api
from API_Auth.extensions import db, jwt, migrate
def create_app(testing=False, cli=False):
    """Application factory used to create the application."""
app = Flask('API_Auth')
app.config.from_object('API_Auth.config')
if testing is True:
app.config['TESTING'] = True
configure_extensions(app, cli)
register_blueprints(app)
return app
def configure_extensions(app, cli):
    """Configure Flask extensions."""
db.init_app(app)
jwt.init_app(app)
if cli is True:
migrate.init_app(app, db)
def register_blueprints(app):
    """Register all blueprints for the application."""
app.register_blueprint(auth.views.blueprint)
app.register_blueprint(api.views.blueprint)
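# A minimal usage sketch (hypothetical, not part of the original module):
# serve the factory-built app with the Flask development server.
#
# if __name__ == '__main__':
#     app = create_app()
#     app.run(debug=True)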
| 21.378378
| 54
| 0.694058
|
2a92d76c0a0bc4e45e19cd3cb395a74f917fb8b7
| 17,140
|
py
|
Python
|
sdk/python/pulumi_azure_native/botservice/channel.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/botservice/channel.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/botservice/channel.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ChannelArgs', 'Channel']
@pulumi.input_type
class ChannelArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
channel_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union['AlexaChannelArgs', 'DirectLineChannelArgs', 'DirectLineSpeechChannelArgs', 'EmailChannelArgs', 'FacebookChannelArgs', 'KikChannelArgs', 'LineChannelArgs', 'MsTeamsChannelArgs', 'SkypeChannelArgs', 'SlackChannelArgs', 'SmsChannelArgs', 'TelegramChannelArgs', 'WebChatChannelArgs']]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Channel resource.
:param pulumi.Input[str] resource_group_name: The name of the Bot resource group in the user subscription.
:param pulumi.Input[str] resource_name: The name of the Bot resource.
:param pulumi.Input[str] channel_name: The name of the Channel resource.
:param pulumi.Input[str] etag: Entity Tag
:param pulumi.Input[Union[str, 'Kind']] kind: Required. Gets or sets the Kind of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[Union['AlexaChannelArgs', 'DirectLineChannelArgs', 'DirectLineSpeechChannelArgs', 'EmailChannelArgs', 'FacebookChannelArgs', 'KikChannelArgs', 'LineChannelArgs', 'MsTeamsChannelArgs', 'SkypeChannelArgs', 'SlackChannelArgs', 'SmsChannelArgs', 'TelegramChannelArgs', 'WebChatChannelArgs']] properties: The set of properties specific to bot channel resource
:param pulumi.Input['SkuArgs'] sku: Gets or sets the SKU of the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if channel_name is not None:
pulumi.set(__self__, "channel_name", channel_name)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Bot resource group in the user subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the Bot resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Channel resource.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[Union[str, 'Kind']]]:
"""
Required. Gets or sets the Kind of the resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[Union[str, 'Kind']]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Union['AlexaChannelArgs', 'DirectLineChannelArgs', 'DirectLineSpeechChannelArgs', 'EmailChannelArgs', 'FacebookChannelArgs', 'KikChannelArgs', 'LineChannelArgs', 'MsTeamsChannelArgs', 'SkypeChannelArgs', 'SlackChannelArgs', 'SmsChannelArgs', 'TelegramChannelArgs', 'WebChatChannelArgs']]]:
"""
The set of properties specific to bot channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Union['AlexaChannelArgs', 'DirectLineChannelArgs', 'DirectLineSpeechChannelArgs', 'EmailChannelArgs', 'FacebookChannelArgs', 'KikChannelArgs', 'LineChannelArgs', 'MsTeamsChannelArgs', 'SkypeChannelArgs', 'SlackChannelArgs', 'SmsChannelArgs', 'TelegramChannelArgs', 'WebChatChannelArgs']]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
Gets or sets the SKU of the resource.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
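# Construction sketch (hypothetical values; only the two resource-name fields are required):
#   args = ChannelArgs(resource_group_name="my-rg",
#                      resource_name="my-bot",
#                      channel_name="MsTeamsChannel")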
class Channel(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
channel_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AlexaChannelArgs'], pulumi.InputType['DirectLineChannelArgs'], pulumi.InputType['DirectLineSpeechChannelArgs'], pulumi.InputType['EmailChannelArgs'], pulumi.InputType['FacebookChannelArgs'], pulumi.InputType['KikChannelArgs'], pulumi.InputType['LineChannelArgs'], pulumi.InputType['MsTeamsChannelArgs'], pulumi.InputType['SkypeChannelArgs'], pulumi.InputType['SlackChannelArgs'], pulumi.InputType['SmsChannelArgs'], pulumi.InputType['TelegramChannelArgs'], pulumi.InputType['WebChatChannelArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Bot channel resource definition
API Version: 2021-03-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] channel_name: The name of the Channel resource.
:param pulumi.Input[str] etag: Entity Tag
:param pulumi.Input[Union[str, 'Kind']] kind: Required. Gets or sets the Kind of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[Union[pulumi.InputType['AlexaChannelArgs'], pulumi.InputType['DirectLineChannelArgs'], pulumi.InputType['DirectLineSpeechChannelArgs'], pulumi.InputType['EmailChannelArgs'], pulumi.InputType['FacebookChannelArgs'], pulumi.InputType['KikChannelArgs'], pulumi.InputType['LineChannelArgs'], pulumi.InputType['MsTeamsChannelArgs'], pulumi.InputType['SkypeChannelArgs'], pulumi.InputType['SlackChannelArgs'], pulumi.InputType['SmsChannelArgs'], pulumi.InputType['TelegramChannelArgs'], pulumi.InputType['WebChatChannelArgs']]] properties: The set of properties specific to bot channel resource
:param pulumi.Input[str] resource_group_name: The name of the Bot resource group in the user subscription.
:param pulumi.Input[str] resource_name_: The name of the Bot resource.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: Gets or sets the SKU of the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ChannelArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Bot channel resource definition
API Version: 2021-03-01.
:param str resource_name: The name of the resource.
:param ChannelArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ChannelArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
channel_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AlexaChannelArgs'], pulumi.InputType['DirectLineChannelArgs'], pulumi.InputType['DirectLineSpeechChannelArgs'], pulumi.InputType['EmailChannelArgs'], pulumi.InputType['FacebookChannelArgs'], pulumi.InputType['KikChannelArgs'], pulumi.InputType['LineChannelArgs'], pulumi.InputType['MsTeamsChannelArgs'], pulumi.InputType['SkypeChannelArgs'], pulumi.InputType['SlackChannelArgs'], pulumi.InputType['SmsChannelArgs'], pulumi.InputType['TelegramChannelArgs'], pulumi.InputType['WebChatChannelArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ChannelArgs.__new__(ChannelArgs)
__props__.__dict__["channel_name"] = channel_name
__props__.__dict__["etag"] = etag
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:botservice:Channel"), pulumi.Alias(type_="azure-native:botservice/v20171201:Channel"), pulumi.Alias(type_="azure-nextgen:botservice/v20171201:Channel"), pulumi.Alias(type_="azure-native:botservice/v20180712:Channel"), pulumi.Alias(type_="azure-nextgen:botservice/v20180712:Channel"), pulumi.Alias(type_="azure-native:botservice/v20200602:Channel"), pulumi.Alias(type_="azure-nextgen:botservice/v20200602:Channel"), pulumi.Alias(type_="azure-native:botservice/v20210301:Channel"), pulumi.Alias(type_="azure-nextgen:botservice/v20210301:Channel")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Channel, __self__).__init__(
'azure-native:botservice:Channel',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Channel':
"""
Get an existing Channel resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ChannelArgs.__new__(ChannelArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Channel(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Entity Tag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Required. Gets or sets the Kind of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
The set of properties specific to bot channel resource
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
Gets or sets the SKU of the resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
| 48.146067
| 633
| 0.655309
|
b4fec2417995f8b16fb8f1bc9f96ab0349179e65
| 9,184
|
py
|
Python
|
idpsreact/lib/python3.7/site-packages/openpyxl/cell/cell.py
|
DTrafford/IDPS
|
1eaccfc218adcb7231e64271731c765f8362b891
|
[
"MIT"
] | 6
|
2019-12-07T07:30:34.000Z
|
2022-01-20T14:26:44.000Z
|
idpsreact/lib/python3.7/site-packages/openpyxl/cell/cell.py
|
DTrafford/IDPS
|
1eaccfc218adcb7231e64271731c765f8362b891
|
[
"MIT"
] | 9
|
2019-12-28T06:18:53.000Z
|
2022-01-13T01:54:21.000Z
|
idpsreact/lib/python3.7/site-packages/openpyxl/cell/cell.py
|
DTrafford/IDPS
|
1eaccfc218adcb7231e64271731c765f8362b891
|
[
"MIT"
] | 1
|
2020-05-21T15:55:45.000Z
|
2020-05-21T15:55:45.000Z
|
# Copyright (c) 2010-2019 openpyxl
"""Manage individual cells in a spreadsheet.
The Cell class is required to know its value and type, display options,
and any other features of an Excel cell. Utilities for referencing
cells using Excel's 'A1' column/row nomenclature are also provided.
"""
__docformat__ = "restructuredtext en"
# Python stdlib imports
from copy import copy
import datetime
import re
from itertools import islice, product
from openpyxl.compat import (
NUMERIC_TYPES,
deprecated,
)
from openpyxl.utils.units import (
DEFAULT_ROW_HEIGHT,
DEFAULT_COLUMN_WIDTH
)
from openpyxl.utils.datetime import (
to_excel,
time_to_days,
timedelta_to_days,
from_excel
)
from openpyxl.utils.exceptions import (
IllegalCharacterError
)
from openpyxl.utils.units import points_to_pixels
from openpyxl.utils import (
get_column_letter,
column_index_from_string,
)
from openpyxl.utils.inference import (
cast_numeric,
cast_percentage,
)
from openpyxl.styles import numbers, is_date_format
from openpyxl.styles.styleable import StyleableObject
from openpyxl.worksheet.hyperlink import Hyperlink
# constants
TIME_TYPES = (datetime.datetime, datetime.date, datetime.time, datetime.timedelta)
TIME_FORMATS = {
datetime.datetime:numbers.FORMAT_DATE_DATETIME,
datetime.date:numbers.FORMAT_DATE_YYYYMMDD2,
datetime.time:numbers.FORMAT_DATE_TIME6,
datetime.timedelta:numbers.FORMAT_DATE_TIMEDELTA,
}
try:
from pandas import Timestamp
TIME_TYPES = TIME_TYPES + (Timestamp,)
TIME_FORMATS[Timestamp] = numbers.FORMAT_DATE_DATETIME
except ImportError:
pass
STRING_TYPES = (str, bytes)
KNOWN_TYPES = NUMERIC_TYPES + TIME_TYPES + STRING_TYPES + (bool, type(None))
ILLEGAL_CHARACTERS_RE = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]')
ERROR_CODES = ('#NULL!', '#DIV/0!', '#VALUE!', '#REF!', '#NAME?', '#NUM!',
'#N/A')
TYPE_STRING = 's'
TYPE_FORMULA = 'f'
TYPE_NUMERIC = 'n'
TYPE_BOOL = 'b'
TYPE_NULL = 'n'
TYPE_INLINE = 'inlineStr'
TYPE_ERROR = 'e'
TYPE_FORMULA_CACHE_STRING = 'str'
VALID_TYPES = (TYPE_STRING, TYPE_FORMULA, TYPE_NUMERIC, TYPE_BOOL,
TYPE_NULL, TYPE_INLINE, TYPE_ERROR, TYPE_FORMULA_CACHE_STRING)
_TYPES = {int:'n', float:'n', str:'s', bool:'b'}
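# get_type() below resolves the Excel data type for values whose Python type is
# not yet in _TYPES and memoises the result, so later lookups are plain dict hits.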
def get_type(t, value):
if isinstance(value, NUMERIC_TYPES):
dt = 'n'
elif isinstance(value, STRING_TYPES):
dt = 's'
elif isinstance(value, TIME_TYPES):
dt = 'd'
else:
return
_TYPES[t] = dt
return dt
class Cell(StyleableObject):
"""Describes cell associated properties.
Properties of interest include style, type, value, and address.
"""
__slots__ = (
'row',
'column',
'_value',
'data_type',
'parent',
'_hyperlink',
'_comment',
)
def __init__(self, worksheet, row=None, column=None, value=None, style_array=None):
super(Cell, self).__init__(worksheet, style_array)
self.row = row
"""Row number of this cell (1-based)"""
self.column = column
"""Column number of this cell (1-based)"""
# _value is the stored value, while value is the displayed value
self._value = None
self._hyperlink = None
self.data_type = 'n'
if value is not None:
self.value = value
self._comment = None
@property
def coordinate(self):
"""This cell's coordinate (ex. 'A5')"""
col = get_column_letter(self.column)
return "%s%d" % (col, self.row)
@property
def col_idx(self):
"""The numerical index of the column"""
return self.column
@property
def column_letter(self):
return get_column_letter(self.column)
@property
def encoding(self):
return self.parent.encoding
@property
def base_date(self):
return self.parent.parent.epoch
@property
def guess_types(self):
return getattr(self.parent.parent, 'guess_types', False)
def __repr__(self):
return "<Cell {0!r}.{1}>".format(self.parent.title, self.coordinate)
def check_string(self, value):
"""Check string coding, length, and line break character"""
if value is None:
return
        # coerce to str, decoding bytes with the worksheet encoding if needed
if not isinstance(value, str):
value = str(value, self.encoding)
value = str(value)
# string must never be longer than 32,767 characters
# truncate if necessary
value = value[:32767]
if next(ILLEGAL_CHARACTERS_RE.finditer(value), None):
raise IllegalCharacterError
return value
    def check_error(self, value):
        """Try to convert the value to a string; fall back to '#N/A' on failure."""
try:
return str(value)
except UnicodeDecodeError:
return u'#N/A'
def _bind_value(self, value):
"""Given a value, infer the correct data type"""
self.data_type = "n"
t = type(value)
try:
dt = _TYPES[t]
except KeyError:
dt = get_type(t, value)
if dt is not None:
self.data_type = dt
if dt == 'n' or dt == 'b':
pass
elif dt == 'd':
if not is_date_format(self.number_format):
self.number_format = TIME_FORMATS[t]
self.data_type = "d"
elif dt == "s":
value = self.check_string(value)
if len(value) > 1 and value.startswith("="):
self.data_type = 'f'
elif value in ERROR_CODES:
self.data_type = 'e'
elif value is not None:
raise ValueError("Cannot convert {0!r} to Excel".format(value))
self._value = value
@property
def value(self):
"""Get or set the value held in the cell.
:type: depends on the value (string, float, int or
:class:`datetime.datetime`)
"""
return self._value
@value.setter
def value(self, value):
"""Set the value and infer type and display options."""
self._bind_value(value)
@property
def internal_value(self):
"""Always returns the value for excel."""
return self._value
@property
def hyperlink(self):
"""Return the hyperlink target or an empty string"""
return self._hyperlink
@hyperlink.setter
def hyperlink(self, val):
"""Set value and display for hyperlinks in a cell.
Automatically sets the `value` of the cell with link text,
but you can modify it afterwards by setting the `value`
property, and the hyperlink will remain.
Hyperlink is removed if set to ``None``."""
if val is None:
self._hyperlink = None
else:
if not isinstance(val, Hyperlink):
val = Hyperlink(ref="", target=val)
val.ref = self.coordinate
self._hyperlink = val
if self._value is None:
self.value = val.target or val.location
@property
def is_date(self):
"""True if the value is formatted as a date
:type: bool
"""
return self.data_type == 'd' or (
self.data_type == 'n' and is_date_format(self.number_format)
)
def offset(self, row=0, column=0):
"""Returns a cell location relative to this cell.
:param row: number of rows to offset
:type row: int
:param column: number of columns to offset
:type column: int
:rtype: :class:`openpyxl.cell.Cell`
"""
offset_column = self.col_idx + column
offset_row = self.row + row
return self.parent.cell(column=offset_column, row=offset_row)
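    # Usage sketch (hypothetical): ws['B2'].offset(row=1, column=2) returns the
    # cell at D3, i.e. one row down and two columns right of B2.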
@property
def comment(self):
""" Returns the comment associated with this cell
:type: :class:`openpyxl.comments.Comment`
"""
return self._comment
@comment.setter
def comment(self, value):
"""
Assign a comment to a cell
"""
if value is not None:
if value.parent:
value = copy(value)
value.bind(self)
elif value is None and self._comment:
self._comment.unbind()
self._comment = value
class MergedCell(StyleableObject):
"""
Describes the properties of a cell in a merged cell and helps to
display the borders of the merged cell.
The value of a MergedCell is always None.
"""
__slots__ = ('row', 'column')
_value = None
data_type = "n"
comment = None
hyperlink = None
def __init__(self, worksheet, row=None, column=None):
super(MergedCell, self).__init__(worksheet)
self.row = row
self.column = column
def __repr__(self):
return "<MergedCell {0!r}.{1}>".format(self.parent.title, self.coordinate)
coordinate = Cell.coordinate
_comment = comment
value = _value
def WriteOnlyCell(ws=None, value=None):
return Cell(worksheet=ws, column=1, row=1, value=value)
| 25.943503
| 87
| 0.614547
|
8e342e7d22ae15011b5fa9a49cbb27fe4839d661
| 21,496
|
py
|
Python
|
tractseg/libs/ExpUtils.py
|
soichih/TractSeg
|
f78d0c6dc998905e593cbf4346745467e30d1979
|
[
"Apache-2.0"
] | null | null | null |
tractseg/libs/ExpUtils.py
|
soichih/TractSeg
|
f78d0c6dc998905e593cbf4346745467e30d1979
|
[
"Apache-2.0"
] | null | null | null |
tractseg/libs/ExpUtils.py
|
soichih/TractSeg
|
f78d0c6dc998905e593cbf4346745467e30d1979
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
from os.path import join
import numpy as np
from pprint import pprint
import glob
from tractseg.libs.Config import Config as C
from tractseg.libs.Subjects import get_all_subjects
from tractseg.libs.Utils import Utils
class ExpUtils:
@staticmethod
    def create_experiment_folder(experiment_name, multi_parent_path, train):
        '''
        Create a new experiment folder. If it already exists, create a new one with an increasing number at the end.
        If not training a model (only predicting): use the existing folder.
        '''
if multi_parent_path != "":
dir = join(multi_parent_path, experiment_name)
else:
dir = join(C.EXP_PATH, experiment_name)
if not train:
if os.path.exists(dir):
return dir
else:
sys.exit('Testing target directory does not exist!')
else:
for i in range(40):
if os.path.exists(dir):
# tailing_numbers = re.findall('x.*?([0-9]+)$', experiment_name) #not correct
                    tailing_numbers = re.findall('x([0-9]+)$', experiment_name)  # find trailing numbers preceded by an 'x'
if len(tailing_numbers) > 0:
num = int(tailing_numbers[0])
if num < 10:
experiment_name = experiment_name[:-1] + str(num+1)
else:
experiment_name = experiment_name[:-2] + str(num+1)
else:
experiment_name += "_x2"
if multi_parent_path != "":
dir = join(multi_parent_path, experiment_name)
else:
dir = join(C.EXP_PATH, experiment_name)
else:
os.makedirs(dir)
break
return dir
@staticmethod
def make_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
@staticmethod
def print_HPs(HP):
# dict = copy.deepcopy(HP.__dict__)
        hp_dict = {attr: getattr(HP, attr) for attr in dir(HP) if not callable(getattr(HP, attr)) and not attr.startswith("__")}
        hp_dict.pop("TRAIN_SUBJECTS", None)
        hp_dict.pop("TEST_SUBJECTS", None)
        hp_dict.pop("VALIDATE_SUBJECTS", None)
        pprint(hp_dict)
@staticmethod
def get_best_weights_path(exp_path, load_weights):
if load_weights:
return glob.glob(exp_path + "/best_weights_ep*.npz")[0]
else:
return ""
@staticmethod
def get_bvals_bvecs_path(args):
input_file_without_ending = os.path.basename(args.input).split(".")[0]
if args.bvals:
bvals = args.bvals
else:
bvals = join(os.path.dirname(args.input), input_file_without_ending + ".bvals")
if args.bvecs:
bvecs = args.bvecs
else:
bvecs = join(os.path.dirname(args.input), input_file_without_ending + ".bvecs")
return bvals, bvecs
@staticmethod
def get_brain_mask_path(HP, args):
if args.brain_mask:
return args.brain_mask
brain_mask_path = join(HP.PREDICT_IMG_OUTPUT, "nodif_brain_mask.nii.gz")
if os.path.isfile(brain_mask_path):
return brain_mask_path
brain_mask_path = join(os.path.dirname(args.input), "nodif_brain_mask.nii.gz")
if os.path.isfile(brain_mask_path):
print("Loading brain mask from: {}".format(brain_mask_path))
return brain_mask_path
# raise ValueError("no brainmask available")
return None
@staticmethod
def get_bundle_names(CLASSES):
if CLASSES == "All":
# 72 Tracts
bundles = ["AF_left", "AF_right", "ATR_left", "ATR_right", "CA", "CC_1", "CC_2", "CC_3", "CC_4", "CC_5", "CC_6", "CC_7",
"CG_left", "CG_right", "CST_left", "CST_right", "MLF_left", "MLF_right",
"FPT_left", "FPT_right", "FX_left", "FX_right",
"ICP_left", "ICP_right", "IFO_left", "IFO_right", "ILF_left", "ILF_right", "MCP", "OR_left", "OR_right",
"POPT_left", "POPT_right", "SCP_left", "SCP_right", "SLF_I_left", "SLF_I_right", "SLF_II_left", "SLF_II_right",
"SLF_III_left", "SLF_III_right", "STR_left", "STR_right", "UF_left", "UF_right", "CC",
"T_PREF_left", "T_PREF_right", "T_PREM_left", "T_PREM_right", "T_PREC_left", "T_PREC_right", "T_POSTC_left",
"T_POSTC_right", "T_PAR_left", "T_PAR_right", "T_OCC_left", "T_OCC_right", "ST_FO_left", "ST_FO_right", "ST_PREF_left",
"ST_PREF_right", "ST_PREM_left", "ST_PREM_right", "ST_PREC_left", "ST_PREC_right", "ST_POSTC_left", "ST_POSTC_right",
"ST_PAR_left", "ST_PAR_right", "ST_OCC_left", "ST_OCC_right"]
elif CLASSES == "All_Part1":
# 18 Tracts
bundles = ['AF_left', 'AF_right', 'ATR_left', 'ATR_right', 'CA', 'CC_1', 'CC_2', 'CC_3', 'CC_4', 'CC_5', 'CC_6', 'CC_7', 'CG_left', 'CG_right', 'CST_left', 'CST_right', 'MLF_left', 'MLF_right']
elif CLASSES == "All_Part2":
# 18 Tracts
bundles = ['FPT_left', 'FPT_right', 'FX_left', 'FX_right', 'ICP_left', 'ICP_right', 'IFO_left', 'IFO_right', 'ILF_left', 'ILF_right', 'MCP', 'OR_left', 'OR_right', 'POPT_left', 'POPT_right', 'SCP_left', 'SCP_right', 'SLF_I_left']
elif CLASSES == "All_Part3":
# 18 Tracts
bundles = ['SLF_I_right', 'SLF_II_left', 'SLF_II_right', 'SLF_III_left', 'SLF_III_right', 'STR_left', 'STR_right', 'UF_left', 'UF_right', 'CC', 'T_PREF_left', 'T_PREF_right', 'T_PREM_left', 'T_PREM_right', 'T_PREC_left', 'T_PREC_right', 'T_POSTC_left', 'T_POSTC_right']
elif CLASSES == "All_Part4":
# 18 Tracts
bundles = ['T_PAR_left', 'T_PAR_right', 'T_OCC_left', 'T_OCC_right', 'ST_FO_left', 'ST_FO_right', 'ST_PREF_left', 'ST_PREF_right', 'ST_PREM_left', 'ST_PREM_right', 'ST_PREC_left', 'ST_PREC_right', 'ST_POSTC_left', 'ST_POSTC_right', 'ST_PAR_left', 'ST_PAR_right', 'ST_OCC_left', 'ST_OCC_right']
elif CLASSES == "11":
# 11 Major tracts
bundles = ["CST_left", "CST_right", "IFO_left", "IFO_right", "CA", "CG_left", "CG_right",
"FX_left", "FX_right", "UF_left", "UF_right"]
elif CLASSES == "20":
# 20 Major tracts
bundles = ["AF_left", "AF_right", "CA", "CST_left", "CST_right", "CG_left", "CG_right",
"ICP_left", "ICP_right", "MCP", "SCP_left", "SCP_right", "ILF_left", "ILF_right",
"IFO_left", "IFO_right", "OR_left", "OR_right", "UF_left", "UF_right"]
elif CLASSES == "20_endpoints_combined":
# endpoints for "20"; beginnings and endings combined
bundles = ["AF_left", "AF_right", "CA", "CST_left", "CST_right", "CG_left", "CG_right",
"ICP_left", "ICP_right", "MCP", "SCP_left", "SCP_right", "ILF_left", "ILF_right",
"IFO_left", "IFO_right", "OR_left", "OR_right", "UF_left", "UF_right"]
elif CLASSES == "20_endpoints":
#endpoints for "20"
bundles = ['AF_left_b', 'AF_left_e', 'AF_right_b', 'AF_right_e', 'CA_b', 'CA_e',
'CST_left_b', 'CST_left_e', 'CST_right_b', 'CST_right_e', 'CG_left_b',
'CG_left_e', 'CG_right_b', 'CG_right_e', 'ICP_left_b', 'ICP_left_e',
'ICP_right_b', 'ICP_right_e', 'MCP_b', 'MCP_e', 'SCP_left_b', 'SCP_left_e',
'SCP_right_b', 'SCP_right_e', 'ILF_left_b', 'ILF_left_e', 'ILF_right_b',
'ILF_right_e', 'IFO_left_b', 'IFO_left_e', 'IFO_right_b', 'IFO_right_e',
'OR_left_b', 'OR_left_e', 'OR_right_b', 'OR_right_e', 'UF_left_b', 'UF_left_e',
'UF_right_b', 'UF_right_e'] #40
elif CLASSES == "20_bundles_endpoints":
#endpoints for "20"
bundles = ['AF_left', 'AF_left_b', 'AF_left_e', 'AF_right', 'AF_right_b', 'AF_right_e',
'CA', 'CA_b', 'CA_e', 'CST_left', 'CST_left_b', 'CST_left_e', 'CST_right', 'CST_right_b', 'CST_right_e',
'CG_left', 'CG_left_b', 'CG_left_e', 'CG_right', 'CG_right_b', 'CG_right_e',
'ICP_left', 'ICP_left_b', 'ICP_left_e', 'ICP_right', 'ICP_right_b', 'ICP_right_e',
'MCP', 'MCP_b', 'MCP_e', 'SCP_left', 'SCP_left_b', 'SCP_left_e',
'SCP_right', 'SCP_right_b', 'SCP_right_e', 'ILF_left', 'ILF_left_b', 'ILF_left_e',
'ILF_right', 'ILF_right_b', 'ILF_right_e', 'IFO_left', 'IFO_left_b', 'IFO_left_e',
'IFO_right', 'IFO_right_b', 'IFO_right_e',
'OR_left', 'OR_left_b', 'OR_left_e', 'OR_right', 'OR_right_b', 'OR_right_e',
'UF_left', 'UF_left_b', 'UF_left_e', 'UF_right', 'UF_right_b', 'UF_right_e'] #60
elif CLASSES == "All_endpoints":
#endpoints for "All"
bundles = ['AF_left_b', 'AF_left_e', 'AF_right_b', 'AF_right_e', 'ATR_left_b', 'ATR_left_e', 'ATR_right_b',
'ATR_right_e', 'CA_b', 'CA_e', 'CC_1_b', 'CC_1_e', 'CC_2_b', 'CC_2_e', 'CC_3_b', 'CC_3_e', 'CC_4_b',
'CC_4_e', 'CC_5_b', 'CC_5_e', 'CC_6_b', 'CC_6_e', 'CC_7_b', 'CC_7_e', 'CG_left_b', 'CG_left_e',
'CG_right_b', 'CG_right_e', 'CST_left_b', 'CST_left_e', 'CST_right_b', 'CST_right_e', 'MLF_left_b',
'MLF_left_e', 'MLF_right_b', 'MLF_right_e', 'FPT_left_b', 'FPT_left_e', 'FPT_right_b', 'FPT_right_e',
'FX_left_b', 'FX_left_e', 'FX_right_b', 'FX_right_e', 'ICP_left_b', 'ICP_left_e', 'ICP_right_b',
'ICP_right_e', 'IFO_left_b', 'IFO_left_e', 'IFO_right_b', 'IFO_right_e', 'ILF_left_b', 'ILF_left_e',
'ILF_right_b', 'ILF_right_e', 'MCP_b', 'MCP_e', 'OR_left_b', 'OR_left_e', 'OR_right_b', 'OR_right_e',
'POPT_left_b', 'POPT_left_e', 'POPT_right_b', 'POPT_right_e', 'SCP_left_b', 'SCP_left_e', 'SCP_right_b',
'SCP_right_e', 'SLF_I_left_b', 'SLF_I_left_e', 'SLF_I_right_b', 'SLF_I_right_e', 'SLF_II_left_b',
'SLF_II_left_e', 'SLF_II_right_b', 'SLF_II_right_e', 'SLF_III_left_b', 'SLF_III_left_e', 'SLF_III_right_b',
'SLF_III_right_e', 'STR_left_b', 'STR_left_e', 'STR_right_b', 'STR_right_e', 'UF_left_b', 'UF_left_e',
'UF_right_b', 'UF_right_e', 'CC_b', 'CC_e', 'T_PREF_left_b', 'T_PREF_left_e', 'T_PREF_right_b',
'T_PREF_right_e', 'T_PREM_left_b', 'T_PREM_left_e', 'T_PREM_right_b', 'T_PREM_right_e', 'T_PREC_left_b',
'T_PREC_left_e', 'T_PREC_right_b', 'T_PREC_right_e', 'T_POSTC_left_b', 'T_POSTC_left_e', 'T_POSTC_right_b',
'T_POSTC_right_e', 'T_PAR_left_b', 'T_PAR_left_e', 'T_PAR_right_b', 'T_PAR_right_e', 'T_OCC_left_b',
'T_OCC_left_e', 'T_OCC_right_b', 'T_OCC_right_e', 'ST_FO_left_b', 'ST_FO_left_e', 'ST_FO_right_b',
'ST_FO_right_e', 'ST_PREF_left_b', 'ST_PREF_left_e', 'ST_PREF_right_b', 'ST_PREF_right_e',
'ST_PREM_left_b', 'ST_PREM_left_e', 'ST_PREM_right_b', 'ST_PREM_right_e', 'ST_PREC_left_b',
'ST_PREC_left_e', 'ST_PREC_right_b', 'ST_PREC_right_e', 'ST_POSTC_left_b', 'ST_POSTC_left_e',
'ST_POSTC_right_b', 'ST_POSTC_right_e', 'ST_PAR_left_b', 'ST_PAR_left_e', 'ST_PAR_right_b',
'ST_PAR_right_e', 'ST_OCC_left_b', 'ST_OCC_left_e', 'ST_OCC_right_b', 'ST_OCC_right_e'] #144
else:
#1 tract
# bundles = ["CST_right"]
bundles = [CLASSES]
return ["BG"] + bundles #Add Background label (is always beginning of list)
@staticmethod
def get_ACT_noACT_bundle_names():
# ACT = ["AF_left", "AF_right", "ATR_left", "ATR_right", "CC_1", "CC_2", "CC_3", "CC_4", "CC_5", "CC_6", "CC_7",
# "CG_left", "CG_right", "CST_left", "CST_right", "EMC_left", "EMC_right", "MLF_left", "MLF_right",
# "FPT_left", "FPT_right", "FX_left", "FX_right",
# "ICP_left", "ICP_right", "ILF_left", "ILF_right", "MCP", "OR_left", "OR_right",
# "POPT_left", "POPT_right", "SCP_left", "SCP_right", "SLF_I_left", "SLF_I_right", "SLF_II_left", "SLF_II_right",
# "SLF_III_left", "SLF_III_right", "STR_left", "STR_right", "CC",
# "T_PREF_left", "T_PREF_right", "T_PREM_left", "T_PREM_right", "T_PREC_left", "T_PREC_right", "T_POSTC_left",
# "T_POSTC_right", "T_PAR_left", "T_PAR_right", "T_OCC_left", "T_OCC_right", "ST_FO_left", "ST_FO_right", "ST_PREF_left",
# "ST_PREF_right", "ST_PREM_left", "ST_PREM_right", "ST_PREC_left", "ST_PREC_right", "ST_POSTC_left", "ST_POSTC_right",
# "ST_PAR_left", "ST_PAR_right", "ST_OCC_left", "ST_OCC_right"]
ACT = ["AF_left", "AF_right", "ATR_left", "ATR_right", "CC_1", "CC_2", "CC_3", "CC_4", "CC_5", "CC_6", "CC_7",
"CG_left", "CG_right", "CST_left", "CST_right", "MLF_left", "MLF_right",
"FPT_left", "FPT_right", "FX_left", "FX_right",
"ICP_left", "ICP_right", "ILF_left", "ILF_right", "MCP", "OR_left", "OR_right",
"POPT_left", "POPT_right", "SCP_left", "SCP_right", "SLF_I_left", "SLF_I_right", "SLF_II_left", "SLF_II_right",
"SLF_III_left", "SLF_III_right", "STR_left", "STR_right", "CC",
"T_PREF_left", "T_PREF_right", "T_PREM_left", "T_PREM_right", "T_PREC_left", "T_PREC_right", "T_POSTC_left",
"T_POSTC_right", "T_PAR_left", "T_PAR_right", "T_OCC_left", "T_OCC_right", "ST_FO_left", "ST_FO_right", "ST_PREF_left",
"ST_PREF_right", "ST_PREM_left", "ST_PREM_right", "ST_PREC_left", "ST_PREC_right", "ST_POSTC_left", "ST_POSTC_right",
"ST_PAR_left", "ST_PAR_right", "ST_OCC_left", "ST_OCC_right"]
noACT = ["CA", "IFO_left", "IFO_right", "UF_left", "UF_right"]
return ACT, noACT
@staticmethod
def get_labels_filename(HP):
if HP.CLASSES == "All" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks"
else:
HP.LABELS_FILENAME = "bundle_peaks_808080"
elif HP.CLASSES == "11" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks_11"
else:
HP.LABELS_FILENAME = "bundle_peaks_11_808080"
elif HP.CLASSES == "20" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks_20"
else:
HP.LABELS_FILENAME = "bundle_peaks_20_808080"
elif HP.CLASSES == "All_Part1" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks_Part1"
else:
HP.LABELS_FILENAME = "bundle_peaks_Part1_808080"
elif HP.CLASSES == "All_Part2" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks_Part2"
else:
HP.LABELS_FILENAME = "bundle_peaks_Part2_808080"
elif HP.CLASSES == "All_Part3" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks_Part3"
else:
HP.LABELS_FILENAME = "bundle_peaks_Part3_808080"
elif HP.CLASSES == "All_Part4" and HP.EXPERIMENT_TYPE == "peak_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_peaks_Part4"
else:
                HP.LABELS_FILENAME = "bundle_peaks_Part4_808080"
elif HP.CLASSES == "All_endpoints" and HP.EXPERIMENT_TYPE == "endings_segmentation":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "endpoints_72_ordered"
else:
HP.LABELS_FILENAME = "endpoints_72_ordered"
elif HP.CLASSES == "20_endpoints" and HP.EXPERIMENT_TYPE == "endings_segmentation":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "endpoints_20_ordered"
else:
HP.LABELS_FILENAME = "endpoints_20_ordered"
elif HP.CLASSES == "20_endpoints_combined" and HP.EXPERIMENT_TYPE == "endings_segmentation":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "endpoints_20_combined"
else:
HP.LABELS_FILENAME = "endpoints_20_combined"
elif HP.CLASSES == "20_bundles_endpoints" and HP.EXPERIMENT_TYPE == "endings_segmentation":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_endpoints_20"
else:
HP.LABELS_FILENAME = "bundle_endpoints_20"
elif HP.CLASSES == "All" and HP.EXPERIMENT_TYPE == "tract_segmentation":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_masks_72"
elif HP.RESOLUTION == "2mm" and HP.DATASET == "Schizo":
HP.LABELS_FILENAME = "bundle_masks_72"
else:
HP.LABELS_FILENAME = "bundle_masks_72_808080"
elif HP.CLASSES == "20" and HP.EXPERIMENT_TYPE == "tract_segmentation":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_masks_20"
else:
HP.LABELS_FILENAME = "bundle_masks_20_808080"
elif HP.CLASSES == "All" and HP.EXPERIMENT_TYPE == "dm_regression":
if HP.RESOLUTION == "1.25mm":
HP.LABELS_FILENAME = "bundle_masks_dm"
else:
HP.LABELS_FILENAME = "NOT_AVAILABLE"
else:
HP.LABELS_FILENAME = "bundle_peaks/" + HP.CLASSES
return HP
@staticmethod
    def add_background_class(data):
        '''
        Takes a list of 3D arrays with bundle masks; shape: (nr_bundles, x, y, z).
        Calculates a BG class (where no other class is 1) and adds it at idx=0 to the array.
        Returns an array with nr_bundles + 1 channels in the last dim: (x, y, z, nr_bundles + 1).
        :param data:
        :return:
        '''
s = data[0].shape
mask_ml = np.zeros((s[0], s[1], s[2], len(data) + 1))
background = np.ones((s[0], s[1], s[2])) # everything that contains no bundle
for idx in range(len(data)):
mask = data[idx]
mask_ml[:, :, :, idx + 1] = mask
background[mask == 1] = 0 # remove this bundle from background
mask_ml[:, :, :, 0] = background
return mask_ml
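    # Shape sketch (assuming three binary masks m1..m3, each of shape (x, y, z)):
    #   mask_ml = ExpUtils.add_background_class([m1, m2, m3])  # -> (x, y, z, 4), channel 0 = background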
@staticmethod
def get_cv_fold(fold, dataset="HCP"):
'''
        Need train/test/validate splits for best-model selection and for training the combined net.
:return:
'''
#For CV
if fold == 0:
train, validate, test = [0, 1, 2], [3], [4]
# train, validate, test = [0, 1, 2, 3, 4], [3], [4]
elif fold == 1:
train, validate, test = [1, 2, 3], [4], [0]
elif fold == 2:
train, validate, test = [2, 3, 4], [0], [1]
elif fold == 3:
train, validate, test = [3, 4, 0], [1], [2]
elif fold == 4:
train, validate, test = [4, 0, 1], [2], [3]
subjects = get_all_subjects(dataset)
if dataset.startswith("HCP"):
# subjects = list(Utils.chunks(subjects[:100], 10)) #10 folds
subjects = list(Utils.chunks(subjects, 21)) #5 folds a 21 subjects
            # => 5-fold CV ok (score only 1 %-point worse than 10-fold, 80 vs 60 train subjects; 10-fold CV impractical!)
elif dataset.startswith("Schizo"):
# 410 subjects
subjects = list(Utils.chunks(subjects, 82)) # 5 folds a 82 subjects
else:
raise ValueError("Invalid dataset name")
subjects = np.array(subjects)
return list(subjects[train].flatten()), list(subjects[validate].flatten()), list(subjects[test].flatten())
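    # Usage sketch (hypothetical): for fold 0 on HCP this yields three disjoint
    # subject lists of sizes 63/21/21, drawn from the five chunks of 21 subjects.
    #   train_subj, val_subj, test_subj = ExpUtils.get_cv_fold(0, dataset="HCP")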
@staticmethod
def print_and_save(HP, text, only_log=False):
if not only_log:
print(text)
try:
with open(join(HP.EXP_PATH, "Log.txt"), "a") as f: # a for append
f.write(text)
f.write("\n")
except IOError:
print("WARNING: Could not write to Log.txt file")
@staticmethod
def print_verbose(HP, text):
if HP.VERBOSE:
print(text)
| 51.059382
| 305
| 0.58276
|
1623b62934878991bb71fc9a7ee09359ec7aae34
| 21,694
|
py
|
Python
|
python/tests/unit/test_cache.py
|
cognitedatatest/emissary
|
397ad9941ddafb9faceafdd1bb9ee410f6ac6c96
|
[
"Apache-2.0"
] | null | null | null |
python/tests/unit/test_cache.py
|
cognitedatatest/emissary
|
397ad9941ddafb9faceafdd1bb9ee410f6ac6c96
|
[
"Apache-2.0"
] | 1
|
2022-02-14T02:34:21.000Z
|
2022-02-14T02:34:21.000Z
|
python/tests/unit/test_cache.py
|
cognitedatatest/emissary
|
397ad9941ddafb9faceafdd1bb9ee410f6ac6c96
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Callable, Dict, List, Optional, OrderedDict, Set, Tuple
import difflib
import json
import logging
import os
import random
import re
import sys
import yaml
import pytest
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s test %(levelname)s: %(message)s",
datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger("ambassador")
logger.setLevel(logging.DEBUG)
from ambassador import Cache, Config, IR, EnvoyConfig
from ambassador.ir.ir import IRFileChecker
from ambassador.fetch import ResourceFetcher
from ambassador.utils import SecretHandler, NullSecretHandler, Timer
class Builder:
def __init__(self, logger: logging.Logger, yaml_file: str,
enable_cache=True) -> None:
self.logger = logger
self.test_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_cache_data"
)
self.cache: Optional[Cache] = None
if enable_cache:
self.cache = Cache(logger)
# This is a brutal hack: we load all the YAML, store it as objects, then
# build IR and econf from the re-serialized YAML from these resources.
# The reason is that it's kind of the only way we can apply deltas in
# a meaningful way.
self.resources: Dict[str, Any] = {}
self.deltas: Dict[str, Any] = {}
# Load the initial YAML.
self.apply_yaml(yaml_file, allow_updates=False)
self.secret_handler = NullSecretHandler(logger, "/tmp/secrets/src", "/tmp/secrets/cache", "0")
# Save builds to make this simpler to call.
self.builds: List[Tuple[IR, EnvoyConfig]] = []
def current_yaml(self) -> str:
return yaml.safe_dump_all(list(self.resources.values()))
def apply_yaml(self, yaml_file: str, allow_updates=True) -> None:
yaml_data = open(os.path.join(self.test_dir, yaml_file), "r").read()
self.apply_yaml_string(yaml_data, allow_updates)
def apply_yaml_string(self, yaml_data: str, allow_updates=True) -> None:
for rsrc in yaml.safe_load_all(yaml_data):
# We require kind, metadata.name, and metadata.namespace here.
kind = rsrc['kind']
metadata = rsrc['metadata']
name = metadata['name']
namespace = metadata['namespace']
key = f"{kind}-v2-{name}-{namespace}"
dtype = "add"
if key in self.resources:
# This is an attempted update.
if not allow_updates:
raise RuntimeError(f"Cannot update {key}")
dtype = "update"
# if self.cache is not None:
# self.cache.invalidate(key)
self.resources[key] = rsrc
self.deltas[key] = {
"kind": kind,
"apiVersion": rsrc["apiVersion"],
"metadata": {
"name": name,
"namespace": namespace,
"creationTimestamp": metadata.get("creationTimestamp", "2021-11-19T15:11:45Z")
},
"deltaType": dtype
}
def delete_yaml(self, yaml_file: str) -> None:
yaml_data = open(os.path.join(self.test_dir, yaml_file), "r").read()
self.delete_yaml_string(yaml_data)
def delete_yaml_string(self, yaml_data: str) -> None:
for rsrc in yaml.safe_load_all(yaml_data):
# We require kind, metadata.name, and metadata.namespace here.
kind = rsrc['kind']
metadata = rsrc['metadata']
name = metadata['name']
namespace = metadata['namespace']
key = f"{kind}-v2-{name}-{namespace}"
if key in self.resources:
del(self.resources[key])
# if self.cache is not None:
# self.cache.invalidate(key)
self.deltas[key] = {
"kind": kind,
"apiVersion": rsrc["apiVersion"],
"metadata": {
"name": name,
"namespace": namespace,
"creationTimestamp": metadata.get("creationTimestamp", "2021-11-19T15:11:45Z")
},
"deltaType": "delete"
}
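    # Both apply_* and delete_* record a watt-style delta per resource key; build()
    # drains self.deltas, so each build sees only the changes since the previous one.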
def build(self, version='V2') -> Tuple[IR, EnvoyConfig]:
# Do a build, return IR & econf, but also stash them in self.builds.
watt: Dict[str, Any] = {
"Kubernetes": {},
"Deltas": list(self.deltas.values())
}
# Clear deltas for the next build.
self.deltas = {}
# The Ambassador resource types are all valid keys in the Kubernetes dict.
# Other things (e.g. if this test gets expanded to cover Ingress or Secrets)
# may not be.
for rsrc in self.resources.values():
kind = rsrc['kind']
if kind not in watt['Kubernetes']:
watt['Kubernetes'][kind] = []
watt['Kubernetes'][kind].append(rsrc)
watt_json = json.dumps(watt, sort_keys=True, indent=4)
self.logger.debug(f"Watt JSON:\n{watt_json}")
# OK, we have the WATT-formatted JSON. This next bit of code largely duplicates
# _load_ir from diagd.
#
# XXX That obviously means that it should be factored out for reuse.
# Grab a new aconf, and use a new ResourceFetcher to load it up.
aconf = Config()
fetcher = ResourceFetcher(self.logger, aconf)
fetcher.parse_watt(watt_json)
aconf.load_all(fetcher.sorted())
# Next up: What kind of reconfiguration are we doing?
config_type, reset_cache, invalidate_groups_for = IR.check_deltas(self.logger, fetcher, self.cache)
# For the tests in this file, we should see cache resets and full reconfigurations
# IFF we have no cache.
if self.cache is None:
assert config_type == "complete", "check_deltas wants an incremental reconfiguration with no cache, which it shouldn't"
assert reset_cache, "check_deltas with no cache does not want to reset the cache, but it should"
else:
assert config_type == "incremental", "check_deltas with a cache wants a complete reconfiguration, which it shouldn't"
assert not reset_cache, "check_deltas with a cache wants to reset the cache, which it shouldn't"
# Once that's done, compile the IR.
ir = IR(aconf, logger=self.logger,
cache=self.cache, invalidate_groups_for=invalidate_groups_for,
file_checker=lambda path: True,
secret_handler=self.secret_handler)
assert ir, "could not create an IR"
econf = EnvoyConfig.generate(ir, version, cache=self.cache)
assert econf, "could not create an econf"
self.builds.append(( ir, econf ))
return ir, econf
def invalidate(self, key) -> None:
if self.cache is not None:
assert self.cache[key] is not None, f"key {key} is not cached"
self.cache.invalidate(key)
def check(self, what: str, b1: Tuple[IR, EnvoyConfig], b2: Tuple[IR, EnvoyConfig],
strip_cache_keys=False) -> bool:
for kind, idx in [ ( "IR", 0 ), ( "econf", 1 ) ]:
if strip_cache_keys and (idx == 0):
x1 = self.strip_cache_keys(b1[idx].as_dict())
j1 = json.dumps(x1, sort_keys=True, indent=4)
x2 = self.strip_cache_keys(b2[idx].as_dict())
j2 = json.dumps(x2, sort_keys=True, indent=4)
else:
j1 = b1[idx].as_json()
j2 = b2[idx].as_json()
match = (j1 == j2)
output = ""
if not match:
l1 = j1.split("\n")
l2 = j2.split("\n")
n1 = f"{what} {kind} 1"
n2 = f"{what} {kind} 2"
output += "\n--------\n"
for line in difflib.context_diff(l1, l2, fromfile=n1, tofile=n2):
line = line.rstrip()
output += line
output += "\n"
assert match, output
return match
def check_last(self, what: str) -> None:
build_count = len(self.builds)
b1 = self.builds[build_count - 2]
b2 = self.builds[build_count - 1]
self.check(what, b1, b2)
def strip_cache_keys(self, node: Any) -> Any:
if isinstance(node, dict):
output = {}
for k, v in node.items():
if k == '_cache_key':
continue
output[k] = self.strip_cache_keys(v)
return output
elif isinstance(node, list):
return [ self.strip_cache_keys(x) for x in node ]
return node
def test_circular_link():
builder = Builder(logger, "cache_test_1.yaml")
builder.build()
# This Can't Happen(tm) in Ambassador, but it's important that it not go
# off the rails. Find a Mapping...
mapping_key = "Mapping-v2-foo-4-default"
m = builder.cache[mapping_key]
# ...then walk the link chain until we get to a V2-Cluster.
worklist = [ m.cache_key ]
cluster_key: Optional[str] = None
while worklist:
key = worklist.pop(0)
if key.startswith('V2-Cluster'):
cluster_key = key
break
if key in builder.cache.links:
for owned in builder.cache.links[key]:
worklist.append(owned)
assert cluster_key is not None, f"No V2-Cluster linked from {m}?"
c = builder.cache[cluster_key]
assert c is not None, f"No V2-Cluster in the cache for {c}"
builder.cache.link(c, m)
builder.cache.invalidate(mapping_key)
builder.build()
builder.check_last("after invalidating circular links")
def test_multiple_rebuilds():
builder = Builder(logger, "cache_test_1.yaml")
for i in range(10):
builder.build()
if i > 0:
builder.check_last(f"rebuild {i-1} -> {i}")
def test_simple_targets():
builder = Builder(logger, "cache_test_1.yaml")
builder.build()
builder.build()
builder.check_last("immediate rebuild")
builder.invalidate("Mapping-v2-foo-4-default")
builder.build()
builder.check_last("after delete foo-4")
def test_smashed_targets():
builder = Builder(logger, "cache_test_2.yaml")
builder.build()
builder.build()
builder.check_last("immediate rebuild")
# Invalidate two things that share common links.
builder.invalidate("Mapping-v2-foo-4-default")
builder.invalidate("Mapping-v2-foo-6-default")
builder.build()
builder.check_last("after invalidating foo-4 and foo-6")
def test_delta_1():
builder1 = Builder(logger, "cache_test_1.yaml")
builder2 = Builder(logger, "cache_test_1.yaml", enable_cache=False)
b1 = builder1.build()
b2 = builder2.build()
builder1.check("baseline", b1, b2, strip_cache_keys=True)
builder1.apply_yaml("cache_delta_1.yaml")
builder2.apply_yaml("cache_delta_1.yaml")
b1 = builder1.build()
b2 = builder2.build()
builder1.check("after delta", b1, b2, strip_cache_keys=True)
builder3 = Builder(logger, "cache_result_1.yaml")
b3 = builder3.build()
builder3.check("final", b3, b1)
def test_delta_2():
builder1 = Builder(logger, "cache_test_2.yaml")
builder2 = Builder(logger, "cache_test_2.yaml", enable_cache=False)
b1 = builder1.build()
b2 = builder2.build()
builder1.check("baseline", b1, b2, strip_cache_keys=True)
builder1.apply_yaml("cache_delta_2.yaml")
builder2.apply_yaml("cache_delta_2.yaml")
b1 = builder1.build()
b2 = builder2.build()
builder1.check("after delta", b1, b2, strip_cache_keys=True)
builder3 = Builder(logger, "cache_result_2.yaml")
b3 = builder3.build()
builder3.check("final", b3, b1)
def test_delta_3():
builder1 = Builder(logger, "cache_test_1.yaml")
builder2 = Builder(logger, "cache_test_1.yaml", enable_cache=False)
b1 = builder1.build()
b2 = builder2.build()
builder1.check("baseline", b1, b2, strip_cache_keys=True)
# Load up five delta files and apply them in a random order.
deltas = [ f"cache_random_{i}.yaml" for i in [ 1, 2, 3, 4, 5 ] ]
random.shuffle(deltas)
for delta in deltas:
builder1.apply_yaml(delta)
builder2.apply_yaml(delta)
b1 = builder1.build()
b2 = builder2.build()
builder1.check("after deltas", b1, b2, strip_cache_keys=True)
builder3 = Builder(logger, "cache_result_3.yaml")
b3 = builder3.build()
builder3.check("final", b3, b1)
def test_delete_4():
builder1 = Builder(logger, "cache_test_1.yaml")
builder2 = Builder(logger, "cache_test_1.yaml", enable_cache=False)
b1 = builder1.build()
b2 = builder2.build()
builder1.check("baseline", b1, b2, strip_cache_keys=True)
# Delete a resource.
builder1.delete_yaml("cache_delta_1.yaml")
builder2.delete_yaml("cache_delta_1.yaml")
b1 = builder1.build()
b2 = builder2.build()
builder1.check("after deletion", b1, b2, strip_cache_keys=True)
builder3 = Builder(logger, "cache_result_4.yaml")
b3 = builder3.build()
builder3.check("final", b3, b1)
def test_long_cluster_1():
# Create a cache for Mappings whose cluster names are too long
# to be envoy cluster names and must be truncated.
builder1 = Builder(logger, "cache_test_3.yaml")
builder2 = Builder(logger, "cache_test_3.yaml", enable_cache=False)
b1 = builder1.build()
b2 = builder2.build()
print("checking baseline...")
builder1.check("baseline", b1, b2, strip_cache_keys=True)
# Apply the second Mapping, make sure we use the same cached cluster
builder1.apply_yaml("cache_delta_3.yaml")
builder2.apply_yaml("cache_delta_3.yaml")
b1 = builder1.build()
b2 = builder2.build()
print("checking after apply...")
builder1.check("after apply", b1, b2, strip_cache_keys=True)
print("test_long_cluster_1 done")
MadnessVerifier = Callable[[Tuple[IR, EnvoyConfig]], bool]
class MadnessMapping:
name: str
pfx: str
service: str
def __init__(self, name, pfx, svc) -> None:
self.name = name
self.pfx = pfx
self.service = svc
# This is only OK for service names without any weirdnesses.
self.cluster = "cluster_" + re.sub(r'[^0-9A-Za-z_]', '_', self.service) + "_default"
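        # e.g. (illustrative) service "service1" yields cluster "cluster_service1_default";
        # any character outside [0-9A-Za-z_] in the service name is replaced by '_'.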
def __str__(self) -> str:
return f"MadnessMapping {self.name}: {self.pfx} => {self.service}"
def yaml(self) -> str:
return f"""
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
name: {self.name}
namespace: default
spec:
prefix: {self.pfx}
service: {self.service}
"""
class MadnessOp:
name: str
op: str
mapping: MadnessMapping
verifiers: List[MadnessVerifier]
def __init__(self, name: str, op: str, mapping: MadnessMapping, verifiers: List[MadnessVerifier]) -> None:
self.name = name
self.op = op
self.mapping = mapping
self.verifiers = verifiers
def __str__(self) -> str:
return self.name
def exec(self, builder1: Builder, builder2: Builder, dumpfile: Optional[str]=None) -> bool:
verifiers: List[MadnessVerifier] = []
if self.op == "apply":
builder1.apply_yaml_string(self.mapping.yaml())
builder2.apply_yaml_string(self.mapping.yaml())
verifiers.append(self._cluster_present)
elif self.op == "delete":
builder1.delete_yaml_string(self.mapping.yaml())
builder2.delete_yaml_string(self.mapping.yaml())
verifiers.append(self._cluster_absent)
else:
raise Exception(f"Unknown op {self.op}")
logger.info("======== builder1:")
logger.info("INPUT: %s" % builder1.current_yaml())
b1 = builder1.build()
logger.info("IR: %s" % json.dumps(b1[0].as_dict(), indent=2, sort_keys=True))
logger.info("======== builder2:")
logger.info("INPUT: %s" % builder2.current_yaml())
b2 = builder2.build()
logger.info("IR: %s" % json.dumps(b2[0].as_dict(), indent=2, sort_keys=True))
if dumpfile:
json.dump(b1[0].as_dict(), open(f"/tmp/{dumpfile}-1.json", "w"), indent=2, sort_keys=True)
json.dump(b2[0].as_dict(), open(f"/tmp/{dumpfile}-2.json", "w"), indent=2, sort_keys=True)
if not builder1.check(self.name, b1, b2, strip_cache_keys=True):
return False
verifiers += self.verifiers
for v in verifiers:
# for b in [ b1 ]:
for b in [ b1, b2 ]:
# The verifiers are meant to do assertions. The return value is
# about short-circuiting the loop, not logging the errors.
if not v(b):
return False
return True
def _cluster_present(self, b: Tuple[IR, EnvoyConfig]) -> bool:
ir, econf = b
ir_has_cluster = ir.has_cluster(self.mapping.cluster)
assert ir_has_cluster, f"{self.name}: needed IR cluster {self.mapping.cluster}, have only {', '.join(ir.clusters.keys())}"
return ir_has_cluster
def _cluster_absent(self, b: Tuple[IR, EnvoyConfig]) -> bool:
ir, econf = b
ir_has_cluster = ir.has_cluster(self.mapping.cluster)
assert not ir_has_cluster, f"{self.name}: needed no IR cluster {self.mapping.cluster}, but found it"
return not ir_has_cluster
def check_group(self, b: Tuple[IR, EnvoyConfig], current_mappings: Dict[MadnessMapping, bool]) -> bool:
ir, econf = b
match = False
group = ir.groups.get("3644d75eb336f323bec43e48d4cfd8a950157607", None)
if current_mappings:
# There are some active mappings. Make sure that the group exists, that it has the
# correct mappings, and that the mappings have sane weights.
assert group, f"{self.name}: needed group 3644d75eb336f323bec43e48d4cfd8a950157607, but none found"
# We expect the mappings to be sorted in the group, because every change to the
# mappings that are part of the group should result in the whole group being torn
# down and recreated.
wanted_services = sorted([ m.service for m in current_mappings.keys() ])
found_services = [ m.service for m in group.mappings ]
match1 = (wanted_services == found_services)
assert match1, f"{self.name}: wanted services {wanted_services}, but found {found_services}"
weight_delta = 100 // len(current_mappings)
wanted_weights: List[int] = [ (i + 1) * weight_delta for i in range(len(current_mappings)) ]
wanted_weights[-1] = 100
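            # e.g. with three active mappings: weight_delta == 33 and the
            # cumulative weights come out as [33, 66, 100].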
found_weights: List[int] = [ m._weight for m in group.mappings ]
match2 = (wanted_weights == found_weights)
assert match2, f"{self.name}: wanted weights {wanted_weights}, but found {found_weights}"
return match1 and match2
else:
# There are no active mappings, so make sure that the group doesn't exist.
assert not group, f"{self.name}: needed no group 3644d75eb336f323bec43e48d4cfd8a950157607, but found one"
match = True
return match
def test_cache_madness():
builder1 = Builder(logger, "/dev/null")
builder2 = Builder(logger, "/dev/null", enable_cache=False)
logger.info("======== builder1:")
logger.info("INPUT: %s" % builder1.current_yaml())
b1 = builder1.build()
logger.info("IR: %s" % json.dumps(b1[0].as_dict(), indent=2, sort_keys=True))
logger.info("======== builder2:")
logger.info("INPUT: %s" % builder2.current_yaml())
b2 = builder2.build()
logger.info("IR: %s" % json.dumps(b2[0].as_dict(), indent=2, sort_keys=True))
builder1.check("baseline", b1, b2, strip_cache_keys=True)
# We're going to mix and match some changes to the config,
# in a random order.
all_mappings = [
MadnessMapping("mapping1", "/foo/", "service1"),
MadnessMapping("mapping2", "/foo/", "service2"),
MadnessMapping("mapping3", "/foo/", "service3"),
MadnessMapping("mapping4", "/foo/", "service4"),
MadnessMapping("mapping5", "/foo/", "service5"),
]
current_mappings: OrderedDict[MadnessMapping, bool] = {}
# grunge = [ all_mappings[i] for i in [ 0, 3, 2 ] ]
# for i in range(len(grunge)):
# mapping = grunge[i]
for i in range(0, 100):
mapping = random.choice(all_mappings)
op: MadnessOp
if mapping in current_mappings:
            del current_mappings[mapping]
op = MadnessOp(name=f"delete {mapping.pfx} -> {mapping.service}", op="delete", mapping=mapping,
verifiers=[ lambda b: op.check_group(b, current_mappings) ])
else:
current_mappings[mapping] = True
op = MadnessOp(name=f"apply {mapping.pfx} -> {mapping.service}", op="apply", mapping=mapping,
verifiers=[ lambda b: op.check_group(b, current_mappings) ])
print("==== EXEC %d: %s => %s" % (i, op, sorted([ m.service for m in current_mappings.keys() ])))
logger.info("======== EXEC %d: %s", i, op)
# if not op.exec(builder1, None, dumpfile=f"ir{i}"):
if not op.exec(builder1, builder2, dumpfile=f"ir{i}"):
break
if __name__ == '__main__':
pytest.main(sys.argv)
| 32.044313
| 131
| 0.604591
|
602f45d637afb0a8f3e14300214faec9cc4fe1e2
| 8,255
|
py
|
Python
|
pandas/tests/indexing/interval/test_interval_new.py
|
AdrianMastronardi/pandas
|
67045903306ac4a1cab108177e92df30d99912b4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-11-01T08:44:40.000Z
|
2019-11-01T08:44:40.000Z
|
pandas/tests/indexing/interval/test_interval_new.py
|
AdrianMastronardi/pandas
|
67045903306ac4a1cab108177e92df30d99912b4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/indexing/interval/test_interval_new.py
|
AdrianMastronardi/pandas
|
67045903306ac4a1cab108177e92df30d99912b4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
import re
import numpy as np
import pytest
from pandas.compat import IS64
from pandas import (
Index,
Interval,
IntervalIndex,
Series,
)
import pandas._testing as tm
class TestIntervalIndex:
@pytest.fixture
def series_with_interval_index(self):
return Series(
np.arange(5), IntervalIndex.from_breaks(np.arange(6), inclusive="right")
)
def test_loc_with_interval(self, series_with_interval_index, indexer_sl):
# loc with single label / list of labels:
# - Intervals: only exact matches
# - scalars: those that contain it
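        # e.g. for bins (0, 1], (1, 2], ...: ser.loc[Interval(0, 1, "right")]
        # matches the first bin exactly, while ser.loc[0.5] selects the bin
        # containing 0.5, which is also the first one.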
ser = series_with_interval_index.copy()
expected = 0
result = indexer_sl(ser)[Interval(0, 1, "right")]
assert result == expected
expected = ser.iloc[3:5]
result = indexer_sl(ser)[[Interval(3, 4, "right"), Interval(4, 5, "right")]]
tm.assert_series_equal(expected, result)
# missing or not exact
with pytest.raises(
KeyError, match=re.escape("Interval(3, 5, inclusive='left')")
):
indexer_sl(ser)[Interval(3, 5, inclusive="left")]
with pytest.raises(
KeyError, match=re.escape("Interval(3, 5, inclusive='right')")
):
indexer_sl(ser)[Interval(3, 5, "right")]
with pytest.raises(
KeyError, match=re.escape("Interval(-2, 0, inclusive='right')")
):
indexer_sl(ser)[Interval(-2, 0, "right")]
with pytest.raises(
KeyError, match=re.escape("Interval(5, 6, inclusive='right')")
):
indexer_sl(ser)[Interval(5, 6, "right")]
def test_loc_with_scalar(self, series_with_interval_index, indexer_sl):
# loc with single label / list of labels:
# - Intervals: only exact matches
# - scalars: those that contain it
ser = series_with_interval_index.copy()
assert indexer_sl(ser)[1] == 0
assert indexer_sl(ser)[1.5] == 1
assert indexer_sl(ser)[2] == 1
expected = ser.iloc[1:4]
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2.5, 3.5]])
tm.assert_series_equal(expected, indexer_sl(ser)[[2, 3, 4]])
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 3, 4]])
expected = ser.iloc[[1, 1, 2, 1]]
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2, 2.5, 1.5]])
expected = ser.iloc[2:5]
tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2])
def test_loc_with_slices(self, series_with_interval_index, indexer_sl):
# loc with slices:
# - Interval objects: only works with exact matches
# - scalars: only works for non-overlapping, monotonic intervals,
# and start/stop select location based on the interval that
# contains them:
        #   i.e. slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop))
ser = series_with_interval_index.copy()
# slice of interval
expected = ser.iloc[:3]
result = indexer_sl(ser)[Interval(0, 1, "right") : Interval(2, 3, "right")]
tm.assert_series_equal(expected, result)
expected = ser.iloc[3:]
result = indexer_sl(ser)[Interval(3, 4, "right") :]
tm.assert_series_equal(expected, result)
msg = "Interval objects are not currently supported"
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 6) :]
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 4, inclusive="left") :]
def test_slice_step_ne1(self, series_with_interval_index):
# GH#31658 slice of scalar with step != 1
ser = series_with_interval_index.copy()
expected = ser.iloc[0:4:2]
result = ser[0:4:2]
tm.assert_series_equal(result, expected)
result2 = ser[0:4][::2]
tm.assert_series_equal(result2, expected)
def test_slice_float_start_stop(self, series_with_interval_index):
# GH#31658 slicing with integers is positional, with floats is not
# supported
ser = series_with_interval_index.copy()
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
ser[1.5:9.5:2]
def test_slice_interval_step(self, series_with_interval_index):
# GH#31658 allows for integer step!=1, not Interval step
ser = series_with_interval_index.copy()
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
ser[0 : 4 : Interval(0, 1)]
def test_loc_with_overlap(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 5), (3, 7)], inclusive="right")
ser = Series(range(len(idx)), index=idx)
# scalar
expected = ser
result = indexer_sl(ser)[4]
tm.assert_series_equal(expected, result)
result = indexer_sl(ser)[[4]]
tm.assert_series_equal(expected, result)
# interval
expected = 0
result = indexer_sl(ser)[Interval(1, 5, "right")]
        assert result == expected
expected = ser
result = indexer_sl(ser)[[Interval(1, 5, "right"), Interval(3, 7, "right")]]
tm.assert_series_equal(expected, result)
with pytest.raises(
KeyError, match=re.escape("Interval(3, 5, inclusive='right')")
):
indexer_sl(ser)[Interval(3, 5, "right")]
msg = r"None of \[\[Interval\(3, 5, inclusive='right'\)\]\]"
with pytest.raises(KeyError, match=msg):
indexer_sl(ser)[[Interval(3, 5, "right")]]
# slices with interval (only exact matches)
expected = ser
result = indexer_sl(ser)[Interval(1, 5, "right") : Interval(3, 7, "right")]
tm.assert_series_equal(expected, result)
        msg = (
            "'can only get slices from an IntervalIndex if bounds are"
            " non-overlapping and all monotonic increasing or decreasing'"
        )
with pytest.raises(KeyError, match=msg):
indexer_sl(ser)[Interval(1, 6) : Interval(3, 8)]
if indexer_sl is tm.loc:
# slices with scalar raise for overlapping intervals
# TODO KeyError is the appropriate error?
with pytest.raises(KeyError, match=msg):
ser.loc[1:4]
def test_non_unique(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])
ser = Series(range(len(idx)), index=idx)
result = indexer_sl(ser)[Interval(1, 3)]
assert result == 0
result = indexer_sl(ser)[[Interval(1, 3)]]
expected = ser.iloc[0:1]
tm.assert_series_equal(expected, result)
def test_non_unique_moar(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])
ser = Series(range(len(idx)), index=idx)
expected = ser.iloc[[0, 1]]
result = indexer_sl(ser)[Interval(1, 3)]
tm.assert_series_equal(expected, result)
expected = ser
result = indexer_sl(ser)[Interval(1, 3) :]
tm.assert_series_equal(expected, result)
expected = ser.iloc[[0, 1]]
result = indexer_sl(ser)[[Interval(1, 3)]]
tm.assert_series_equal(expected, result)
def test_loc_getitem_missing_key_error_message(
self, frame_or_series, series_with_interval_index
):
# GH#27365
ser = series_with_interval_index.copy()
obj = frame_or_series(ser)
with pytest.raises(KeyError, match=r"\[6\]"):
obj.loc[[4, 5, 6]]
@pytest.mark.xfail(not IS64, reason="GH 23440")
@pytest.mark.parametrize(
"intervals",
[
([Interval(-np.inf, 0.0), Interval(0.0, 1.0)]),
([Interval(-np.inf, -2.0), Interval(-2.0, -1.0)]),
([Interval(-1.0, 0.0), Interval(0.0, np.inf)]),
([Interval(1.0, 2.0), Interval(2.0, np.inf)]),
],
)
def test_repeating_interval_index_with_infs(intervals):
# GH 46658
interval_index = Index(intervals * 51)
expected = np.arange(1, 102, 2, dtype=np.intp)
result = interval_index.get_indexer_for([intervals[1]])
tm.assert_equal(result, expected)
| 33.831967
| 84
| 0.612235
|
dc1592237fec363d02933d65a71ef2738874caec
| 580
|
py
|
Python
|
train.py
|
DaryLL-S/Adverting_Model
|
e2f8fed48bbba06f458c1ac7201205b0ff46751a
|
[
"MIT"
] | null | null | null |
train.py
|
DaryLL-S/Adverting_Model
|
e2f8fed48bbba06f458c1ac7201205b0ff46751a
|
[
"MIT"
] | null | null | null |
train.py
|
DaryLL-S/Adverting_Model
|
e2f8fed48bbba06f458c1ac7201205b0ff46751a
|
[
"MIT"
] | null | null | null |
import pandas as pd
import xgboost as xgb
# Load the dataset
df = pd.read_csv("./data/raw_data.csv")
df = df.fillna(value=-1)
data = df.iloc[:, 3:16]
target = df.iloc[:, -1:]
print(data)
print(target)
# Booster parameters:
params = {'learning_rate': 0.4,
          'max_depth': 10,  # maximum depth of each tree; larger values overfit more easily
          'objective': 'multi:softprob',  # multi-class classification objective
          'random_state': 7,
          'silent': 0,
          'num_class': 5,  # number of classes; used together with multi:softmax/softprob
          'eta': 0.8}
# num_boost_round is an argument of xgb.train(), not a booster parameter,
# so it is passed explicitly here rather than inside params.
model = xgb.train(params, xgb.DMatrix(data, label=target), num_boost_round=2000)
model.save_model('XGboost.model')
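# A minimal sketch of loading the saved model back for prediction; reusing the
# training frame `data` here is purely illustrative, not a real evaluation.
booster = xgb.Booster()
booster.load_model('XGboost.model')
pred_proba = booster.predict(xgb.DMatrix(data))  # (n_samples, num_class) probabilities under multi:softprob
print(pred_proba.argmax(axis=1)[:10])  # predicted class ids for the first ten rows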
| 24.166667
| 52
| 0.598276
|
8ea95f35ade2d425f4f3c01d5dd88ddd9639e329
| 1,123
|
py
|
Python
|
web/utils.py
|
bigchaindb/coalaip-http-api
|
8cefb8077997e8b7ce1423ef851fb3e343907d06
|
[
"Apache-2.0"
] | 4
|
2017-10-19T16:26:12.000Z
|
2020-06-15T20:13:59.000Z
|
web/utils.py
|
bigchaindb/coalaip-http-api
|
8cefb8077997e8b7ce1423ef851fb3e343907d06
|
[
"Apache-2.0"
] | 23
|
2016-08-30T12:06:13.000Z
|
2017-08-11T10:22:49.000Z
|
web/utils.py
|
bigchaindb/coalaip-http-api
|
8cefb8077997e8b7ce1423ef851fb3e343907d06
|
[
"Apache-2.0"
] | 10
|
2016-10-05T13:27:01.000Z
|
2020-06-15T20:14:10.000Z
|
from collections import namedtuple
import os
BigchainDBConfiguration = namedtuple('BigchainDBConfiguration', [
'hostname',
'port',
])
# Double check in case the environment variable is sent via Docker,
# which will send empty strings for missing environment variables
BDB_HOST = os.environ.get('BDB_NODE_HOST', None)
if not BDB_HOST:
BDB_HOST = 'localhost'
BDB_PORT = os.environ.get('BDB_NODE_PORT', None)
if not BDB_PORT:
BDB_PORT = '9984'
def get_bigchaindb_configuration():
return BigchainDBConfiguration(BDB_HOST, BDB_PORT)
def get_bigchaindb_api_url():
hostname, port = get_bigchaindb_configuration()
return 'http://{hostname}:{port}'.format(hostname=hostname, port=port)
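# With the defaults above, this returns 'http://localhost:9984'.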
def parse_model(required_fields):
def _parse_model(inputs):
for field in required_fields:
try:
value = inputs[field]
except KeyError:
raise KeyError('`{}` must be provided'.format(field))
if not value:
raise ValueError("`{}`'s value must be defined".format(field))
return inputs
return _parse_model
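# A small usage sketch (hypothetical field names) showing how parse_model
# composes a validator for required fields:
#
#     validate = parse_model(['name', 'url'])
#     validate({'name': 'work', 'url': 'https://example.com'})  # returns the dict unchanged
#     validate({'name': 'work'})  # raises KeyError: '`url` must be provided'
#     validate({'name': 'work', 'url': ''})  # raises ValueError: value must be defined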
| 26.738095
| 78
| 0.684773
|
b437ff845481fd16be2f8fc1d410e6c3c3a17c1d
| 554
|
py
|
Python
|
tests/functions/list/test_lists_map.py
|
sukovanej/mplisp
|
a3faf8c06936bcc5cde59899abf41a1b379090f5
|
[
"MIT"
] | null | null | null |
tests/functions/list/test_lists_map.py
|
sukovanej/mplisp
|
a3faf8c06936bcc5cde59899abf41a1b379090f5
|
[
"MIT"
] | null | null | null |
tests/functions/list/test_lists_map.py
|
sukovanej/mplisp
|
a3faf8c06936bcc5cde59899abf41a1b379090f5
|
[
"MIT"
] | null | null | null |
import unittest
import mplisp.evaluator as evaluator
class TestListMap(unittest.TestCase):
    def test_map(self):
input1 = """
(map (lambda (x) (* 2 x)) (list 1 2 3))
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[0], [2, 4, 6])
    def test_map_2(self):
input1 = """
(import "sys")
(def a (list 1 2 3 4))
(map (lambda (x) (* 2 x)) a)
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[2], [2, 4, 6, 8])
| 22.16
| 50
| 0.534296
|
2031769b343244ece94f16b54da16c6bb54aecc1
| 2,781
|
py
|
Python
|
homeassistant/components/rainbird/switch.py
|
FlorianLudwig/home-assistant
|
29ad3961e581d3591ce0963a7fa01672abadedf7
|
[
"Apache-2.0"
] | 2
|
2017-10-26T19:43:55.000Z
|
2017-12-30T23:29:00.000Z
|
homeassistant/components/rainbird/switch.py
|
FlorianLudwig/home-assistant
|
29ad3961e581d3591ce0963a7fa01672abadedf7
|
[
"Apache-2.0"
] | 2
|
2019-04-15T02:43:04.000Z
|
2019-04-15T02:49:10.000Z
|
homeassistant/components/rainbird/switch.py
|
FlorianLudwig/home-assistant
|
29ad3961e581d3591ce0963a7fa01672abadedf7
|
[
"Apache-2.0"
] | 1
|
2022-02-20T07:41:14.000Z
|
2022-02-20T07:41:14.000Z
|
"""
Support for Rain Bird Irrigation system LNK WiFi Module.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.rainbird/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import (
CONF_FRIENDLY_NAME, CONF_SCAN_INTERVAL, CONF_SWITCHES, CONF_TRIGGER_TIME,
CONF_ZONE)
from homeassistant.helpers import config_validation as cv
from . import DATA_RAINBIRD
DEPENDENCIES = ['rainbird']
DOMAIN = 'rainbird'
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SWITCHES, default={}): vol.Schema({
cv.string: {
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Required(CONF_ZONE): cv.string,
vol.Required(CONF_TRIGGER_TIME): cv.string,
vol.Optional(CONF_SCAN_INTERVAL): cv.string,
},
}),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Rain Bird switches over a Rain Bird controller."""
controller = hass.data[DATA_RAINBIRD]
devices = []
for dev_id, switch in config.get(CONF_SWITCHES).items():
devices.append(RainBirdSwitch(controller, switch, dev_id))
add_entities(devices, True)
class RainBirdSwitch(SwitchDevice):
"""Representation of a Rain Bird switch."""
def __init__(self, rb, dev, dev_id):
"""Initialize a Rain Bird Switch Device."""
self._rainbird = rb
self._devid = dev_id
self._zone = int(dev.get(CONF_ZONE))
self._name = dev.get(CONF_FRIENDLY_NAME,
"Sprinkler {}".format(self._zone))
self._state = None
self._duration = dev.get(CONF_TRIGGER_TIME)
self._attributes = {
"duration": self._duration,
"zone": self._zone
}
@property
def device_state_attributes(self):
"""Return state attributes."""
return self._attributes
@property
def name(self):
"""Get the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._rainbird.startIrrigation(int(self._zone), int(self._duration))
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._rainbird.stopIrrigation()
def get_device_status(self):
"""Get the status of the switch from Rain Bird Controller."""
return self._rainbird.currentIrrigation() == self._zone
def update(self):
"""Update switch status."""
self._state = self.get_device_status()
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
| 29.585106
| 77
| 0.658037
|
554244970bc5061e0acfdce1055794209d93c0b4
| 190,726
|
py
|
Python
|
source/gui/settingsDialogs.py
|
falsecz/python-java-access-bridge
|
52bdca4172fa53072be7201adfc08be45f9c064c
|
[
"bzip2-1.0.6"
] | 6
|
2021-03-08T07:28:08.000Z
|
2022-02-23T02:48:23.000Z
|
source/gui/settingsDialogs.py
|
falsecz/python-java-access-bridge
|
52bdca4172fa53072be7201adfc08be45f9c064c
|
[
"bzip2-1.0.6"
] | null | null | null |
source/gui/settingsDialogs.py
|
falsecz/python-java-access-bridge
|
52bdca4172fa53072be7201adfc08be45f9c064c
|
[
"bzip2-1.0.6"
] | 2
|
2021-07-16T00:25:27.000Z
|
2022-03-24T08:36:36.000Z
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2020 NV Access Limited, Peter Vágner, Aleksey Sadovoy,
# Rui Batista, Joseph Lee, Heiko Folkerts, Zahari Yurukov, Leonard de Ruijter,
# Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger, Bill Dengler, Thomas Stivers
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import logging
import os
from abc import ABCMeta, abstractmethod
import copy
# Minimal translation stubs so this module can run without a full gettext setup.
def _(a):
	return a
def pgettext(context, message=None):
	# Real pgettext(context, message) returns the translated message, so this
	# stub returns the message rather than the context when both are given.
	return message if message is not None else context
import wx
from vision.providerBase import VisionEnhancementProviderSettings
from wx.lib import scrolledpanel
from wx.lib.expando import ExpandoTextCtrl
import wx.lib.newevent
import winUser
import logHandler
import installer
from synthDriverHandler import *
from synthDriverHandler import SynthDriver, getSynth
import config
import languageHandler
import speech
import gui
import globalVars
from logHandler import log
import nvwave
import audioDucking
import speechDictHandler
import queueHandler
import braille
import brailleTables
import brailleInput
import vision
import vision.providerInfo
import vision.providerBase
from typing import Callable, List, Optional, Any
import core
import keyboardHandler
import characterProcessing
from . import guiHelper
try:
import updateCheck
except RuntimeError:
updateCheck = None
from . import nvdaControls
from autoSettingsUtils.utils import UnsupportedConfigParameterError
from autoSettingsUtils.autoSettings import AutoSettings
from autoSettingsUtils.driverSetting import BooleanDriverSetting, NumericDriverSetting, DriverSetting
import touchHandler
import winVersion
import weakref
import time
import keyLabels
from .dpiScalingHelper import DpiScalingHelperMixinWithoutInit
class SettingsDialog(
DpiScalingHelperMixinWithoutInit,
gui.ContextHelpMixin,
wx.Dialog, # wxPython does not seem to call base class initializer, put last in MRO
metaclass=guiHelper.SIPABCMeta
):
"""A settings dialog.
A settings dialog consists of one or more settings controls and OK and Cancel buttons and an optional Apply button.
Action may be taken in response to the OK, Cancel or Apply buttons.
To use this dialog:
* Set L{title} to the title of the dialog.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, override L{postInit} to perform actions after the dialog is created, such as setting the focus. Be
aware that L{postInit} is also called by L{onApply}.
* Optionally, extend one or more of L{onOk}, L{onCancel} or L{onApply} to perform actions in response to the
OK, Cancel or Apply buttons, respectively.
@ivar title: The title of the dialog.
@type title: str
"""
class MultiInstanceError(RuntimeError): pass
_DIALOG_CREATED_STATE = 0
_DIALOG_DESTROYED_STATE = 1
# holds instances of SettingsDialogs as keys, and state as the value
_instances=weakref.WeakKeyDictionary()
title = ""
helpId = "NVDASettings"
shouldSuspendConfigProfileTriggers = True
def __new__(cls, *args, **kwargs):
# We are iterating over instanceItems only once, so it can safely be an iterator.
instanceItems = SettingsDialog._instances.items()
instancesOfSameClass = (
(dlg, state) for dlg, state in instanceItems if isinstance(dlg, cls)
)
firstMatchingInstance, state = next(instancesOfSameClass, (None, None))
multiInstanceAllowed = kwargs.get('multiInstanceAllowed', False)
if log.isEnabledFor(log.DEBUG):
instancesState = dict(SettingsDialog._instances)
log.debug(
"Creating new settings dialog (multiInstanceAllowed:{}). "
"State of _instances {!r}".format(multiInstanceAllowed, instancesState)
)
if state is cls._DIALOG_CREATED_STATE and not multiInstanceAllowed:
raise SettingsDialog.MultiInstanceError("Only one instance of SettingsDialog can exist at a time")
if state is cls._DIALOG_DESTROYED_STATE and not multiInstanceAllowed:
# the dialog has been destroyed by wx, but the instance is still available. This indicates there is something
# keeping it alive.
log.error("Opening new settings dialog while instance still exists: {!r}".format(firstMatchingInstance))
obj = super(SettingsDialog, cls).__new__(cls, *args, **kwargs)
SettingsDialog._instances[obj] = cls._DIALOG_CREATED_STATE
return obj
def _setInstanceDestroyedState(self):
if log.isEnabledFor(log.DEBUG):
instancesState = dict(SettingsDialog._instances)
log.debug(
"Setting state to destroyed for instance: {!r}\n"
"Current _instances {!r}".format(self, instancesState)
)
if self in SettingsDialog._instances:
SettingsDialog._instances[self] = self._DIALOG_DESTROYED_STATE
def __init__(
self, parent,
resizeable=False,
hasApplyButton=False,
settingsSizerOrientation=wx.VERTICAL,
multiInstanceAllowed=False
):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param resizeable: True if the settings dialog should be resizable by the user, only set this if
you have tested that the components resize correctly.
@type resizeable: bool
@param hasApplyButton: C{True} to add an apply button to the dialog; defaults to C{False} for backwards compatibility.
@type hasApplyButton: bool
@param settingsSizerOrientation: Either wx.VERTICAL or wx.HORIZONTAL. This controls the orientation of the
sizer that is passed into L{makeSettings}. The default is wx.VERTICAL.
@type settingsSizerOrientation: wx.Orientation
@param multiInstanceAllowed: Whether multiple instances of SettingsDialog may exist.
Note that still only one instance of a particular SettingsDialog subclass may exist at one time.
@type multiInstanceAllowed: bool
"""
if gui._isDebug():
startTime = time.time()
windowStyle = wx.DEFAULT_DIALOG_STYLE
if resizeable:
windowStyle |= wx.RESIZE_BORDER | wx.MAXIMIZE_BOX
super().__init__(parent, title=self.title, style=windowStyle)
self.hasApply = hasApplyButton
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(settingsSizerOrientation)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL | wx.EXPAND, proportion=1)
buttons = wx.OK | wx.CANCEL
if hasApplyButton:
buttons |= wx.APPLY
self.mainSizer.Add(
self.CreateSeparatedButtonSizer(buttons),
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.EXPAND | wx.BOTTOM | wx.LEFT | wx.RIGHT
)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
self.Bind(wx.EVT_BUTTON, self.onApply, id=wx.ID_APPLY)
self.Bind(wx.EVT_CHAR_HOOK, self._enterActivatesOk_ctrlSActivatesApply)
# Garbage collection normally handles removing the settings instance, however this may not happen immediately
# after a window is closed, or may be blocked by a circular reference. So instead, remove when the window is
# destroyed.
self.Bind(wx.EVT_WINDOW_DESTROY, self._onWindowDestroy)
self.postInit()
if resizeable:
self.SetMinSize(self.mainSizer.GetMinSize())
self.CentreOnScreen()
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def _enterActivatesOk_ctrlSActivatesApply(self, evt):
"""Listens for keyboard input and triggers ok button on enter and triggers apply button when control + S is
pressed. Cancel behavior is built into wx.
Pressing enter will also close the dialog when a list has focus
(e.g. the list of symbols in the symbol pronunciation dialog).
Without this custom handler, enter would propagate to the list control (wx ticket #3725).
"""
if evt.KeyCode in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK))
elif self.hasApply and evt.UnicodeKey == ord(u'S') and evt.controlDown:
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_APPLY))
else:
evt.Skip()
@abstractmethod
def makeSettings(self, sizer):
"""Populate the dialog with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def postInit(self):
"""Called after the dialog has been created.
For example, this might be used to set focus to the desired control.
Sub-classes may override this method.
"""
def onOk(self, evt):
"""Take action in response to the OK button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_OK)
def onCancel(self, evt):
"""Take action in response to the Cancel button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_CANCEL)
def onApply(self, evt):
"""Take action in response to the Apply button being pressed.
Sub-classes may extend or override this method.
This base method should be called to run the postInit method.
"""
self.postInit()
self.SetReturnCode(wx.ID_APPLY)
def _onWindowDestroy(self, evt):
evt.Skip()
self._setInstanceDestroyedState()
# An event and event binder that will notify the containers that they should
# redo the layout in whatever way makes sense for their particular content.
_RWLayoutNeededEvent, EVT_RW_LAYOUT_NEEDED = wx.lib.newevent.NewCommandEvent()
class SettingsPanel(
DpiScalingHelperMixinWithoutInit,
gui.ContextHelpMixin,
wx.Panel, # wxPython does not seem to call base class initializer, put last in MRO
metaclass=guiHelper.SIPABCMeta
):
"""A settings panel, to be used in a multi category settings dialog.
A settings panel consists of one or more settings controls.
Action may be taken in response to the parent dialog's OK or Cancel buttons.
To use this panel:
* Set L{title} to the title of the category.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, extend L{onPanelActivated} to perform actions after the category has been selected in the list of categories, such as synthesizer or braille display list population.
* Optionally, extend L{onPanelDeactivated} to perform actions after the category has been deselected (i.e. another category is selected) in the list of categories.
* Optionally, extend one or both of L{onSave} or L{onDiscard} to perform actions in response to the parent dialog's OK or Cancel buttons, respectively.
* Optionally, extend one or both of L{isValid} or L{postSave} to perform validation before or steps after saving, respectively.
@ivar title: The title of the settings panel, also listed in the list of settings categories.
@type title: str
"""
title=""
panelDescription=u""
def __init__(self, parent: wx.Window):
"""
@param parent: The parent for this panel; C{None} for no parent.
"""
if gui._isDebug():
startTime = time.time()
super().__init__(parent)
self._buildGui()
if gui._isDebug():
elapsedSeconds = time.time() - startTime
panelName = self.__class__.__qualname__
log.debug(f"Loading {panelName} took {elapsedSeconds:.2f} seconds")
def _buildGui(self):
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(wx.VERTICAL)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, flag=wx.ALL | wx.EXPAND)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
@abstractmethod
def makeSettings(self, sizer: wx.BoxSizer):
"""Populate the panel with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
"""
raise NotImplementedError
def onPanelActivated(self):
"""Called after the panel has been activated (i.e. de corresponding category is selected in the list of categories).
For example, this might be used for resource intensive tasks.
Sub-classes should extend this method.
"""
self.Show()
def onPanelDeactivated(self):
"""Called after the panel has been deactivated (i.e. another category has been selected in the list of categories).
		Sub-classes should extend this method.
"""
self.Hide()
@abstractmethod
def onSave(self):
"""Take action in response to the parent's dialog OK or apply button being pressed.
Sub-classes should override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when OK is pressed.
"""
raise NotImplementedError
def isValid(self):
"""Evaluate whether the current circumstances of this panel are valid
and allow saving all the settings in a L{MultiCategorySettingsDialog}.
Sub-classes may extend this method.
@returns: C{True} if validation should continue,
C{False} otherwise.
@rtype: bool
"""
return True
def postSave(self):
"""Take action whenever saving settings for all panels in a L{MultiCategorySettingsDialog} succeeded.
Sub-classes may extend this method.
"""
def onDiscard(self):
"""Take action in response to the parent's dialog Cancel button being pressed.
Sub-classes may override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when Cancel is pressed.
"""
def _sendLayoutUpdatedEvent(self):
"""Notify any wx parents that may be listening that they should redo their layout in whatever way
makes sense for them. It is expected that sub-classes call this method in response to changes in
the number of GUI items in their panel.
"""
event = _RWLayoutNeededEvent(self.GetId())
event.SetEventObject(self)
self.GetEventHandler().ProcessEvent(event)
class MultiCategorySettingsDialog(SettingsDialog):
"""A settings dialog with multiple settings categories.
A multi category settings dialog consists of a list view with settings categories on the left side,
and a settings panel on the right side of the dialog.
Furthermore, in addition to Ok and Cancel buttons, it has an Apply button by default,
which is different from the default behavior of L{SettingsDialog}.
To use this dialog: set title and populate L{categoryClasses} with subclasses of SettingsPanel.
Make sure that L{categoryClasses} only contains panels that are available on a particular system.
For example, if a certain category of settings is only supported on Windows 10 and higher,
that category should be left out of L{categoryClasses}
"""
title=""
categoryClasses=[]
class CategoryUnavailableError(RuntimeError): pass
def __init__(self, parent, initialCategory=None):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param initialCategory: The initial category to select when opening this dialog
@type parent: SettingsPanel
"""
if initialCategory and not issubclass(initialCategory,SettingsPanel):
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise TypeError("initialCategory should be an instance of SettingsPanel")
if initialCategory and initialCategory not in self.categoryClasses:
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise MultiCategorySettingsDialog.CategoryUnavailableError(
"The provided initial category is not a part of this dialog"
)
self.initialCategory = initialCategory
self.currentCategory = None
self.setPostInitFocus = None
# dictionary key is index of category in self.catList, value is the instance. Partially filled, check for KeyError
self.catIdToInstanceMap = {}
super(MultiCategorySettingsDialog, self).__init__(
parent,
resizeable=True,
hasApplyButton=True,
settingsSizerOrientation=wx.HORIZONTAL
)
# setting the size must be done after the parent is constructed.
self.SetMinSize(self.scaleSize(self.MIN_SIZE))
self.SetSize(self.scaleSize(self.INITIAL_SIZE))
# the size has changed, so recenter on the screen
self.CentreOnScreen()
# Initial / min size for the dialog. This size was chosen as a medium fit, so the
# smaller settings panels are not surrounded by too much space but most of
# the panels fit. Vertical scrolling is acceptable. Horizontal scrolling less
# so, the width was chosen to eliminate horizontal scroll bars. If a panel
	# exceeds the initial width, a debugWarning will be added to the log.
INITIAL_SIZE = (800, 480)
MIN_SIZE = (470, 240) # Min height required to show the OK, Cancel, Apply buttons
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the list of categories in a multi category settings dialog.
categoriesLabelText=_("&Categories:")
categoriesLabel = wx.StaticText(self, label=categoriesLabelText)
# since the categories list and the container both expand in height, the y
# portion is essentially a "min" height.
# These sizes are set manually so that the initial proportions within the dialog look correct. If these sizes are
# not given, then I believe the proportion arguments (as given to the gridBagSizer.AddGrowableColumn) are used
# to set their relative sizes. We want the proportion argument to be used for resizing, but not the initial size.
catListDim = (150, 10)
catListDim = self.scaleSize(catListDim)
initialScaledWidth = self.scaleSize(self.INITIAL_SIZE[0])
spaceForBorderWidth = self.scaleSize(20)
catListWidth = catListDim[0]
containerDim = (initialScaledWidth - catListWidth - spaceForBorderWidth, self.scaleSize(10))
self.catListCtrl = nvdaControls.AutoWidthColumnListCtrl(
self,
autoSizeColumn=1,
size=catListDim,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER
)
# This list consists of only one column.
# The provided column header is just a placeholder, as it is hidden due to the wx.LC_NO_HEADER style flag.
self.catListCtrl.InsertColumn(0,categoriesLabelText)
self.container = scrolledpanel.ScrolledPanel(
parent = self,
style = wx.TAB_TRAVERSAL | wx.BORDER_THEME,
size=containerDim
)
		# The min size is reset so that the controls can be reduced to below their "size" constraint.
self.container.SetMinSize((1,1))
self.catListCtrl.SetMinSize((1,1))
self.containerSizer = wx.BoxSizer(wx.VERTICAL)
self.container.SetSizer(self.containerSizer)
for cls in self.categoryClasses:
if not issubclass(cls,SettingsPanel):
raise RuntimeError("Invalid category class %s provided in %s.categoryClasses"%(cls.__name__,self.__class__.__name__))
# It's important here that the listItems are added to catListCtrl in the same order that they exist in categoryClasses.
# the ListItem index / Id is used to index categoryClasses, and used as the key in catIdToInstanceMap
self.catListCtrl.Append((cls.title,))
# populate the GUI with the initial category
initialCatIndex = 0 if not self.initialCategory else self.categoryClasses.index(self.initialCategory)
self._doCategoryChange(initialCatIndex)
self.catListCtrl.Select(initialCatIndex)
# we must focus the initial category in the category list.
self.catListCtrl.Focus(initialCatIndex)
self.setPostInitFocus = self.container.SetFocus if self.initialCategory else self.catListCtrl.SetFocus
self.gridBagSizer=gridBagSizer=wx.GridBagSizer(
hgap=guiHelper.SPACE_BETWEEN_BUTTONS_HORIZONTAL,
vgap=guiHelper.SPACE_BETWEEN_BUTTONS_VERTICAL
)
# add the label, the categories list, and the settings panel to a 2 by 2 grid.
# The label should span two columns, so that the start of the categories list
# and the start of the settings panel are at the same vertical position.
gridBagSizer.Add(categoriesLabel, pos=(0,0), span=(1,2))
gridBagSizer.Add(self.catListCtrl, pos=(1,0), flag=wx.EXPAND)
gridBagSizer.Add(self.container, pos=(1,1), flag=wx.EXPAND)
# Make the row with the listCtrl and settings panel grow vertically.
gridBagSizer.AddGrowableRow(1)
# Make the columns with the listCtrl and settings panel grow horizontally if the dialog is resized.
# They should grow 1:3, since the settings panel is much more important, and already wider
# than the listCtrl.
gridBagSizer.AddGrowableCol(0, proportion=1)
gridBagSizer.AddGrowableCol(1, proportion=3)
sHelper.sizer.Add(gridBagSizer, flag=wx.EXPAND, proportion=1)
self.container.Layout()
self.catListCtrl.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onCategoryChange)
self.Bind(wx.EVT_CHAR_HOOK, self.onCharHook)
self.Bind(EVT_RW_LAYOUT_NEEDED, self._onPanelLayoutChanged)
def _getCategoryPanel(self, catId):
panel = self.catIdToInstanceMap.get(catId, None)
if not panel:
try:
cls = self.categoryClasses[catId]
except IndexError:
raise ValueError("Unable to create panel for unknown category ID: {}".format(catId))
panel = cls(parent=self.container)
panel.Hide()
self.containerSizer.Add(
panel, flag=wx.ALL | wx.EXPAND,
border=guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL
)
self.catIdToInstanceMap[catId] = panel
panelWidth = panel.Size[0]
availableWidth = self.containerSizer.GetSize()[0]
if panelWidth > availableWidth and gui._isDebug():
log.debugWarning(
("Panel width ({1}) too large for: {0} Try to reduce the width of this panel, or increase width of " +
"MultiCategorySettingsDialog.MIN_SIZE"
).format(cls, panel.Size[0])
)
panel.SetLabel(panel.title)
import oleacc
panel.server = nvdaControls.AccPropertyOverride(
panel,
propertyAnnotations={
oleacc.PROPID_ACC_ROLE: oleacc.ROLE_SYSTEM_PROPERTYPAGE, # change the role from pane to property page
oleacc.PROPID_ACC_DESCRIPTION: panel.panelDescription, # set a description
}
)
return panel
def postInit(self):
# By default after the dialog is created, focus lands on the button group for wx.Dialogs. However this is not where
# we want focus. We only want to modify focus after creation (makeSettings), but postInit is also called after
# onApply, so we reset the setPostInitFocus function.
if self.setPostInitFocus:
self.setPostInitFocus()
self.setPostInitFocus = None
else:
			# when postInit is called without a setPostInitFocus, i.e. because onApply was called,
			# set the focus to the listCtrl. This is a good starting point for a "fresh state"
self.catListCtrl.SetFocus()
def onCharHook(self,evt):
"""Listens for keyboard input and switches panels for control+tab"""
if not self.catListCtrl:
# Dialog has not yet been constructed.
# Allow another handler to take the event, and return early.
evt.Skip()
return
key = evt.GetKeyCode()
listHadFocus = self.catListCtrl.HasFocus()
if evt.ControlDown() and key==wx.WXK_TAB:
# Focus the categories list. If we don't, the panel won't hide correctly
if not listHadFocus:
self.catListCtrl.SetFocus()
index = self.catListCtrl.GetFirstSelected()
newIndex=index-1 if evt.ShiftDown() else index+1
# Less than first wraps to the last index, greater than last wraps to first index.
newIndex=newIndex % self.catListCtrl.ItemCount
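			# e.g. with 5 categories, index -1 wraps to 4 and index 5 wraps to 0.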
self.catListCtrl.Select(newIndex)
# we must focus the new selection in the category list to trigger the change of category.
self.catListCtrl.Focus(newIndex)
if not listHadFocus and self.currentCategory:
self.currentCategory.SetFocus()
else:
evt.Skip()
def _onPanelLayoutChanged(self,evt):
		# call layout and SetupScrolling on the container so that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# when child elements get smaller the scrolledPanel does not
# erase the old contents and must be redrawn
self.container.Refresh()
def _doCategoryChange(self, newCatId):
oldCat = self.currentCategory
# Freeze and Thaw are called to stop visual artifact's while the GUI
# is being rebuilt. Without this, the controls can sometimes be seen being
# added.
self.container.Freeze()
try:
newCat = self._getCategoryPanel(newCatId)
except ValueError as e:
newCatTitle = self.catListCtrl.GetItemText(newCatId)
log.error("Unable to change to category: {}".format(newCatTitle), exc_info=e)
return
if oldCat:
oldCat.onPanelDeactivated()
self.currentCategory = newCat
newCat.onPanelActivated()
		# call Layout and SetupScrolling on the container to make sure that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
self.container.Thaw()
def onCategoryChange(self, evt):
currentCat = self.currentCategory
newIndex = evt.GetIndex()
if not currentCat or newIndex != self.categoryClasses.index(currentCat.__class__):
self._doCategoryChange(newIndex)
else:
evt.Skip()
def _doSave(self):
for panel in self.catIdToInstanceMap.values():
if panel.isValid() is False:
raise ValueError("Validation for %s blocked saving settings" % panel.__class__.__name__)
for panel in self.catIdToInstanceMap.values():
panel.onSave()
for panel in self.catIdToInstanceMap.values():
panel.postSave()
def onOk(self,evt):
try:
self._doSave()
except ValueError:
log.debugWarning("", exc_info=True)
return
for panel in self.catIdToInstanceMap.values():
panel.Destroy()
super(MultiCategorySettingsDialog,self).onOk(evt)
def onCancel(self,evt):
for panel in self.catIdToInstanceMap.values():
panel.onDiscard()
panel.Destroy()
super(MultiCategorySettingsDialog,self).onCancel(evt)
def onApply(self,evt):
try:
self._doSave()
except ValueError:
log.debugWarning("", exc_info=True)
return
super(MultiCategorySettingsDialog,self).onApply(evt)
class GeneralSettingsPanel(SettingsPanel):
# Translators: This is the label for the general settings panel.
title = _("General")
helpId = "GeneralSettingsLanguage"
LOG_LEVELS = (
# Translators: One of the log levels of NVDA (the disabled mode turns off logging completely).
(log.OFF, _("disabled")),
# Translators: One of the log levels of NVDA (the info mode shows info as NVDA runs).
(log.INFO, _("info")),
# Translators: One of the log levels of NVDA (the debug warning shows debugging messages and warnings as NVDA runs).
(log.DEBUGWARNING, _("debug warning")),
# Translators: One of the log levels of NVDA (the input/output shows keyboard commands and/or braille commands as well as speech and/or braille output of NVDA).
(log.IO, _("input/output")),
# Translators: One of the log levels of NVDA (the debug mode shows debug messages as NVDA runs).
(log.DEBUG, _("debug"))
)
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.languageNames = languageHandler.getAvailableLanguages(presentational=True)
languageChoices = [x[1] for x in self.languageNames]
# Translators: The label for a setting in general settings to select NVDA's interface language
# (once selected, NVDA must be restarted; the option user default means the user's Windows language
# will be used).
languageLabelText = _("NVDA &Language (requires restart):")
self.languageList=settingsSizerHelper.addLabeledControl(languageLabelText, wx.Choice, choices=languageChoices)
self.languageList.SetToolTip(wx.ToolTip("Choose the language NVDA's messages and user interface should be presented in."))
try:
self.oldLanguage=config.conf["general"]["language"]
index=[x[0] for x in self.languageNames].index(self.oldLanguage)
self.languageList.SetSelection(index)
except:
pass
if globalVars.appArgs.secure:
self.languageList.Disable()
# Translators: The label for a setting in general settings to save current configuration when NVDA
# exits (if it is not checked, user needs to save configuration before quitting NVDA).
self.saveOnExitCheckBox = wx.CheckBox(self, label=_("&Save configuration when exiting NVDA"))
self.saveOnExitCheckBox.SetValue(config.conf["general"]["saveConfigurationOnExit"])
if globalVars.appArgs.secure:
self.saveOnExitCheckBox.Disable()
settingsSizerHelper.addItem(self.saveOnExitCheckBox)
# Translators: The label for a setting in general settings to ask before quitting NVDA (if not checked, NVDA will exit without asking the user for action).
self.askToExitCheckBox=wx.CheckBox(self,label=_("Sho&w exit options when exiting NVDA"))
self.askToExitCheckBox.SetValue(config.conf["general"]["askToExit"])
settingsSizerHelper.addItem(self.askToExitCheckBox)
self.bindHelpEvent("GeneralSettingsShowExitOptions", self.askToExitCheckBox)
# Translators: The label for a setting in general settings to play sounds when NVDA starts or exits.
self.playStartAndExitSoundsCheckBox=wx.CheckBox(self,label=_("&Play sounds when starting or exiting NVDA"))
self.bindHelpEvent("GeneralSettingsPlaySounds", self.playStartAndExitSoundsCheckBox)
self.playStartAndExitSoundsCheckBox.SetValue(config.conf["general"]["playStartAndExitSounds"])
settingsSizerHelper.addItem(self.playStartAndExitSoundsCheckBox)
# Translators: The label for a setting in general settings to select logging level of NVDA as it runs
# (available options and what they are logging are found under comments for the logging level messages
# themselves).
logLevelLabelText=_("L&ogging level:")
logLevelChoices = [name for level, name in self.LOG_LEVELS]
self.logLevelList = settingsSizerHelper.addLabeledControl(logLevelLabelText, wx.Choice, choices=logLevelChoices)
curLevel = log.getEffectiveLevel()
if logHandler.isLogLevelForced():
self.logLevelList.Disable()
for index, (level, name) in enumerate(self.LOG_LEVELS):
if level == curLevel:
self.logLevelList.SetSelection(index)
break
else:
log.debugWarning("Could not set log level list to current log level")
# Translators: The label for a setting in general settings to allow NVDA to start after logging onto
# Windows (if checked, NVDA will start automatically after logging into Windows; if not, user must
# start NVDA by pressing the shortcut key (CTRL+Alt+N by default).
self.startAfterLogonCheckBox = wx.CheckBox(self, label=_("St&art NVDA after I sign in"))
self.startAfterLogonCheckBox.SetValue(config.getStartAfterLogon())
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.startAfterLogonCheckBox.Disable()
settingsSizerHelper.addItem(self.startAfterLogonCheckBox)
self.bindHelpEvent("GeneralSettingsStartAfterLogOn", self.startAfterLogonCheckBox)
self.startOnLogonScreenCheckBox = wx.CheckBox(
self,
# Translators: The label for a setting in general settings to
# allow NVDA to come up in Windows login screen (useful if user
# needs to enter passwords or if multiple user accounts are present
# to allow user to choose the correct account).
label=_("Use NVDA during sign-in (requires administrator privileges)")
)
self.bindHelpEvent("GeneralSettingsStartOnLogOnScreen", self.startOnLogonScreenCheckBox)
self.startOnLogonScreenCheckBox.SetValue(config.getStartOnLogonScreen())
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.startOnLogonScreenCheckBox.Disable()
settingsSizerHelper.addItem(self.startOnLogonScreenCheckBox)
self.copySettingsButton = wx.Button(
self,
label=_(
# Translators: The label for a button in general settings to copy
# current user settings to system settings (to allow current
# settings to be used in secure screens such as User Account
# Control (UAC) dialog).
"Use currently saved settings during sign-in and on secure screens"
" (requires administrator privileges)"
)
)
self.bindHelpEvent("GeneralSettingsCopySettings", self.copySettingsButton)
self.copySettingsButton.Bind(wx.EVT_BUTTON,self.onCopySettings)
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.copySettingsButton.Disable()
settingsSizerHelper.addItem(self.copySettingsButton)
if updateCheck:
# Translators: The label of a checkbox in general settings to toggle automatic checking for updated versions of NVDA (if not checked, user must check for updates manually).
item=self.autoCheckForUpdatesCheckBox=wx.CheckBox(self,label=_("Automatically check for &updates to NVDA"))
self.bindHelpEvent("GeneralSettingsCheckForUpdates", self.autoCheckForUpdatesCheckBox)
item.Value=config.conf["update"]["autoCheck"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle startup notifications
# for a pending NVDA update.
item=self.notifyForPendingUpdateCheckBox=wx.CheckBox(self,label=_("Notify for &pending update on startup"))
item.Value=config.conf["update"]["startupNotification"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle allowing of usage stats gathering
item=self.allowUsageStatsCheckBox=wx.CheckBox(self,label=_("Allow the NVDA project to gather NVDA usage statistics"))
item.Value=config.conf["update"]["allowUsageStats"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
def onCopySettings(self,evt):
addonsDirPath = os.path.join(globalVars.appArgs.configPath, 'addons')
if os.path.isdir(addonsDirPath) and 0 < len(os.listdir(addonsDirPath)):
message = _(
# Translators: A message to warn the user when attempting to copy current
# settings to system settings.
"Add-ons were detected in your user settings directory. "
"Copying these to the system profile could be a security risk. "
"Do you still wish to copy your settings?"
)
# Translators: The title of the warning dialog displayed when trying to
# copy settings for use in secure screens.
title = _("Warning")
style = wx.YES | wx.NO | wx.ICON_WARNING
if wx.NO == gui.messageBox(message, title, style, self):
return
progressDialog = gui.IndeterminateProgressDialog(
gui.mainFrame,
# Translators: The title of the dialog presented while settings are being copied
_("Copying Settings"),
# Translators: The message displayed while settings are being copied
# to the system configuration (for use on Windows logon etc)
_("Please wait while settings are copied to the system configuration.")
)
while True:
try:
gui.ExecAndPump(config.setSystemConfigToCurrentConfig)
res=True
break
except installer.RetriableFailure:
log.debugWarning("Error when copying settings to system config",exc_info=True)
# Translators: a message dialog asking to retry or cancel when copying settings fails
message=_("Unable to copy a file. Perhaps it is currently being used by another process or you have run out of disc space on the drive you are copying to.")
# Translators: the title of a retry cancel dialog when copying settings fails
title=_("Error Copying")
if winUser.MessageBox(None,message,title,winUser.MB_RETRYCANCEL)==winUser.IDRETRY:
continue
res=False
break
except:
log.debugWarning("Error when copying settings to system config",exc_info=True)
res=False
break
progressDialog.done()
del progressDialog
if not res:
# Translators: The message displayed when errors were found while trying to copy current configuration to system settings.
gui.messageBox(_("Error copying NVDA user settings"),_("Error"),wx.OK|wx.ICON_ERROR,self)
else:
# Translators: The message displayed when copying configuration to system settings was successful.
gui.messageBox(_("Successfully copied NVDA user settings"),_("Success"),wx.OK|wx.ICON_INFORMATION,self)
def onSave(self):
newLanguage=[x[0] for x in self.languageNames][self.languageList.GetSelection()]
config.conf["general"]["language"]=newLanguage
config.conf["general"]["saveConfigurationOnExit"]=self.saveOnExitCheckBox.IsChecked()
config.conf["general"]["askToExit"]=self.askToExitCheckBox.IsChecked()
config.conf["general"]["playStartAndExitSounds"]=self.playStartAndExitSoundsCheckBox.IsChecked()
logLevel=self.LOG_LEVELS[self.logLevelList.GetSelection()][0]
if not logHandler.isLogLevelForced():
config.conf["general"]["loggingLevel"] = logging.getLevelName(logLevel)
logHandler.setLogLevelFromConfig()
if self.startAfterLogonCheckBox.IsEnabled():
config.setStartAfterLogon(self.startAfterLogonCheckBox.GetValue())
if self.startOnLogonScreenCheckBox.IsEnabled():
try:
config.setStartOnLogonScreen(self.startOnLogonScreenCheckBox.GetValue())
except (WindowsError, RuntimeError):
gui.messageBox(_("This change requires administrator privileges."), _("Insufficient Privileges"), style=wx.OK | wx.ICON_ERROR, parent=self)
if updateCheck:
config.conf["update"]["autoCheck"]=self.autoCheckForUpdatesCheckBox.IsChecked()
config.conf["update"]["allowUsageStats"]=self.allowUsageStatsCheckBox.IsChecked()
config.conf["update"]["startupNotification"]=self.notifyForPendingUpdateCheckBox.IsChecked()
updateCheck.terminate()
updateCheck.initialize()
def postSave(self):
if self.oldLanguage != config.conf["general"]["language"]:
LanguageRestartDialog(self).ShowModal()
class LanguageRestartDialog(wx.Dialog):
def __init__(self, parent):
# Translators: The title of the dialog which appears when the user changed NVDA's interface language.
super(LanguageRestartDialog, self).__init__(parent, title=_("Language Configuration Change"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: The message displayed after NVDA interface language has been changed.
sHelper.addItem(wx.StaticText(self, label=_("NVDA must be restarted for the new language to take effect.")))
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
# Translators: The label for a button in the dialog which appears when the user changed NVDA's interface language.
restartNowButton = bHelper.addButton(self, label=_("Restart &now"))
restartNowButton.Bind(wx.EVT_BUTTON, self.onRestartNowButton)
restartNowButton.SetFocus()
# Translators: The label for a button in the dialog which appears when the user changed NVDA's interface language.
restartLaterButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("Restart &later"))
restartLaterButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy())
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
def onRestartNowButton(self, evt):
self.Destroy()
config.conf.save()
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
class SpeechSettingsPanel(SettingsPanel):
# Translators: This is the label for the speech panel
title = _("Speech")
helpId = "SpeechSettings"
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the synthesizer on the speech panel.
synthLabel = _("&Synthesizer")
synthBox = wx.StaticBox(self, label=synthLabel)
synthGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(synthBox, wx.HORIZONTAL))
settingsSizerHelper.addItem(synthGroup)
		# Use an ExpandoTextCtrl because even when read-only it accepts focus from the keyboard, which
		# a standard read-only TextCtrl does not. ExpandoTextCtrl is a TE_MULTILINE control, however
		# by default it renders as a single line. A standard TextCtrl with TE_MULTILINE has two lines
		# and a vertical scroll bar. This is not necessary for the single line of text we wish to
		# display here.
synthDesc = getSynth().description
self.synthNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), value=synthDesc, style=wx.TE_READONLY)
self.synthNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeSynth)
# Translators: This is the label for the button used to change synthesizer,
# it appears in the context of a synthesizer group on the speech settings panel.
changeSynthBtn = wx.Button(self, label=_("C&hange..."))
self.bindHelpEvent("SpeechSettingsChange", self.synthNameCtrl)
self.bindHelpEvent("SpeechSettingsChange", changeSynthBtn)
synthGroup.addItem(
guiHelper.associateElements(
self.synthNameCtrl,
changeSynthBtn
)
)
changeSynthBtn.Bind(wx.EVT_BUTTON,self.onChangeSynth)
self.voicePanel = VoiceSettingsPanel(self)
settingsSizerHelper.addItem(self.voicePanel)
def _enterTriggersOnChangeSynth(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeSynth(evt)
else:
evt.Skip()
def onChangeSynth(self, evt):
changeSynth = SynthesizerSelectionDialog(self, multiInstanceAllowed=True)
ret = changeSynth.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentSynth(self):
synthDesc = getSynth().description
self.synthNameCtrl.SetValue(synthDesc)
def onPanelActivated(self):
		# call super after all panel updates have been completed; we don't want the panel to show until this is complete.
self.voicePanel.onPanelActivated()
super(SpeechSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.voicePanel.onPanelDeactivated()
super(SpeechSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.voicePanel.onDiscard()
def onSave(self):
self.voicePanel.onSave()
class SynthesizerSelectionDialog(SettingsDialog):
# Translators: This is the label for the synthesizer selection dialog
title = _("Select Synthesizer")
helpId = "SynthesizerSelection"
synthNames = []
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label for the select
# synthesizer combobox in the synthesizer dialog.
synthListLabelText=_("&Synthesizer:")
self.synthList = settingsSizerHelper.addLabeledControl(synthListLabelText, wx.Choice, choices=[])
self.bindHelpEvent("SelectSynthesizerSynthesizer", self.synthList)
self.updateSynthesizerList()
# Translators: This is the label for the select output
# device combo in the synthesizer dialog. Examples of
# of an output device are default soundcard, usb
# headphones, etc.
deviceListLabelText = _("Audio output &device:")
deviceNames=nvwave.getOutputDeviceNames()
# #11349: On Windows 10 20H1 and 20H2, Microsoft Sound Mapper returns an empty string.
if deviceNames[0] in ("", "Microsoft Sound Mapper"):
# Translators: name for default (Microsoft Sound Mapper) audio output device.
deviceNames[0] = _("Microsoft Sound Mapper")
self.deviceList = settingsSizerHelper.addLabeledControl(deviceListLabelText, wx.Choice, choices=deviceNames)
self.bindHelpEvent("SelectSynthesizerOutputDevice", self.deviceList)
try:
selection = deviceNames.index(config.conf["speech"]["outputDevice"])
except ValueError:
selection = 0
self.deviceList.SetSelection(selection)
# Translators: This is a label for the audio ducking combo box in the Synthesizer Settings dialog.
duckingListLabelText = _("Audio d&ucking mode:")
self.duckingList=settingsSizerHelper.addLabeledControl(duckingListLabelText, wx.Choice, choices=audioDucking.audioDuckingModes)
self.bindHelpEvent("SelectSynthesizerDuckingMode", self.duckingList)
index=config.conf['audio']['audioDuckingMode']
self.duckingList.SetSelection(index)
if not audioDucking.isAudioDuckingSupported():
self.duckingList.Disable()
def postInit(self):
# Finally, ensure that focus is on the synthlist
self.synthList.SetFocus()
def updateSynthesizerList(self):
driverList=getSynthList()
self.synthNames=[x[0] for x in driverList]
options=[x[1] for x in driverList]
self.synthList.Clear()
self.synthList.AppendItems(options)
try:
index=self.synthNames.index(getSynth().name)
self.synthList.SetSelection(index)
		except (ValueError, AttributeError):
			# No synthesizer is loaded, or the current one is not in the list.
pass
def onOk(self, evt):
if not self.synthNames:
# The list of synths has not been populated yet, so we didn't change anything in this panel
return
config.conf["speech"]["outputDevice"]=self.deviceList.GetStringSelection()
newSynth=self.synthNames[self.synthList.GetSelection()]
if not setSynth(newSynth):
# Translators: This message is presented when
# NVDA is unable to load the selected
# synthesizer.
gui.messageBox(_("Could not load the %s synthesizer.")%newSynth,_("Synthesizer Error"),wx.OK|wx.ICON_WARNING,self)
return
if audioDucking.isAudioDuckingSupported():
index=self.duckingList.GetSelection()
config.conf['audio']['audioDuckingMode']=index
audioDucking.setAudioDuckingMode(index)
# Reinitialize the tones module to update the audio device
import tones
tones.terminate()
tones.initialize()
if self.IsModal():
# Hack: we need to update the synth in our parent window before closing.
# Otherwise, NVDA will report the old synth even though the new synth is reflected visually.
self.Parent.updateCurrentSynth()
super(SynthesizerSelectionDialog, self).onOk(evt)
class DriverSettingChanger(object):
"""Functor which acts as callback for GUI events."""
def __init__(self,driver,setting):
self._driverRef=weakref.ref(driver)
self.setting=setting
@property
def driver(self):
return self._driverRef()
def __call__(self,evt):
evt.Skip() # allow other handlers to also process this event.
val=evt.GetSelection()
setattr(self.driver,self.setting.id,val)
class StringDriverSettingChanger(DriverSettingChanger):
"""Same as L{DriverSettingChanger} but handles combobox events."""
def __init__(self,driver,setting,container):
self.container=container
super(StringDriverSettingChanger,self).__init__(driver,setting)
def __call__(self,evt):
evt.Skip() # allow other handlers to also process this event.
# Quick workaround to deal with voice changes.
if self.setting.id == "voice":
# Cancel speech first so that the voice will change immediately instead of the change being queued.
speech.cancelSpeech()
changeVoice(
self.driver,
getattr(self.container,"_%ss"%self.setting.id)[evt.GetSelection()].id
)
self.container.updateDriverSettings(changedSetting=self.setting.id)
else:
setattr(
self.driver,
self.setting.id,
getattr(self.container,"_%ss"%self.setting.id)[evt.GetSelection()].id
)
class AutoSettingsMixin(metaclass=ABCMeta):
"""
	Mixin class that provides support for driver/vision provider specific GUI settings.
Derived classes should implement:
- L{getSettings}
- L{settingsSizer}
Derived classes likely need to inherit from L{SettingsPanel}, in particular
the following methods must be provided:
- makeSettings
- onPanelActivated
	@note: This mixin uses self.lastControl and self.sizerDict to keep track of the
	controls added and to maintain their ordering.
	If you plan to maintain other controls in the same panel, care will need to be taken.
"""
def __init__(self, *args, **kwargs):
"""
Mixin init, forwards args to other base class.
The other base class is likely L{gui.SettingsPanel}.
		@param args: Positional args passed to the other base class.
		@param kwargs: Keyword args passed to the other base class.
"""
self.sizerDict = {}
self.lastControl = None
super(AutoSettingsMixin, self).__init__(*args, **kwargs)
		# Because settings instances can be of type L{Driver} as well, and such instances
		# may be replaced or destroyed at runtime, we must reacquire a reference
		# to the settings object whenever we wish to use it (via L{getSettings}) in case the instance changes.
		# We also use the weakref to refresh the GUI when an instance dies.
self._currentSettingsRef = weakref.ref(
self.getSettings(),
lambda ref: wx.CallAfter(self.refreshGui)
)
settingsSizer: wx.BoxSizer
@abstractmethod
def getSettings(self) -> AutoSettings:
...
@abstractmethod
def makeSettings(self, sizer: wx.BoxSizer):
"""Populate the panel with settings controls.
@note: Normally classes also inherit from settingsDialogs.SettingsPanel.
@param sizer: The sizer to which to add the settings controls.
"""
...
def _getSettingsStorage(self) -> Any:
""" Override to change storage object for setting values."""
return self.getSettings()
@property
def hasOptions(self) -> bool:
return bool(self.getSettings().supportedSettings)
@classmethod
def _setSliderStepSizes(cls, slider, setting):
slider.SetLineSize(setting.minStep)
slider.SetPageSize(setting.largeStep)
def _makeSliderSettingControl(
self,
setting: NumericDriverSetting,
settingsStorage: Any
) -> wx.BoxSizer:
"""Constructs appropriate GUI controls for given L{DriverSetting} such as label and slider.
@param setting: Setting to construct controls for
@param settingsStorage: where to get initial values / set values.
This param must have an attribute with a name matching setting.id.
In most cases it will be of type L{AutoSettings}
@return: wx.BoxSizer containing newly created controls.
"""
labeledControl = guiHelper.LabeledControlHelper(
self,
f"{setting.displayNameWithAccelerator}:",
nvdaControls.EnhancedInputSlider,
minValue=setting.minVal,
maxValue=setting.maxVal
)
lSlider=labeledControl.control
setattr(self, f"{setting.id}Slider", lSlider)
lSlider.Bind(wx.EVT_SLIDER, DriverSettingChanger(
settingsStorage, setting
))
self._setSliderStepSizes(lSlider, setting)
lSlider.SetValue(getattr(settingsStorage, setting.id))
if self.lastControl:
lSlider.MoveAfterInTabOrder(self.lastControl)
self.lastControl=lSlider
return labeledControl.sizer
def _makeStringSettingControl(
self,
setting: DriverSetting,
settingsStorage: Any
):
"""
		Same as L{_makeSliderSettingControl} but for string settings displayed in a wx.Choice control.
		Options for the choice control come from the C{available<setting id>s} property
		(a Dict[id, StringParameterInfo]) on the instance returned by self.getSettings().
		The id of the selected value is stored on settingsStorage.
		Returns a sizer containing the label and combobox.
"""
labelText = f"{setting.displayNameWithAccelerator}:"
stringSettingAttribName = f"_{setting.id}s"
setattr(
self,
stringSettingAttribName,
# Settings are stored as an ordered dict.
# Therefore wrap this inside a list call.
list(getattr(
self.getSettings(),
f"available{setting.id.capitalize()}s"
).values())
)
stringSettings = getattr(self, stringSettingAttribName)
labeledControl = guiHelper.LabeledControlHelper(
self,
labelText,
wx.Choice,
choices=[x.displayName for x in stringSettings]
)
lCombo = labeledControl.control
setattr(self, f"{setting.id}List", lCombo)
self.bindHelpEvent(
f"SpeechSettings{setting.displayName.capitalize()}",
lCombo
)
try:
cur = getattr(settingsStorage, setting.id)
selectionIndex = [
x.id for x in stringSettings
].index(cur)
lCombo.SetSelection(selectionIndex)
except ValueError:
pass
lCombo.Bind(
wx.EVT_CHOICE,
StringDriverSettingChanger(settingsStorage, setting, self)
)
if self.lastControl:
lCombo.MoveAfterInTabOrder(self.lastControl)
self.lastControl = lCombo
return labeledControl.sizer
def _makeBooleanSettingControl(
self,
setting: BooleanDriverSetting,
settingsStorage: Any
):
"""
Same as L{_makeSliderSettingControl} but for boolean settings. Returns checkbox.
"""
checkbox = wx.CheckBox(self, label=setting.displayNameWithAccelerator)
setattr(self, f"{setting.id}Checkbox", checkbox)
settingsStorageProxy = weakref.proxy(settingsStorage)
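		# Use a weak proxy so the bound _onCheckChanged closure does not keep the
		# settings object alive longer than it otherwise would be.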
self.bindHelpEvent(f"SpeechSettings{setting.displayName.capitalize()}", checkbox)
def _onCheckChanged(evt: wx.CommandEvent):
evt.Skip() # allow other handlers to also process this event.
setattr(settingsStorageProxy, setting.id, evt.IsChecked())
checkbox.Bind(wx.EVT_CHECKBOX, _onCheckChanged)
checkbox.SetValue(getattr(
settingsStorage,
setting.id
))
if self.lastControl:
checkbox.MoveAfterInTabOrder(self.lastControl)
self.lastControl=checkbox
return checkbox
def updateDriverSettings(self, changedSetting=None):
"""
		Creates, hides, or updates GUI controls for all supported settings.
"""
settingsInst = self.getSettings()
settingsStorage = self._getSettingsStorage()
		# First, hide any already-created options that are no longer supported.
for name, sizer in self.sizerDict.items():
if name == changedSetting:
# Changing a setting shouldn't cause that setting itself to disappear.
continue
if not settingsInst.isSupported(name):
self.settingsSizer.Hide(sizer)
		# Create new controls and update those that already exist
if gui._isDebug():
log.debug(f"Current sizerDict: {self.sizerDict!r}")
log.debug(f"Current supportedSettings: {self.getSettings().supportedSettings!r}")
for setting in settingsInst.supportedSettings:
if setting.id == changedSetting:
# Changing a setting shouldn't cause that setting's own values to change.
continue
if setting.id in self.sizerDict: # update a value
self._updateValueForControl(setting, settingsStorage)
else: # create a new control
self._createNewControl(setting, settingsStorage)
# Update graphical layout of the dialog
self.settingsSizer.Layout()
def _createNewControl(self, setting, settingsStorage):
settingMaker = self._getSettingMaker(setting)
try:
s = settingMaker(setting, settingsStorage)
except UnsupportedConfigParameterError:
log.debugWarning(f"Unsupported setting {setting.id}; ignoring", exc_info=True)
else:
self.sizerDict[setting.id] = s
self.settingsSizer.Insert(
len(self.sizerDict) - 1,
s,
border=10,
flag=wx.BOTTOM
)
def _getSettingMaker(self, setting):
if isinstance(setting, NumericDriverSetting):
settingMaker = self._makeSliderSettingControl
elif isinstance(setting, BooleanDriverSetting):
settingMaker = self._makeBooleanSettingControl
else:
settingMaker = self._makeStringSettingControl
return settingMaker
def _updateValueForControl(self, setting, settingsStorage):
self.settingsSizer.Show(self.sizerDict[setting.id])
if isinstance(setting, NumericDriverSetting):
getattr(self, f"{setting.id}Slider").SetValue(
getattr(settingsStorage, setting.id)
)
elif isinstance(setting, BooleanDriverSetting):
getattr(self, f"{setting.id}Checkbox").SetValue(
getattr(settingsStorage, setting.id)
)
else:
options = getattr(self, f"_{setting.id}s")
lCombo = getattr(self, f"{setting.id}List")
try:
cur = getattr(settingsStorage, setting.id)
indexOfItem = [x.id for x in options].index(cur)
lCombo.SetSelection(indexOfItem)
except ValueError:
pass
def onDiscard(self):
# unbind change events for string settings as wx closes combo boxes on cancel
settingsInst = self.getSettings()
for setting in settingsInst.supportedSettings:
if isinstance(setting, (NumericDriverSetting, BooleanDriverSetting)):
continue
getattr(self, f"{setting.id}List").Unbind(wx.EVT_CHOICE)
# restore settings
settingsInst.loadSettings()
def onSave(self):
self.getSettings().saveSettings()
def refreshGui(self):
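		# If the weakly referenced settings instance has died, rebuild the panel's
		# controls from scratch against the instance currently returned by getSettings().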
if not self._currentSettingsRef():
if gui._isDebug():
log.debug("refreshing panel")
self.sizerDict.clear()
self.settingsSizer.Clear(delete_windows=True)
self._currentSettingsRef = weakref.ref(
self.getSettings(),
lambda ref: wx.CallAfter(self.refreshGui)
)
self.makeSettings(self.settingsSizer)
def onPanelActivated(self):
"""Called after the panel has been activated
@note: Normally classes also inherit from settingsDialogs.SettingsPanel.
"""
self.refreshGui()
super().onPanelActivated()
#: The DriverSettingsMixin name is provided for backwards compatibility.
# The name DriverSettingsMixin should be considered deprecated, use AutoSettingsMixin instead.
DriverSettingsMixin = AutoSettingsMixin
class VoiceSettingsPanel(AutoSettingsMixin, SettingsPanel):
# Translators: This is the label for the voice settings panel.
title = _("Voice")
helpId = "SpeechSettings"
@property
def driver(self):
synth: SynthDriver = getSynth()
return synth
def getSettings(self) -> AutoSettings:
return self.driver
def makeSettings(self, settingsSizer):
# Construct synthesizer settings
self.updateDriverSettings()
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, text will be read using the voice for the language of the text).
autoLanguageSwitchingText = _("Automatic language switching (when supported)")
self.autoLanguageSwitchingCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(
self,
label=autoLanguageSwitchingText
))
self.bindHelpEvent("SpeechSettingsLanguageSwitching", self.autoLanguageSwitchingCheckbox)
self.autoLanguageSwitchingCheckbox.SetValue(
config.conf["speech"]["autoLanguageSwitching"]
)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, different voices for dialects will be used to
# read text in that dialect).
autoDialectSwitchingText = _("Automatic dialect switching (when supported)")
self.autoDialectSwitchingCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=autoDialectSwitchingText)
)
self.bindHelpEvent("SpeechSettingsDialectSwitching", self.autoDialectSwitchingCheckbox)
self.autoDialectSwitchingCheckbox.SetValue(
config.conf["speech"]["autoDialectSwitching"]
)
# Translators: This is the label for a combobox in the
# voice settings panel (possible choices are none, some, most and all).
punctuationLabelText = _("Punctuation/symbol &level:")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
symbolLevelChoices = [
symbolLevelLabels[level] for level in characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS
]
self.symbolLevelList = settingsSizerHelper.addLabeledControl(
punctuationLabelText, wx.Choice, choices=symbolLevelChoices
)
self.bindHelpEvent("SpeechSettingsSymbolLevel", self.symbolLevelList)
curLevel = config.conf["speech"]["symbolLevel"]
self.symbolLevelList.SetSelection(
characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS.index(curLevel)
)
# Translators: This is the label for a checkbox in the
	# voice settings panel (if checked, the voice's language will be trusted when processing characters and symbols).
trustVoiceLanguageText = _("Trust voice's language when processing characters and symbols")
self.trustVoiceLanguageCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=trustVoiceLanguageText)
)
self.bindHelpEvent("SpeechSettingsTrust", self.trustVoiceLanguageCheckbox)
self.trustVoiceLanguageCheckbox.SetValue(config.conf["speech"]["trustVoiceLanguage"])
includeCLDRText = _(
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, data from the unicode CLDR will be used
# to speak emoji descriptions).
"Include Unicode Consortium data (including emoji) when processing characters and symbols"
)
self.includeCLDRCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=includeCLDRText)
)
self.includeCLDRCheckbox.SetValue(config.conf["speech"]["includeCLDR"])
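		# Read the allowed range for capPitchChange from the configuration specification,
		# so the spin control's bounds always match what the config will accept.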
minPitchChange = int(config.conf.getConfigValidation(
("speech", self.driver.name, "capPitchChange")
).kwargs["min"])
maxPitchChange = int(config.conf.getConfigValidation(
("speech", self.driver.name, "capPitchChange")
).kwargs["max"])
# Translators: This is a label for a setting in voice settings (an edit box to change
# voice pitch for capital letters; the higher the value, the pitch will be higher).
capPitchChangeLabelText = _("Capital pitch change percentage")
self.capPitchChangeEdit = settingsSizerHelper.addLabeledControl(
capPitchChangeLabelText,
nvdaControls.SelectOnFocusSpinCtrl,
min=minPitchChange,
max=maxPitchChange,
initial=config.conf["speech"][self.driver.name]["capPitchChange"])
self.bindHelpEvent(
"SpeechSettingsCapPitchChange",
self.capPitchChangeEdit
)
# Translators: This is the label for a checkbox in the
# voice settings panel.
sayCapForCapsText = _("Say &cap before capitals")
self.sayCapForCapsCheckBox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=sayCapForCapsText)
)
self.bindHelpEvent("SpeechSettingsSayCapBefore", self.sayCapForCapsCheckBox)
self.sayCapForCapsCheckBox.SetValue(
config.conf["speech"][self.driver.name]["sayCapForCapitals"]
)
# Translators: This is the label for a checkbox in the
# voice settings panel.
		beepForCapsText = _("&Beep for capitals")
self.beepForCapsCheckBox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=beepForCapsText)
)
self.bindHelpEvent(
"SpeechSettingsBeepForCaps",
self.beepForCapsCheckBox
)
self.beepForCapsCheckBox.SetValue(
config.conf["speech"][self.driver.name]["beepForCapitals"]
)
# Translators: This is the label for a checkbox in the
# voice settings panel.
useSpellingFunctionalityText = _("Use &spelling functionality if supported")
self.useSpellingFunctionalityCheckBox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=useSpellingFunctionalityText)
)
self.bindHelpEvent("SpeechSettingsUseSpelling", self.useSpellingFunctionalityCheckBox)
self.useSpellingFunctionalityCheckBox.SetValue(
config.conf["speech"][self.driver.name]["useSpellingFunctionality"]
)
def onSave(self):
AutoSettingsMixin.onSave(self)
config.conf["speech"]["autoLanguageSwitching"] = self.autoLanguageSwitchingCheckbox.IsChecked()
config.conf["speech"]["autoDialectSwitching"] = self.autoDialectSwitchingCheckbox.IsChecked()
config.conf["speech"]["symbolLevel"]=characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS[self.symbolLevelList.GetSelection()]
config.conf["speech"]["trustVoiceLanguage"]=self.trustVoiceLanguageCheckbox.IsChecked()
currentIncludeCLDR = config.conf["speech"]["includeCLDR"]
config.conf["speech"]["includeCLDR"] = newIncludeCldr = self.includeCLDRCheckbox.IsChecked()
		if currentIncludeCLDR != newIncludeCldr:
# Either included or excluded CLDR data, so clear the cache.
characterProcessing.clearSpeechSymbols()
config.conf["speech"][self.driver.name]["capPitchChange"]=self.capPitchChangeEdit.Value
config.conf["speech"][self.driver.name]["sayCapForCapitals"]=self.sayCapForCapsCheckBox.IsChecked()
config.conf["speech"][self.driver.name]["beepForCapitals"]=self.beepForCapsCheckBox.IsChecked()
config.conf["speech"][self.driver.name]["useSpellingFunctionality"]=self.useSpellingFunctionalityCheckBox.IsChecked()
class KeyboardSettingsPanel(SettingsPanel):
# Translators: This is the label for the keyboard settings panel.
title = _("Keyboard")
helpId = "KeyboardSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a combobox in the
# keyboard settings panel.
kbdLabelText = _("&Keyboard layout:")
layouts=keyboardHandler.KeyboardInputGesture.LAYOUTS
self.kbdNames=sorted(layouts)
kbdChoices = [layouts[layout] for layout in self.kbdNames]
self.kbdList=sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices)
self.bindHelpEvent("KeyboardSettingsLayout", self.kbdList)
try:
index=self.kbdNames.index(config.conf['keyboard']['keyboardLayout'])
self.kbdList.SetSelection(index)
		except ValueError:
log.debugWarning("Could not set Keyboard layout list to current layout",exc_info=True)
		# Translators: This is the label for a list of checkboxes
# controlling which keys are NVDA modifier keys.
modifierBoxLabel = _("&Select NVDA Modifier Keys")
self.modifierChoices = [keyLabels.localizedKeyLabels[key] for key in keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS]
self.modifierList=sHelper.addLabeledControl(modifierBoxLabel, nvdaControls.CustomCheckListBox, choices=self.modifierChoices)
checkedItems = []
if config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("numpadinsert"))
if config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("insert"))
if config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("capslock"))
self.modifierList.CheckedItems = checkedItems
self.modifierList.Select(0)
self.bindHelpEvent("KeyboardSettingsModifiers", self.modifierList)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
charsText = _("Speak typed &characters")
self.charsCheckBox=sHelper.addItem(wx.CheckBox(self,label=charsText))
self.bindHelpEvent(
"KeyboardSettingsSpeakTypedCharacters",
self.charsCheckBox
)
self.charsCheckBox.SetValue(config.conf["keyboard"]["speakTypedCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speakTypedWordsText = _("Speak typed &words")
self.wordsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speakTypedWordsText))
self.bindHelpEvent("KeyboardSettingsSpeakTypedWords", self.wordsCheckBox)
self.wordsCheckBox.SetValue(config.conf["keyboard"]["speakTypedWords"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForCharText = _("Speech &interrupt for typed characters")
self.speechInterruptForCharsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForCharText))
self.bindHelpEvent("KeyboardSettingsSpeechInteruptForCharacters", self.speechInterruptForCharsCheckBox)
self.speechInterruptForCharsCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForEnterText = _("Speech i&nterrupt for Enter key")
self.speechInterruptForEnterCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForEnterText))
self.speechInterruptForEnterCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForEnter"])
self.bindHelpEvent("KeyboardSettingsSpeechInteruptForEnter", self.speechInterruptForEnterCheckBox)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
allowSkimReadingInSayAllText = _("Allow skim &reading in Say All")
self.skimReadingInSayAllCheckBox=sHelper.addItem(wx.CheckBox(self,label=allowSkimReadingInSayAllText))
self.bindHelpEvent("KeyboardSettingsSkimReading", self.skimReadingInSayAllCheckBox)
self.skimReadingInSayAllCheckBox.SetValue(config.conf["keyboard"]["allowSkimReadingInSayAll"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
beepForLowercaseWithCapsLockText = _("&Beep if typing lowercase letters when caps lock is on")
self.beepLowercaseCheckBox=sHelper.addItem(wx.CheckBox(self,label=beepForLowercaseWithCapsLockText))
self.bindHelpEvent("SpeechSettingsBeepLowercase", self.beepLowercaseCheckBox)
self.beepLowercaseCheckBox.SetValue(config.conf["keyboard"]["beepForLowercaseWithCapslock"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
commandKeysText = _("Speak c&ommand keys")
self.commandKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=commandKeysText))
self.bindHelpEvent("SpeechSettingsSpeakCommandKeys", self.commandKeysCheckBox)
self.commandKeysCheckBox.SetValue(config.conf["keyboard"]["speakCommandKeys"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
alertForSpellingErrorsText = _("Play sound for &spelling errors while typing")
self.alertForSpellingErrorsCheckBox=sHelper.addItem(wx.CheckBox(self,label=alertForSpellingErrorsText))
self.bindHelpEvent("KeyboardSettingsAlertForSpellingErrors", self.alertForSpellingErrorsCheckBox)
self.alertForSpellingErrorsCheckBox.SetValue(config.conf["keyboard"]["alertForSpellingErrors"])
if not config.conf["documentFormatting"]["reportSpellingErrors"]:
self.alertForSpellingErrorsCheckBox.Disable()
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
handleInjectedKeysText = _("Handle keys from other &applications")
self.handleInjectedKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=handleInjectedKeysText))
self.bindHelpEvent("KeyboardSettingsHandleKeys", self.handleInjectedKeysCheckBox)
self.handleInjectedKeysCheckBox.SetValue(config.conf["keyboard"]["handleInjectedKeys"])
def isValid(self):
		# #2871: check whether at least one key is set as the NVDA key.
if not self.modifierList.CheckedItems:
log.debugWarning("No NVDA key set")
gui.messageBox(
# Translators: Message to report wrong configuration of the NVDA key
_("At least one key must be used as the NVDA key."),
# Translators: The title of the message box
_("Error"), wx.OK|wx.ICON_ERROR,self)
return False
return super(KeyboardSettingsPanel, self).isValid()
def onSave(self):
layout=self.kbdNames[self.kbdList.GetSelection()]
config.conf['keyboard']['keyboardLayout']=layout
config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]= self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("numpadinsert"))
config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"] = self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("insert"))
config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] = self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("capslock"))
config.conf["keyboard"]["speakTypedCharacters"]=self.charsCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedWords"]=self.wordsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForCharacters"]=self.speechInterruptForCharsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForEnter"]=self.speechInterruptForEnterCheckBox.IsChecked()
config.conf["keyboard"]["allowSkimReadingInSayAll"]=self.skimReadingInSayAllCheckBox.IsChecked()
config.conf["keyboard"]["beepForLowercaseWithCapslock"]=self.beepLowercaseCheckBox.IsChecked()
config.conf["keyboard"]["speakCommandKeys"]=self.commandKeysCheckBox.IsChecked()
config.conf["keyboard"]["alertForSpellingErrors"]=self.alertForSpellingErrorsCheckBox.IsChecked()
config.conf["keyboard"]["handleInjectedKeys"]=self.handleInjectedKeysCheckBox.IsChecked()
class MouseSettingsPanel(SettingsPanel):
# Translators: This is the label for the mouse settings panel.
title = _("Mouse")
helpId = "MouseSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
shapeChangesText = _("Report mouse &shape changes")
self.shapeCheckBox=sHelper.addItem(wx.CheckBox(self,label=shapeChangesText))
self.bindHelpEvent("MouseSettingsShape", self.shapeCheckBox)
self.shapeCheckBox.SetValue(config.conf["mouse"]["reportMouseShapeChanges"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
mouseTrackingText=_("Enable mouse &tracking")
self.mouseTrackingCheckBox=sHelper.addItem(wx.CheckBox(self,label=mouseTrackingText))
self.bindHelpEvent("MouseSettingsTracking", self.mouseTrackingCheckBox)
self.mouseTrackingCheckBox.SetValue(config.conf["mouse"]["enableMouseTracking"])
# Translators: This is the label for a combobox in the
# mouse settings panel.
textUnitLabelText=_("Text &unit resolution:")
import textInfos
self.textUnits=textInfos.MOUSE_TEXT_RESOLUTION_UNITS
textUnitsChoices = [textInfos.unitLabels[x] for x in self.textUnits]
self.textUnitComboBox=sHelper.addLabeledControl(textUnitLabelText, wx.Choice, choices=textUnitsChoices)
self.bindHelpEvent("MouseSettingsTextUnit", self.textUnitComboBox)
try:
index=self.textUnits.index(config.conf["mouse"]["mouseTextUnit"])
		except ValueError:
index=0
self.textUnitComboBox.SetSelection(index)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
reportObjectRoleText = _("Report &role when mouse enters object")
self.reportObjectRoleCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportObjectRoleText))
self.bindHelpEvent("MouseSettingsRole", self.reportObjectRoleCheckBox)
self.reportObjectRoleCheckBox.SetValue(config.conf["mouse"]["reportObjectRoleOnMouseEnter"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioText = _("&Play audio coordinates when mouse moves")
self.audioCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioText))
self.bindHelpEvent("MouseSettingsAudio", self.audioCheckBox)
self.audioCheckBox.SetValue(config.conf["mouse"]["audioCoordinatesOnMouseMove"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioDetectBrightnessText = _("&Brightness controls audio coordinates volume")
self.audioDetectBrightnessCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioDetectBrightnessText))
self.bindHelpEvent("MouseSettingsBrightness", self.audioDetectBrightnessCheckBox)
self.audioDetectBrightnessCheckBox.SetValue(config.conf["mouse"]["audioCoordinates_detectBrightness"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
ignoreInjectedMouseInputText = _("Ignore mouse input from other &applications")
self.ignoreInjectedMouseInputCheckBox=sHelper.addItem(wx.CheckBox(self,label=ignoreInjectedMouseInputText))
self.ignoreInjectedMouseInputCheckBox.SetValue(config.conf["mouse"]["ignoreInjectedMouseInput"])
def onSave(self):
config.conf["mouse"]["reportMouseShapeChanges"]=self.shapeCheckBox.IsChecked()
config.conf["mouse"]["enableMouseTracking"]=self.mouseTrackingCheckBox.IsChecked()
config.conf["mouse"]["mouseTextUnit"]=self.textUnits[self.textUnitComboBox.GetSelection()]
config.conf["mouse"]["reportObjectRoleOnMouseEnter"]=self.reportObjectRoleCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinatesOnMouseMove"]=self.audioCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinates_detectBrightness"]=self.audioDetectBrightnessCheckBox.IsChecked()
config.conf["mouse"]["ignoreInjectedMouseInput"]=self.ignoreInjectedMouseInputCheckBox.IsChecked()
class ReviewCursorPanel(SettingsPanel):
# Translators: This is the label for the review cursor settings panel.
title = _("Review Cursor")
helpId = "ReviewCursorSettings"
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followFocusCheckBox = wx.CheckBox(self, label=_("Follow system &focus"))
self.bindHelpEvent("ReviewCursorFollowFocus", self.followFocusCheckBox)
self.followFocusCheckBox.SetValue(config.conf["reviewCursor"]["followFocus"])
settingsSizer.Add(self.followFocusCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followCaretCheckBox = wx.CheckBox(self, label=_("Follow System &Caret"))
self.bindHelpEvent("ReviewCursorFollowCaret", self.followCaretCheckBox)
self.followCaretCheckBox.SetValue(config.conf["reviewCursor"]["followCaret"])
settingsSizer.Add(self.followCaretCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followMouseCheckBox = wx.CheckBox(self, label=_("Follow &mouse cursor"))
self.bindHelpEvent("ReviewCursorFollowMouse", self.followMouseCheckBox)
self.followMouseCheckBox.SetValue(config.conf["reviewCursor"]["followMouse"])
settingsSizer.Add(self.followMouseCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.simpleReviewModeCheckBox = wx.CheckBox(self, label=_("&Simple review mode"))
self.bindHelpEvent("ReviewCursorSimple", self.simpleReviewModeCheckBox)
self.simpleReviewModeCheckBox.SetValue(config.conf["reviewCursor"]["simpleReviewMode"])
settingsSizer.Add(self.simpleReviewModeCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["reviewCursor"]["followFocus"]=self.followFocusCheckBox.IsChecked()
config.conf["reviewCursor"]["followCaret"]=self.followCaretCheckBox.IsChecked()
config.conf["reviewCursor"]["followMouse"]=self.followMouseCheckBox.IsChecked()
config.conf["reviewCursor"]["simpleReviewMode"]=self.simpleReviewModeCheckBox.IsChecked()
class InputCompositionPanel(SettingsPanel):
# Translators: This is the label for the Input Composition settings panel.
title = _("Input Composition")
helpId = "InputCompositionSettings"
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.autoReportAllCandidatesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatically report all available &candidates"))
self.bindHelpEvent("InputCompositionReportAllCandidates", self.autoReportAllCandidatesCheckBox)
self.autoReportAllCandidatesCheckBox.SetValue(config.conf["inputComposition"]["autoReportAllCandidates"])
settingsSizer.Add(self.autoReportAllCandidatesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.announceSelectedCandidateCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Announce &selected candidate"))
self.bindHelpEvent("InputCompositionAnnounceSelectedCandidate", self.announceSelectedCandidateCheckBox)
self.announceSelectedCandidateCheckBox.SetValue(config.conf["inputComposition"]["announceSelectedCandidate"])
settingsSizer.Add(self.announceSelectedCandidateCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.candidateIncludesShortCharacterDescriptionCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Always include short character &description when announcing candidates"))
self.bindHelpEvent(
"InputCompositionCandidateIncludesShortCharacterDescription",
self.candidateIncludesShortCharacterDescriptionCheckBox
)
self.candidateIncludesShortCharacterDescriptionCheckBox.SetValue(config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"])
settingsSizer.Add(self.candidateIncludesShortCharacterDescriptionCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportReadingStringChangesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Report changes to the &reading string"))
self.bindHelpEvent(
"InputCompositionReadingStringChanges",
self.reportReadingStringChangesCheckBox
)
self.reportReadingStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportReadingStringChanges"])
settingsSizer.Add(self.reportReadingStringChangesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportCompositionStringChangesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Report changes to the &composition string"))
self.bindHelpEvent(
"InputCompositionCompositionStringChanges",
self.reportCompositionStringChangesCheckBox
)
self.reportCompositionStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportCompositionStringChanges"])
settingsSizer.Add(self.reportCompositionStringChangesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["inputComposition"]["autoReportAllCandidates"]=self.autoReportAllCandidatesCheckBox.IsChecked()
config.conf["inputComposition"]["announceSelectedCandidate"]=self.announceSelectedCandidateCheckBox.IsChecked()
config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]=self.candidateIncludesShortCharacterDescriptionCheckBox.IsChecked()
config.conf["inputComposition"]["reportReadingStringChanges"]=self.reportReadingStringChangesCheckBox.IsChecked()
config.conf["inputComposition"]["reportCompositionStringChanges"]=self.reportCompositionStringChangesCheckBox.IsChecked()
class ObjectPresentationPanel(SettingsPanel):
# Translators: This is the label for the object presentation panel.
title = _("Object Presentation")
helpId = "ObjectPresentationSettings"
progressLabels = (
# Translators: An option for progress bar output in the Object Presentation dialog
# which disables reporting of progress bars.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("off", _("off")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by speaking.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("speak", _("Speak")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("beep", _("Beep")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by both speaking and beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("both", _("Speak and beep")),
)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportToolTipsText = _("Report &tooltips")
self.tooltipCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportToolTipsText))
self.bindHelpEvent("ObjectPresentationReportToolTips", self.tooltipCheckBox)
self.tooltipCheckBox.SetValue(config.conf["presentation"]["reportTooltips"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
balloonText = _("Report ¬ifications")
self.balloonCheckBox=sHelper.addItem(wx.CheckBox(self,label=balloonText))
self.bindHelpEvent("ObjectPresentationReportBalloons", self.balloonCheckBox)
self.balloonCheckBox.SetValue(config.conf["presentation"]["reportHelpBalloons"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
shortcutText = _("Report object shortcut &keys")
self.shortcutCheckBox=sHelper.addItem(wx.CheckBox(self,label=shortcutText))
self.bindHelpEvent("ObjectPresentationShortcutKeys", self.shortcutCheckBox)
self.shortcutCheckBox.SetValue(config.conf["presentation"]["reportKeyboardShortcuts"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
positionInfoText = _("Report object &position information")
self.positionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=positionInfoText))
self.bindHelpEvent("ObjectPresentationPositionInfo", self.positionInfoCheckBox)
self.positionInfoCheckBox.SetValue(config.conf["presentation"]["reportObjectPositionInformation"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
guessPositionInfoText = _("&Guess object position information when unavailable")
self.guessPositionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=guessPositionInfoText))
self.bindHelpEvent("ObjectPresentationGuessPositionInfo", self.guessPositionInfoCheckBox)
self.guessPositionInfoCheckBox.SetValue(config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
descriptionText = _("Report object &descriptions")
self.descriptionCheckBox=sHelper.addItem(wx.CheckBox(self,label=descriptionText))
self.bindHelpEvent("ObjectPresentationReportDescriptions", self.descriptionCheckBox)
self.descriptionCheckBox.SetValue(config.conf["presentation"]["reportObjectDescriptions"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
progressLabelText = _("Progress &bar output:")
progressChoices = [name for setting, name in self.progressLabels]
self.progressList=sHelper.addLabeledControl(progressLabelText, wx.Choice, choices=progressChoices)
self.bindHelpEvent("ObjectPresentationProgressBarOutput", self.progressList)
for index, (setting, name) in enumerate(self.progressLabels):
if setting == config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]:
self.progressList.SetSelection(index)
break
else:
log.debugWarning("Could not set progress list to current report progress bar updates setting")
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportBackgroundProgressBarsText = _("Report backg&round progress bars")
self.reportBackgroundProgressBarsCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportBackgroundProgressBarsText))
self.bindHelpEvent(
"ObjectPresentationReportBackgroundProgressBars",
self.reportBackgroundProgressBarsCheckBox
)
self.reportBackgroundProgressBarsCheckBox.SetValue(config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
dynamicContentText = _("Report dynamic &content changes")
self.dynamicContentCheckBox=sHelper.addItem(wx.CheckBox(self,label=dynamicContentText))
self.bindHelpEvent(
"ObjectPresentationReportDynamicContent",
self.dynamicContentCheckBox
)
self.dynamicContentCheckBox.SetValue(config.conf["presentation"]["reportDynamicContentChanges"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
autoSuggestionsLabelText = _("Play a sound when &auto-suggestions appear")
self.autoSuggestionSoundsCheckBox=sHelper.addItem(wx.CheckBox(self,label=autoSuggestionsLabelText))
self.bindHelpEvent(
"ObjectPresentationSuggestionSounds",
self.autoSuggestionSoundsCheckBox
)
self.autoSuggestionSoundsCheckBox.SetValue(config.conf["presentation"]["reportAutoSuggestionsWithSound"])
def onSave(self):
config.conf["presentation"]["reportTooltips"]=self.tooltipCheckBox.IsChecked()
config.conf["presentation"]["reportHelpBalloons"]=self.balloonCheckBox.IsChecked()
config.conf["presentation"]["reportKeyboardShortcuts"]=self.shortcutCheckBox.IsChecked()
config.conf["presentation"]["reportObjectPositionInformation"]=self.positionInfoCheckBox.IsChecked()
config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"]=self.guessPositionInfoCheckBox.IsChecked()
config.conf["presentation"]["reportObjectDescriptions"]=self.descriptionCheckBox.IsChecked()
config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]=self.progressLabels[self.progressList.GetSelection()][0]
config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"]=self.reportBackgroundProgressBarsCheckBox.IsChecked()
config.conf["presentation"]["reportDynamicContentChanges"]=self.dynamicContentCheckBox.IsChecked()
config.conf["presentation"]["reportAutoSuggestionsWithSound"]=self.autoSuggestionSoundsCheckBox.IsChecked()
class BrowseModePanel(SettingsPanel):
# Translators: This is the label for the browse mode settings panel.
title = _("Browse Mode")
helpId = "BrowseModeSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
maxLengthLabelText = _("&Maximum number of characters on one line")
self.maxLengthEdit = sHelper.addLabeledControl(maxLengthLabelText, nvdaControls.SelectOnFocusSpinCtrl,
# min and max are not enforced in the config for virtualBuffers.maxLineLength
min=10, max=250,
initial=config.conf["virtualBuffers"]["maxLineLength"])
self.bindHelpEvent("BrowseModeSettingsMaxLength", self.maxLengthEdit)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
pageLinesLabelText = _("&Number of lines per page")
self.pageLinesEdit = sHelper.addLabeledControl(pageLinesLabelText, nvdaControls.SelectOnFocusSpinCtrl,
# min and max are not enforced in the config for virtualBuffers.linesPerPage
min=5, max=150,
initial=config.conf["virtualBuffers"]["linesPerPage"])
self.bindHelpEvent("BrowseModeSettingsPageLines", self.pageLinesEdit)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
useScreenLayoutText = _("Use &screen layout (when supported)")
self.useScreenLayoutCheckBox = sHelper.addItem(wx.CheckBox(self, label=useScreenLayoutText))
self.bindHelpEvent("BrowseModeSettingsScreenLayout", self.useScreenLayoutCheckBox)
self.useScreenLayoutCheckBox.SetValue(config.conf["virtualBuffers"]["useScreenLayout"])
# Translators: The label for a checkbox in browse mode settings to
# enable browse mode on page load.
enableOnPageLoadText = _("&Enable browse mode on page load")
self.enableOnPageLoadCheckBox = sHelper.addItem(wx.CheckBox(self, label=enableOnPageLoadText))
self.enableOnPageLoadCheckBox.SetValue(config.conf["virtualBuffers"]["enableOnPageLoad"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoSayAllText = _("Automatic &Say All on page load")
self.autoSayAllCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoSayAllText))
self.bindHelpEvent("BrowseModeSettingsAutoSayAll", self.autoSayAllCheckBox)
self.autoSayAllCheckBox.SetValue(config.conf["virtualBuffers"]["autoSayAllOnPageLoad"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
layoutTablesText = _("Include l&ayout tables")
		self.layoutTablesCheckBox = sHelper.addItem(wx.CheckBox(self, label=layoutTablesText))
self.bindHelpEvent("BrowseModeSettingsIncludeLayoutTables", self.layoutTablesCheckBox)
self.layoutTablesCheckBox.SetValue(config.conf["documentFormatting"]["includeLayoutTables"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoPassThroughOnFocusChangeText = _("Automatic focus mode for focus changes")
self.autoPassThroughOnFocusChangeCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoPassThroughOnFocusChangeText))
self.bindHelpEvent(
"BrowseModeSettingsAutoPassThroughOnFocusChange",
self.autoPassThroughOnFocusChangeCheckBox
)
self.autoPassThroughOnFocusChangeCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoPassThroughOnCaretMoveText = _("Automatic focus mode for caret movement")
self.autoPassThroughOnCaretMoveCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoPassThroughOnCaretMoveText))
self.bindHelpEvent(
"BrowseModeSettingsAutoPassThroughOnCaretMove",
self.autoPassThroughOnCaretMoveCheckBox
)
self.autoPassThroughOnCaretMoveCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
passThroughAudioIndicationText = _("Audio indication of focus and browse modes")
self.passThroughAudioIndicationCheckBox = sHelper.addItem(wx.CheckBox(self, label=passThroughAudioIndicationText))
self.bindHelpEvent(
"BrowseModeSettingsPassThroughAudioIndication",
self.passThroughAudioIndicationCheckBox
)
self.passThroughAudioIndicationCheckBox.SetValue(config.conf["virtualBuffers"]["passThroughAudioIndication"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
trapNonCommandGesturesText = _("&Trap all non-command gestures from reaching the document")
self.trapNonCommandGesturesCheckBox = sHelper.addItem(wx.CheckBox(self, label=trapNonCommandGesturesText))
self.bindHelpEvent(
"BrowseModeSettingsTrapNonCommandGestures",
self.trapNonCommandGesturesCheckBox
)
self.trapNonCommandGesturesCheckBox.SetValue(config.conf["virtualBuffers"]["trapNonCommandGestures"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoFocusFocusableElementsText = _("Automatically set system &focus to focusable elements")
self.autoFocusFocusableElementsCheckBox = sHelper.addItem(
wx.CheckBox(self, label=autoFocusFocusableElementsText)
)
self.autoFocusFocusableElementsCheckBox.SetValue(
config.conf["virtualBuffers"]["autoFocusFocusableElements"]
)
def onSave(self):
config.conf["virtualBuffers"]["maxLineLength"]=self.maxLengthEdit.GetValue()
config.conf["virtualBuffers"]["linesPerPage"]=self.pageLinesEdit.GetValue()
config.conf["virtualBuffers"]["useScreenLayout"]=self.useScreenLayoutCheckBox.IsChecked()
config.conf["virtualBuffers"]["enableOnPageLoad"] = self.enableOnPageLoadCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoSayAllOnPageLoad"]=self.autoSayAllCheckBox.IsChecked()
config.conf["documentFormatting"]["includeLayoutTables"]=self.layoutTablesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]=self.autoPassThroughOnFocusChangeCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]=self.autoPassThroughOnCaretMoveCheckBox.IsChecked()
config.conf["virtualBuffers"]["passThroughAudioIndication"]=self.passThroughAudioIndicationCheckBox.IsChecked()
config.conf["virtualBuffers"]["trapNonCommandGestures"]=self.trapNonCommandGesturesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoFocusFocusableElements"] = (
self.autoFocusFocusableElementsCheckBox.IsChecked()
)
class DocumentFormattingPanel(SettingsPanel):
# Translators: This is the label for the document formatting panel.
title = _("Document Formatting")
helpId = "DocumentFormattingSettings"
# Translators: This is a label appearing on the document formatting settings panel.
panelDescription = _("The following options control the types of document formatting reported by NVDA.")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
sHelper.addItem(wx.StaticText(self, label=self.panelDescription))
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
fontGroupText = _("Font")
fontGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=fontGroupText), wx.VERTICAL))
sHelper.addItem(fontGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontNameText = _("&Font name")
self.fontNameCheckBox=fontGroup.addItem(wx.CheckBox(self, label=fontNameText))
self.fontNameCheckBox.SetValue(config.conf["documentFormatting"]["reportFontName"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontSizeText = _("Font &size")
self.fontSizeCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontSizeText))
self.fontSizeCheckBox.SetValue(config.conf["documentFormatting"]["reportFontSize"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontAttributesText = _("Font attrib&utes")
self.fontAttrsCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontAttributesText))
self.fontAttrsCheckBox.SetValue(config.conf["documentFormatting"]["reportFontAttributes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
superscriptsAndSubscriptsText = _("Su&perscripts and subscripts")
self.superscriptsAndSubscriptsCheckBox = fontGroup.addItem(
wx.CheckBox(self, label=superscriptsAndSubscriptsText)
)
self.superscriptsAndSubscriptsCheckBox.SetValue(
config.conf["documentFormatting"]["reportSuperscriptsAndSubscripts"]
)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
emphasisText=_("E&mphasis")
self.emphasisCheckBox=fontGroup.addItem(wx.CheckBox(self,label=emphasisText))
self.emphasisCheckBox.SetValue(config.conf["documentFormatting"]["reportEmphasis"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
highlightText = _("Marked (highlighted text)")
self.highlightCheckBox = fontGroup.addItem(
wx.CheckBox(self, label=highlightText)
)
self.highlightCheckBox.SetValue(
config.conf["documentFormatting"]["reportHighlight"]
)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
		styleText = _("St&yle")
self.styleCheckBox=fontGroup.addItem(wx.CheckBox(self,label=styleText))
self.styleCheckBox.SetValue(config.conf["documentFormatting"]["reportStyle"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
colorsText = _("&Colors")
self.colorCheckBox=fontGroup.addItem(wx.CheckBox(self,label=colorsText))
self.colorCheckBox.SetValue(config.conf["documentFormatting"]["reportColor"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
documentInfoGroupText = _("Document information")
docInfoGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=documentInfoGroupText), wx.VERTICAL))
sHelper.addItem(docInfoGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
commentsText = _("No&tes and comments")
self.commentsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=commentsText))
self.commentsCheckBox.SetValue(config.conf["documentFormatting"]["reportComments"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
revisionsText = _("&Editor revisions")
self.revisionsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=revisionsText))
self.revisionsCheckBox.SetValue(config.conf["documentFormatting"]["reportRevisions"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
spellingErrorText = _("Spelling e&rrors")
self.spellingErrorsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=spellingErrorText))
self.spellingErrorsCheckBox.SetValue(config.conf["documentFormatting"]["reportSpellingErrors"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
pageAndSpaceGroupText = _("Pages and spacing")
pageAndSpaceGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=pageAndSpaceGroupText), wx.VERTICAL))
sHelper.addItem(pageAndSpaceGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
pageText = _("&Pages")
self.pageCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=pageText))
self.pageCheckBox.SetValue(config.conf["documentFormatting"]["reportPage"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
lineText = _("Line &numbers")
self.lineNumberCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineText))
self.lineNumberCheckBox.SetValue(config.conf["documentFormatting"]["reportLineNumber"])
# Translators: This is the label for a combobox controlling the reporting of line indentation in the
	# Document Formatting dialog (possible choices are Off, Speech, Tones, or Both).
lineIndentationText = _("Line &indentation reporting:")
indentChoices=[
#Translators: A choice in a combo box in the document formatting dialog to report No line Indentation.
_("Off"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with Speech.
pgettext('line indentation setting', "Speech"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with tones.
_("Tones"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with both Speech and tones.
_("Both Speech and Tones")
]
self.lineIndentationCombo = pageAndSpaceGroup.addLabeledControl(lineIndentationText, wx.Choice, choices=indentChoices)
self.bindHelpEvent(
"DocumentFormattingSettingsLineIndentation",
self.lineIndentationCombo
)
# We use bitwise operations because it saves us a four-way if statement.
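# The two config booleans encode the combo index as a 2-bit number:
# reportLineIndentation is bit 0 (speech) and reportLineIndentationWithTones is bit 1 (tones).
# For example, with both enabled: (1 << 1) | 1 == 3, selecting "Both Speech and Tones".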
curChoice = config.conf["documentFormatting"]["reportLineIndentationWithTones"] << 1 | config.conf["documentFormatting"]["reportLineIndentation"]
self.lineIndentationCombo.SetSelection(curChoice)
# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report paragraph indentation if available.
paragraphIndentationText = _("&Paragraph indentation")
self.paragraphIndentationCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=paragraphIndentationText))
self.paragraphIndentationCheckBox.SetValue(config.conf["documentFormatting"]["reportParagraphIndentation"])
# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report line spacing if available.
lineSpacingText=_("&Line spacing")
self.lineSpacingCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineSpacingText))
self.lineSpacingCheckBox.SetValue(config.conf["documentFormatting"]["reportLineSpacing"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
alignmentText = _("&Alignment")
self.alignmentCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=alignmentText))
self.alignmentCheckBox.SetValue(config.conf["documentFormatting"]["reportAlignment"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
tablesGroupText = _("Table information")
tablesGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=tablesGroupText), wx.VERTICAL))
sHelper.addItem(tablesGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tablesCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("&Tables")))
self.tablesCheckBox.SetValue(config.conf["documentFormatting"]["reportTables"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableHeadersCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Row/column h&eaders")))
self.tableHeadersCheckBox.SetValue(config.conf["documentFormatting"]["reportTableHeaders"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableCellCoordsCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Cell c&oordinates")))
self.tableCellCoordsCheckBox.SetValue(config.conf["documentFormatting"]["reportTableCellCoords"])
borderChoices=[
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Off"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Styles"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Both Colors and Styles"),
]
self.borderComboBox = tablesGroup.addLabeledControl(
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Cell &borders:"),
wx.Choice,
choices=borderChoices
)
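# Map the two border config booleans onto the three combo choices:
# style off -> 0 (Off); style on, color off -> 1 (Styles); style and color on -> 2 (Both Colors and Styles).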
curChoice = 0
if config.conf["documentFormatting"]["reportBorderStyle"]:
if config.conf["documentFormatting"]["reportBorderColor"]:
curChoice = 2
else:
curChoice = 1
self.borderComboBox.SetSelection(curChoice)
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
elementsGroupText = _("Elements")
elementsGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=elementsGroupText), wx.VERTICAL))
sHelper.addItem(elementsGroup, flag=wx.EXPAND, proportion=1)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.headingsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Headings")))
self.headingsCheckBox.SetValue(config.conf["documentFormatting"]["reportHeadings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.linksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lin&ks")))
self.linksCheckBox.SetValue(config.conf["documentFormatting"]["reportLinks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.graphicsCheckBox = elementsGroup.addItem(wx.CheckBox(self, label=_("&Graphics")))
self.graphicsCheckBox.SetValue(config.conf["documentFormatting"]["reportGraphics"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.listsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Lists")))
self.listsCheckBox.SetValue(config.conf["documentFormatting"]["reportLists"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.blockQuotesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Block "es")))
self.blockQuotesCheckBox.SetValue(config.conf["documentFormatting"]["reportBlockQuotes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
groupingsText = _("&Groupings")
self.groupingsCheckBox = elementsGroup.addItem(wx.CheckBox(self, label=groupingsText))
self.groupingsCheckBox.SetValue(config.conf["documentFormatting"]["reportGroupings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
landmarksText = _("Lan&dmarks and regions")
self.landmarksCheckBox = elementsGroup.addItem(wx.CheckBox(self, label=landmarksText))
self.landmarksCheckBox.SetValue(config.conf["documentFormatting"]["reportLandmarks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.articlesCheckBox = elementsGroup.addItem(wx.CheckBox(self, label=_("Arti&cles")))
self.articlesCheckBox.SetValue(config.conf["documentFormatting"]["reportArticles"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.framesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Fra&mes")))
self.framesCheckBox.Value=config.conf["documentFormatting"]["reportFrames"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.clickableCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Clickable")))
self.clickableCheckBox.Value=config.conf["documentFormatting"]["reportClickable"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
detectFormatAfterCursorText = _("Report formatting chan&ges after the cursor (can cause a lag)")
self.detectFormatAfterCursorCheckBox=wx.CheckBox(self, label=detectFormatAfterCursorText)
self.bindHelpEvent(
"DocumentFormattingDetectFormatAfterCursor",
self.detectFormatAfterCursorCheckBox
)
self.detectFormatAfterCursorCheckBox.SetValue(config.conf["documentFormatting"]["detectFormatAfterCursor"])
sHelper.addItem(self.detectFormatAfterCursorCheckBox)
def onSave(self):
config.conf["documentFormatting"]["detectFormatAfterCursor"]=self.detectFormatAfterCursorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontName"]=self.fontNameCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontSize"]=self.fontSizeCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontAttributes"]=self.fontAttrsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSuperscriptsAndSubscripts"] = (
self.superscriptsAndSubscriptsCheckBox.IsChecked()
)
config.conf["documentFormatting"]["reportColor"]=self.colorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportComments"]=self.commentsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportRevisions"]=self.revisionsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportEmphasis"]=self.emphasisCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHighlight"] = self.highlightCheckBox.IsChecked()
config.conf["documentFormatting"]["reportAlignment"]=self.alignmentCheckBox.IsChecked()
config.conf["documentFormatting"]["reportStyle"]=self.styleCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSpellingErrors"]=self.spellingErrorsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportPage"]=self.pageCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineNumber"]=self.lineNumberCheckBox.IsChecked()
choice = self.lineIndentationCombo.GetSelection()
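# Decode the 2-bit encoding used in makeSettings: choices 1 and 3 include speech (bit 0),
# choices 2 and 3 include tones (bit 1), and choice 0 is off.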
config.conf["documentFormatting"]["reportLineIndentation"] = choice in (1, 3)
config.conf["documentFormatting"]["reportLineIndentationWithTones"] = choice in (2, 3)
config.conf["documentFormatting"]["reportParagraphIndentation"]=self.paragraphIndentationCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineSpacing"]=self.lineSpacingCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTables"]=self.tablesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableHeaders"]=self.tableHeadersCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableCellCoords"]=self.tableCellCoordsCheckBox.IsChecked()
choice = self.borderComboBox.GetSelection()
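# The combo index maps back onto the two config booleans:
# 0 = Off, 1 = styles only, 2 = both styles and colors.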
config.conf["documentFormatting"]["reportBorderStyle"] = choice in (1,2)
config.conf["documentFormatting"]["reportBorderColor"] = (choice == 2)
config.conf["documentFormatting"]["reportLinks"]=self.linksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportGraphics"] = self.graphicsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHeadings"]=self.headingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLists"]=self.listsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportBlockQuotes"]=self.blockQuotesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportGroupings"] = self.groupingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLandmarks"]=self.landmarksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportArticles"] = self.articlesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFrames"]=self.framesCheckBox.Value
config.conf["documentFormatting"]["reportClickable"]=self.clickableCheckBox.Value
class TouchInteractionPanel(SettingsPanel):
# Translators: This is the label for the touch interaction settings panel.
title = _("Touch Interaction")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
touchSupportEnableLabel = _("Enable touch interaction support")
self.enableTouchSupportCheckBox = sHelper.addItem(wx.CheckBox(self, label=touchSupportEnableLabel))
self.enableTouchSupportCheckBox.SetValue(config.conf["touch"]["enabled"])
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
self.touchTypingCheckBox = sHelper.addItem(wx.CheckBox(self, label=_("&Touch typing mode")))
self.touchTypingCheckBox.SetValue(config.conf["touch"]["touchTyping"])
def onSave(self):
config.conf["touch"]["enabled"] = self.enableTouchSupportCheckBox.IsChecked()
config.conf["touch"]["touchTyping"] = self.touchTypingCheckBox.IsChecked()
touchHandler.setTouchSupport(config.conf["touch"]["enabled"])
class UwpOcrPanel(SettingsPanel):
# Translators: The title of the Windows 10 OCR panel.
title = _("Windows 10 OCR")
helpId = "Win10OcrSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Lazily import this.
from contentRecog import uwpOcr
self.languageCodes = uwpOcr.getLanguages()
languageChoices = [
languageHandler.getLanguageDescription(languageHandler.normalizeLanguage(lang))
for lang in self.languageCodes]
# Translators: Label for an option in the Windows 10 OCR dialog.
languageLabel = _("Recognition &language:")
self.languageChoice = sHelper.addLabeledControl(languageLabel, wx.Choice, choices=languageChoices)
self.bindHelpEvent("Win10OcrSettingsRecognitionLanguage", self.languageChoice)
try:
langIndex = self.languageCodes.index(config.conf["uwpOcr"]["language"])
self.languageChoice.Selection = langIndex
except ValueError:
self.languageChoice.Selection = 0
def onSave(self):
lang = self.languageCodes[self.languageChoice.Selection]
config.conf["uwpOcr"]["language"] = lang
class AdvancedPanelControls(wx.Panel):
"""Holds the actual controls for the Advanced Settings panel, this allows the state of the controls to
be more easily managed.
"""
def __init__(self, parent):
super(AdvancedPanelControls, self).__init__(parent)
self._defaultsRestored = False
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
self.SetSizer(sHelper.sizer)
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
groupText = _("NVDA Development")
devGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=groupText, orient=wx.VERTICAL)
)
sHelper.addItem(devGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Enable loading custom code from Developer Scratchpad directory")
self.scratchpadCheckBox=devGroup.addItem(wx.CheckBox(self, label=label))
self.scratchpadCheckBox.SetValue(config.conf["development"]["enableScratchpadDir"])
self.scratchpadCheckBox.defaultValue = self._getDefaultValue(["development", "enableScratchpadDir"])
self.scratchpadCheckBox.Bind(
wx.EVT_CHECKBOX,
lambda evt: self.openScratchpadButton.Enable(evt.IsChecked())
)
if config.isAppX:
self.scratchpadCheckBox.Disable()
# Translators: the label for a button in the Advanced settings category
label=_("Open developer scratchpad directory")
self.openScratchpadButton=devGroup.addItem(wx.Button(self, label=label))
self.openScratchpadButton.Enable(config.conf["development"]["enableScratchpadDir"])
self.openScratchpadButton.Bind(wx.EVT_BUTTON,self.onOpenScratchpadDir)
if config.isAppX:
self.openScratchpadButton.Disable()
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Microsoft UI Automation")
UIAGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(UIAGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Enable &selective registration for UI Automation events and property changes")
self.selectiveUIAEventRegistrationCheckBox = UIAGroup.addItem(wx.CheckBox(self, label=label))
self.selectiveUIAEventRegistrationCheckBox.SetValue(config.conf["UIA"]["selectiveEventRegistration"])
self.selectiveUIAEventRegistrationCheckBox.defaultValue = (
self._getDefaultValue(["UIA", "selectiveEventRegistration"])
)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use UI Automation to access Microsoft &Word document controls when available")
self.UIAInMSWordCheckBox=UIAGroup.addItem(wx.CheckBox(self, label=label))
self.UIAInMSWordCheckBox.SetValue(config.conf["UIA"]["useInMSWordWhenAvailable"])
self.UIAInMSWordCheckBox.defaultValue = self._getDefaultValue(["UIA", "useInMSWordWhenAvailable"])
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use UI Automation to access the Windows C&onsole when available")
consoleUIADevMap = config.conf['UIA']['winConsoleImplementation'] == 'UIA'
self.ConsoleUIACheckBox = UIAGroup.addItem(wx.CheckBox(self, label=label))
self.ConsoleUIACheckBox.SetValue(consoleUIADevMap)
self.ConsoleUIACheckBox.defaultValue = self._getDefaultValue(["UIA", "winConsoleImplementation"])
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Speak &passwords in UIA consoles (may improve performance)")
self.winConsoleSpeakPasswordsCheckBox = UIAGroup.addItem(wx.CheckBox(self, label=label))
self.winConsoleSpeakPasswordsCheckBox.SetValue(config.conf["terminals"]["speakPasswords"])
self.winConsoleSpeakPasswordsCheckBox.defaultValue = self._getDefaultValue(["terminals", "speakPasswords"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Terminal programs")
terminalsGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(terminalsGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use the new t&yped character support in Windows Console when available")
self.keyboardSupportInLegacyCheckBox=terminalsGroup.addItem(wx.CheckBox(self, label=label))
self.keyboardSupportInLegacyCheckBox.SetValue(config.conf["terminals"]["keyboardSupportInLegacy"])
self.keyboardSupportInLegacyCheckBox.defaultValue = self._getDefaultValue(["terminals", "keyboardSupportInLegacy"])
self.keyboardSupportInLegacyCheckBox.Enable(winVersion.isWin10(1607))
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Speech")
speechGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(speechGroup)
expiredFocusSpeechChoices = [
# Translators: Label for the 'Cancel speech for expired &focus events' combobox
# in the Advanced settings panel.
_("Default (No)"),
# Translators: Label for the 'Cancel speech for expired &focus events' combobox
# in the Advanced settings panel.
_("Yes"),
# Translators: Label for the 'Cancel speech for expired &focus events' combobox
# in the Advanced settings panel.
_("No"),
]
# Translators: This is the label for combobox in the Advanced settings panel.
cancelExpiredFocusSpeechText = _("Attempt to cancel speech for expired focus events:")
self.cancelExpiredFocusSpeechCombo: wx.Choice = speechGroup.addLabeledControl(
cancelExpiredFocusSpeechText,
wx.Choice,
choices=expiredFocusSpeechChoices
)
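# The stored feature flag value doubles as the combo index:
# 0 = default (no), 1 = yes, 2 = no; onSave writes the selection back unchanged.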
self.cancelExpiredFocusSpeechCombo.SetSelection(
config.conf["featureFlag"]["cancelExpiredFocusSpeech"]
)
self.cancelExpiredFocusSpeechCombo.defaultValue = self._getDefaultValue(
["featureFlag", "cancelExpiredFocusSpeech"]
)
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Editable Text")
editableTextGroup = guiHelper.BoxSizerHelper(
self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(editableTextGroup)
# Translators: This is the label for a numeric control in the
# Advanced settings panel.
label = _("Caret movement timeout (in ms)")
self.caretMoveTimeoutSpinControl=editableTextGroup.addLabeledControl(
label,
nvdaControls.SelectOnFocusSpinCtrl,
min=0,
max=2000,
initial=config.conf["editableText"]["caretMoveTimeoutMs"]
)
self.caretMoveTimeoutSpinControl.defaultValue = self._getDefaultValue(["editableText", "caretMoveTimeoutMs"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Debug logging")
debugLogGroup = guiHelper.BoxSizerHelper(
self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(debugLogGroup)
self.logCategories=[
"hwIo",
"MSAA",
"UIA",
"audioDucking",
"gui",
"louis",
"timeSinceInput",
"vision",
"speech",
"speechManager",
"nvwave",
]
# Translators: This is the label for a list in the
# Advanced settings panel
logCategoriesLabel=_("Enabled logging categories")
self.logCategoriesList=debugLogGroup.addLabeledControl(
logCategoriesLabel,
nvdaControls.CustomCheckListBox,
choices=self.logCategories
)
self.logCategoriesList.CheckedItems = [
index for index, x in enumerate(self.logCategories) if config.conf['debugLog'][x]
]
self.logCategoriesList.Select(0)
self.logCategoriesList.defaultCheckedItems = [
index for index, x in enumerate(self.logCategories) if bool(
self._getDefaultValue(['debugLog', x])
)
]
self.Layout()
def onOpenScratchpadDir(self,evt):
path=config.getScratchpadDir(ensureExists=True)
os.startfile(path)
def _getDefaultValue(self, configPath):
return config.conf.getConfigValidation(configPath).default
def haveConfigDefaultsBeenRestored(self):
return (
self._defaultsRestored
and self.scratchpadCheckBox.IsChecked() == self.scratchpadCheckBox.defaultValue
and (
self.selectiveUIAEventRegistrationCheckBox.IsChecked()
== self.selectiveUIAEventRegistrationCheckBox.defaultValue
)
and self.UIAInMSWordCheckBox.IsChecked() == self.UIAInMSWordCheckBox.defaultValue
and self.ConsoleUIACheckBox.IsChecked() == (self.ConsoleUIACheckBox.defaultValue == 'UIA')
and self.winConsoleSpeakPasswordsCheckBox.IsChecked() == self.winConsoleSpeakPasswordsCheckBox.defaultValue
and self.cancelExpiredFocusSpeechCombo.GetSelection() == self.cancelExpiredFocusSpeechCombo.defaultValue
and self.keyboardSupportInLegacyCheckBox.IsChecked() == self.keyboardSupportInLegacyCheckBox.defaultValue
and self.caretMoveTimeoutSpinControl.GetValue() == self.caretMoveTimeoutSpinControl.defaultValue
and set(self.logCategoriesList.CheckedItems) == set(self.logCategoriesList.defaultCheckedItems)
and True # reduce noise in diff when the list is extended.
)
def restoreToDefaults(self):
self.scratchpadCheckBox.SetValue(self.scratchpadCheckBox.defaultValue)
self.selectiveUIAEventRegistrationCheckBox.SetValue(self.selectiveUIAEventRegistrationCheckBox.defaultValue)
self.UIAInMSWordCheckBox.SetValue(self.UIAInMSWordCheckBox.defaultValue)
self.ConsoleUIACheckBox.SetValue(self.ConsoleUIACheckBox.defaultValue == 'UIA')
self.winConsoleSpeakPasswordsCheckBox.SetValue(self.winConsoleSpeakPasswordsCheckBox.defaultValue)
self.cancelExpiredFocusSpeechCombo.SetSelection(self.cancelExpiredFocusSpeechCombo.defaultValue)
self.keyboardSupportInLegacyCheckBox.SetValue(self.keyboardSupportInLegacyCheckBox.defaultValue)
self.caretMoveTimeoutSpinControl.SetValue(self.caretMoveTimeoutSpinControl.defaultValue)
self.logCategoriesList.CheckedItems = self.logCategoriesList.defaultCheckedItems
self._defaultsRestored = True
def onSave(self):
log.debug("Saving advanced config")
config.conf["development"]["enableScratchpadDir"]=self.scratchpadCheckBox.IsChecked()
config.conf["UIA"]["selectiveEventRegistration"] = self.selectiveUIAEventRegistrationCheckBox.IsChecked()
config.conf["UIA"]["useInMSWordWhenAvailable"]=self.UIAInMSWordCheckBox.IsChecked()
if self.ConsoleUIACheckBox.IsChecked():
config.conf['UIA']['winConsoleImplementation'] = "UIA"
else:
config.conf['UIA']['winConsoleImplementation'] = "auto"
config.conf["terminals"]["speakPasswords"] = self.winConsoleSpeakPasswordsCheckBox.IsChecked()
config.conf["featureFlag"]["cancelExpiredFocusSpeech"] = self.cancelExpiredFocusSpeechCombo.GetSelection()
config.conf["terminals"]["keyboardSupportInLegacy"]=self.keyboardSupportInLegacyCheckBox.IsChecked()
config.conf["editableText"]["caretMoveTimeoutMs"]=self.caretMoveTimeoutSpinControl.GetValue()
for index,key in enumerate(self.logCategories):
config.conf['debugLog'][key]=self.logCategoriesList.IsChecked(index)
class AdvancedPanel(SettingsPanel):
enableControlsCheckBox = None # type: wx.CheckBox
# Translators: This is the label for the Advanced settings panel.
title = _("Advanced")
# Translators: This is the label to warn users about the Advanced options in the
# Advanced settings panel
warningHeader = _("Warning!")
warningExplanation = _(
# Translators: This is a label appearing on the Advanced settings panel.
"The following settings are for advanced users. "
"Changing them may cause NVDA to function incorrectly. "
"Please only change these if you know what you are doing or "
"have been specifically instructed by NVDA developers."
)
panelDescription = u"{}\n{}".format(warningHeader, warningExplanation)
def makeSettings(self, settingsSizer):
"""
:type settingsSizer: wx.BoxSizer
"""
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
warningGroup = guiHelper.BoxSizerHelper(
self,
sizer=wx.StaticBoxSizer(wx.StaticBox(self), wx.VERTICAL)
)
sHelper.addItem(warningGroup)
warningBox = warningGroup.sizer.GetStaticBox() # type: wx.StaticBox
warningText = wx.StaticText(warningBox, label=self.warningHeader)
warningText.SetFont(wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD))
warningGroup.addItem(warningText)
self.windowText = warningGroup.addItem(wx.StaticText(warningBox, label=self.warningExplanation))
self.windowText.Wrap(self.scaleSize(544))
enableAdvancedControlslabel = _(
# Translators: This is the label for a checkbox in the Advanced settings panel.
"I understand that changing these settings may cause NVDA to function incorrectly."
)
self.enableControlsCheckBox = warningGroup.addItem(
wx.CheckBox(parent=warningBox, label=enableAdvancedControlslabel, id=wx.NewIdRef())
)
boldedFont = self.enableControlsCheckBox.GetFont().Bold()
self.enableControlsCheckBox.SetFont(boldedFont)
restoreDefaultsButton = warningGroup.addItem(
# Translators: This is the label for a button in the Advanced settings panel
wx.Button(self, label=_("Restore defaults"))
)
restoreDefaultsButton.Bind(wx.EVT_BUTTON, lambda evt: self.advancedControls.restoreToDefaults())
self.advancedControls = AdvancedPanelControls(self)
sHelper.sizer.Add(self.advancedControls, flag=wx.EXPAND)
self.enableControlsCheckBox.Bind(
wx.EVT_CHECKBOX,
self.onEnableControlsCheckBox
)
self.advancedControls.Enable(self.enableControlsCheckBox.IsChecked())
def onSave(self):
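# Only persist the advanced settings if the user has opted in via the warning
# checkbox, or has explicitly restored the defaults.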
if (
self.enableControlsCheckBox.IsChecked() or
self.advancedControls.haveConfigDefaultsBeenRestored()
):
self.advancedControls.onSave()
def onEnableControlsCheckBox(self, evt):
# Due to a not yet fully understood mis-ordering of event processing, we force NVDA to
# process pending events. This fixes an issue where the checkbox state was reported
# incorrectly. This checkbox differs from most in that it enables more controls than is
# typical, which may introduce enough of a delay to cause a mismatch between the state
# of the checkbox and when NVDA processes the events.
from api import processPendingEvents
processPendingEvents()
self.advancedControls.Enable(evt.IsChecked())
class DictionaryEntryDialog(wx.Dialog):
TYPE_LABELS = {
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_ANYWHERE: _("&Anywhere"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_WORD: _("Whole &word"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_REGEXP: _("Regular &expression")
}
TYPE_LABELS_ORDERING = (speechDictHandler.ENTRY_TYPE_ANYWHERE, speechDictHandler.ENTRY_TYPE_WORD, speechDictHandler.ENTRY_TYPE_REGEXP)
# Translators: This is the label for the edit dictionary entry dialog.
def __init__(self, parent, title=_("Edit Dictionary Entry")):
super(DictionaryEntryDialog,self).__init__(parent,title=title)
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is a label for an edit field in add dictionary entry dialog.
patternLabelText = _("&Pattern")
self.patternTextCtrl=sHelper.addLabeledControl(patternLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog and in punctuation/symbol pronunciation dialog.
replacementLabelText = _("&Replacement")
self.replacementTextCtrl=sHelper.addLabeledControl(replacementLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog.
commentLabelText = _("&Comment")
self.commentTextCtrl=sHelper.addLabeledControl(commentLabelText, wx.TextCtrl)
# Translators: This is a label for a checkbox in add dictionary entry dialog.
caseSensitiveText = _("Case &sensitive")
self.caseSensitiveCheckBox=sHelper.addItem(wx.CheckBox(self,label=caseSensitiveText))
# Translators: This is a label for a set of radio buttons in add dictionary entry dialog.
typeText = _("&Type")
typeChoices = [DictionaryEntryDialog.TYPE_LABELS[i] for i in DictionaryEntryDialog.TYPE_LABELS_ORDERING]
self.typeRadioBox=sHelper.addItem(wx.RadioBox(self,label=typeText, choices=typeChoices))
sHelper.addDialogDismissButtons(wx.OK | wx.CANCEL, separated=True)
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.setType(speechDictHandler.ENTRY_TYPE_ANYWHERE)
self.patternTextCtrl.SetFocus()
self.Bind(wx.EVT_BUTTON,self.onOk,id=wx.ID_OK)
def getType(self):
typeRadioValue = self.typeRadioBox.GetSelection()
if typeRadioValue == wx.NOT_FOUND:
return speechDictHandler.ENTRY_TYPE_ANYWHERE
return DictionaryEntryDialog.TYPE_LABELS_ORDERING[typeRadioValue]
def onOk(self,evt):
if not self.patternTextCtrl.GetValue():
# Translators: This is an error message to let the user know that the pattern field in the dictionary entry is not valid.
gui.messageBox(_("A pattern is required."), _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
self.patternTextCtrl.SetFocus()
return
try:
dictEntry = self.dictEntry = speechDictHandler.SpeechDictEntry(
self.patternTextCtrl.GetValue(),
self.replacementTextCtrl.GetValue(),
self.commentTextCtrl.GetValue(),
bool(self.caseSensitiveCheckBox.GetValue()),
self.getType()
)
dictEntry.sub("test") # Ensure there are no grouping error (#11407)
except Exception as e:
log.debugWarning("Could not add dictionary entry due to (regex error) : %s" % e)
# Translators: This is an error message to let the user know that the dictionary entry is not valid.
gui.messageBox(_("Regular Expression error: \"%s\".")%e, _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
return
evt.Skip()
def setType(self, type):
self.typeRadioBox.SetSelection(DictionaryEntryDialog.TYPE_LABELS_ORDERING.index(type))
class DictionaryDialog(SettingsDialog):
TYPE_LABELS = {t: l.replace("&", "") for t, l in DictionaryEntryDialog.TYPE_LABELS.items()}
helpId = "SpeechDictionaries"
def __init__(self,parent,title,speechDict):
self.title = title
self.speechDict = speechDict
self.tempSpeechDict=speechDictHandler.SpeechDict()
self.tempSpeechDict.extend(self.speechDict)
globalVars.speechDictionaryProcessing=False
super().__init__(parent, resizeable=True)
# Historical initial size, result of L{self.dictList} being (550,350) as of #6287.
# Setting an initial size on L{self.dictList} by passing a L{size} argument when
# creating the control would also set its minimum size and thus block the dialog from being shrunk.
self.SetSize(576, 502)
self.CentreOnScreen()
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the list box of dictionary entries in speech dictionary dialog.
entriesLabelText=_("&Dictionary entries")
self.dictList = sHelper.addLabeledControl(
entriesLabelText,
wx.ListCtrl, style=wx.LC_REPORT | wx.LC_SINGLE_SEL
)
# Translators: The label for a column in dictionary entries list used to identify comments for the entry.
self.dictList.InsertColumn(0,_("Comment"),width=150)
# Translators: The label for a column in dictionary entries list used to identify pattern (original word or a pattern).
self.dictList.InsertColumn(1,_("Pattern"),width=150)
# Translators: The label for a column in dictionary entries list and in a list of symbols from symbol pronunciation dialog used to identify replacement for a pattern or a symbol
self.dictList.InsertColumn(2,_("Replacement"),width=150)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is case sensitive or not.
self.dictList.InsertColumn(3,_("case"),width=50)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is a regular expression, matches whole words, or matches anywhere.
self.dictList.InsertColumn(4,_("Type"),width=50)
self.offOn = (_("off"),_("on"))
for entry in self.tempSpeechDict:
self.dictList.Append((
entry.comment,
entry.pattern,
entry.replacement,
self.offOn[int(entry.caseSensitive)],
DictionaryDialog.TYPE_LABELS[entry.type],
))
self.editingIndex=-1
bHelper = guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to add new entries.
label=_("&Add")
).Bind(wx.EVT_BUTTON, self.OnAddClick)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to edit existing entries.
label=_("&Edit")
).Bind(wx.EVT_BUTTON, self.OnEditClick)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to remove existing entries.
label=_("&Remove")
).Bind(wx.EVT_BUTTON, self.OnRemoveClick)
sHelper.addItem(bHelper)
def postInit(self):
self.dictList.SetFocus()
def onCancel(self,evt):
globalVars.speechDictionaryProcessing=True
super(DictionaryDialog, self).onCancel(evt)
def onOk(self,evt):
globalVars.speechDictionaryProcessing=True
if self.tempSpeechDict!=self.speechDict:
del self.speechDict[:]
self.speechDict.extend(self.tempSpeechDict)
self.speechDict.save()
super(DictionaryDialog, self).onOk(evt)
def OnAddClick(self,evt):
# Translators: This is the label for the add dictionary entry dialog.
entryDialog=DictionaryEntryDialog(self,title=_("Add Dictionary Entry"))
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict.append(entryDialog.dictEntry)
self.dictList.Append((
entryDialog.commentTextCtrl.GetValue(),
entryDialog.patternTextCtrl.GetValue(),
entryDialog.replacementTextCtrl.GetValue(),
self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())],
DictionaryDialog.TYPE_LABELS[entryDialog.getType()],
))
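# Deselect any previously selected rows so that only the newly added entry is selected.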
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.Select(index,on=0)
index=self.dictList.GetNextSelected(index)
addedIndex=self.dictList.GetItemCount()-1
self.dictList.Select(addedIndex)
self.dictList.Focus(addedIndex)
self.dictList.SetFocus()
entryDialog.Destroy()
def OnEditClick(self,evt):
if self.dictList.GetSelectedItemCount()!=1:
return
editIndex=self.dictList.GetFirstSelected()
if editIndex<0:
return
entryDialog=DictionaryEntryDialog(self)
entryDialog.patternTextCtrl.SetValue(self.tempSpeechDict[editIndex].pattern)
entryDialog.replacementTextCtrl.SetValue(self.tempSpeechDict[editIndex].replacement)
entryDialog.commentTextCtrl.SetValue(self.tempSpeechDict[editIndex].comment)
entryDialog.caseSensitiveCheckBox.SetValue(self.tempSpeechDict[editIndex].caseSensitive)
entryDialog.setType(self.tempSpeechDict[editIndex].type)
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict[editIndex]=entryDialog.dictEntry
self.dictList.SetItem(editIndex,0,entryDialog.commentTextCtrl.GetValue())
self.dictList.SetItem(editIndex,1,entryDialog.patternTextCtrl.GetValue())
self.dictList.SetItem(editIndex,2,entryDialog.replacementTextCtrl.GetValue())
self.dictList.SetItem(editIndex,3,self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())])
self.dictList.SetItem(editIndex,4,DictionaryDialog.TYPE_LABELS[entryDialog.getType()])
self.dictList.SetFocus()
entryDialog.Destroy()
def OnRemoveClick(self,evt):
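# The list uses wx.LC_SINGLE_SEL, so at most one entry is selected;
# the loop form would also cope with multiple selection.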
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.DeleteItem(index)
del self.tempSpeechDict[index]
index=self.dictList.GetNextSelected(index)
self.dictList.SetFocus()
class BrailleSettingsPanel(SettingsPanel):
# Translators: This is the label for the braille panel
title = _("Braille")
helpId = "BrailleSettings"
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the braille display on the braille panel.
displayLabel = _("Braille &display")
displayBox = wx.StaticBox(self, label=displayLabel)
displayGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(displayBox, wx.HORIZONTAL))
settingsSizerHelper.addItem(displayGroup)
self.displayNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), style=wx.TE_READONLY)
self.updateCurrentDisplay()
# Translators: This is the label for the button used to change braille display,
# it appears in the context of a braille display group on the braille settings panel.
changeDisplayBtn = wx.Button(self, label=_("C&hange..."))
displayGroup.addItem(
guiHelper.associateElements(
self.displayNameCtrl,
changeDisplayBtn
)
)
self.displayNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeDisplay)
changeDisplayBtn.Bind(wx.EVT_BUTTON,self.onChangeDisplay)
self.brailleSubPanel = BrailleSettingsSubPanel(self)
settingsSizerHelper.addItem(self.brailleSubPanel)
def _enterTriggersOnChangeDisplay(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeDisplay(evt)
else:
evt.Skip()
def onChangeDisplay(self, evt):
changeDisplay = BrailleDisplaySelectionDialog(self, multiInstanceAllowed=True)
ret = changeDisplay.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentDisplay(self):
if config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME:
displayDesc = BrailleDisplaySelectionDialog.getCurrentAutoDisplayDescription()
else:
displayDesc = braille.handler.display.description
self.displayNameCtrl.SetValue(displayDesc)
def onPanelActivated(self):
self.brailleSubPanel.onPanelActivated()
super(BrailleSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.brailleSubPanel.onPanelDeactivated()
super(BrailleSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.brailleSubPanel.onDiscard()
def onSave(self):
self.brailleSubPanel.onSave()
class BrailleDisplaySelectionDialog(SettingsDialog):
# Translators: This is the label for the braille display selection dialog.
title = _("Select Braille Display")
helpId = "BrailleSettings"
displayNames = []
possiblePorts = []
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for a setting in braille settings to choose a braille display.
displayLabelText = _("Braille &display:")
self.displayList = sHelper.addLabeledControl(displayLabelText, wx.Choice, choices=[])
self.Bind(wx.EVT_CHOICE, self.onDisplayNameChanged, self.displayList)
# Translators: The label for a setting in braille settings to choose the connection port (if the selected braille display supports port selection).
portsLabelText = _("&Port:")
self.portsList = sHelper.addLabeledControl(portsLabelText, wx.Choice, choices=[])
self.bindHelpEvent("BrailleSettingsPort", self.portsList)
self.updateBrailleDisplayLists()
def postInit(self):
# Finally, ensure that focus is on the list of displays.
self.displayList.SetFocus()
@staticmethod
def getCurrentAutoDisplayDescription():
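# When automatic detection is active and a real display has been found,
# append that display's description to the "automatic" label.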
description = braille.AUTOMATIC_PORT[1]
if (
config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME
and braille.handler.display.name != "noBraille"
):
description = "%s (%s)" % (description, braille.handler.display.description)
return description
def updateBrailleDisplayLists(self):
driverList = [(braille.AUTO_DISPLAY_NAME, self.getCurrentAutoDisplayDescription())]
driverList.extend(braille.getDisplayList())
self.displayNames = [driver[0] for driver in driverList]
displayChoices = [driver[1] for driver in driverList]
self.displayList.Clear()
self.displayList.AppendItems(displayChoices)
self.bindHelpEvent("BrailleSettingsDisplay", self.displayList)
try:
if config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME:
selection = 0
else:
selection = self.displayNames.index(braille.handler.display.name)
self.displayList.SetSelection(selection)
except (KeyError, ValueError):
pass
self.updatePossiblePorts()
def updatePossiblePorts(self):
displayName = self.displayNames[self.displayList.GetSelection()]
self.possiblePorts = []
if displayName != "auto":
displayCls = braille._getDisplayDriver(displayName)
try:
self.possiblePorts.extend(displayCls.getPossiblePorts().items())
except NotImplementedError:
pass
if self.possiblePorts:
self.portsList.SetItems([p[1] for p in self.possiblePorts])
try:
selectedPort = config.conf["braille"][displayName].get("port")
portNames = [p[0] for p in self.possiblePorts]
selection = portNames.index(selectedPort)
except (KeyError, ValueError):
# Display name not in config or port not valid
selection = 0
self.portsList.SetSelection(selection)
# If no port selection is possible or only automatic selection is available, disable the port selection control
enable = len(self.possiblePorts) > 0 and not (len(self.possiblePorts) == 1 and self.possiblePorts[0][0] == "auto")
self.portsList.Enable(enable)
def onDisplayNameChanged(self, evt):
self.updatePossiblePorts()
def onOk(self, evt):
if not self.displayNames:
# The list of displays has not been populated yet, so we didn't change anything in this panel
return
display = self.displayNames[self.displayList.GetSelection()]
if display not in config.conf["braille"]:
config.conf["braille"][display] = {}
if self.possiblePorts:
port = self.possiblePorts[self.portsList.GetSelection()][0]
config.conf["braille"][display]["port"] = port
if not braille.handler.setDisplayByName(display):
gui.messageBox(
# Translators: The message in a dialog presented when NVDA is unable to load the selected
# braille display.
message=_("Could not load the {display} display.").format(display=display),
# Translators: The title in a dialog presented when NVDA is unable to load the selected
# braille display.
caption=_("Braille Display Error"),
style=wx.OK | wx.ICON_WARNING,
parent=self
)
return
if self.IsModal():
# Hack: we need to update the display in our parent window before closing.
# Otherwise, NVDA will report the old display even though the new display is reflected visually.
self.Parent.updateCurrentDisplay()
super(BrailleDisplaySelectionDialog, self).onOk(evt)
class BrailleSettingsSubPanel(AutoSettingsMixin, SettingsPanel):
@property
def driver(self):
return braille.handler.display
def getSettings(self) -> AutoSettings:
return self.driver
def makeSettings(self, settingsSizer):
shouldDebugGui = gui._isDebug()
startTime = 0 if not shouldDebugGui else time.time()
# Construct braille display specific settings
self.updateDriverSettings()
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
tables = brailleTables.listTables()
# Translators: The label for a setting in braille settings to select the output table (the braille table used to read braille text on the braille display).
outputsLabelText = _("&Output table:")
outTables = [table for table in tables if table.output]
self.outTableNames = [table.fileName for table in outTables]
outTableChoices = [table.displayName for table in outTables]
self.outTableList = sHelper.addLabeledControl(outputsLabelText, wx.Choice, choices=outTableChoices)
self.bindHelpEvent("BrailleSettingsOutputTable", self.outTableList)
try:
selection = self.outTableNames.index(config.conf["braille"]["translationTable"])
self.outTableList.SetSelection(selection)
except (KeyError, ValueError):
pass
if shouldDebugGui:
timePassed = time.time() - startTime
log.debug(
f"Loading output tables completed, now at {timePassed:.2f} seconds from start"
)
# Translators: The label for a setting in braille settings to select the input table (the braille table used to type braille characters on a braille keyboard).
inputLabelText = _("&Input table:")
self.inTables = [table for table in tables if table.input]
inTableChoices = [table.displayName for table in self.inTables]
self.inTableList = sHelper.addLabeledControl(inputLabelText, wx.Choice, choices=inTableChoices)
self.bindHelpEvent("BrailleSettingsInputTable", self.inTableList)
try:
selection = self.inTables.index(brailleInput.handler.table)
self.inTableList.SetSelection(selection)
except ValueError:
pass
if shouldDebugGui:
timePassed = time.time() - startTime
log.debug(
f"Loading input tables completed, now at {timePassed:.2f} seconds from start"
)
# Translators: The label for a setting in braille settings to expand the current word under cursor to computer braille.
expandAtCursorText = _("E&xpand to computer braille for the word at the cursor")
self.expandAtCursorCheckBox = sHelper.addItem(
wx.CheckBox(self, wx.ID_ANY, label=expandAtCursorText)
)
self.bindHelpEvent("BrailleSettingsExpandToComputerBraille", self.expandAtCursorCheckBox)
self.expandAtCursorCheckBox.SetValue(config.conf["braille"]["expandAtCursor"])
# Translators: The label for a setting in braille settings to show the cursor.
showCursorLabelText = _("&Show cursor")
self.showCursorCheckBox = sHelper.addItem(wx.CheckBox(self, label=showCursorLabelText))
self.bindHelpEvent("BrailleSettingsShowCursor", self.showCursorCheckBox)
self.showCursorCheckBox.Bind(wx.EVT_CHECKBOX, self.onShowCursorChange)
self.showCursorCheckBox.SetValue(config.conf["braille"]["showCursor"])
# Translators: The label for a setting in braille settings to enable cursor blinking.
cursorBlinkLabelText = _("Blink cursor")
self.cursorBlinkCheckBox = sHelper.addItem(
wx.CheckBox(self, label=cursorBlinkLabelText)
)
self.bindHelpEvent("BrailleSettingsBlinkCursor", self.cursorBlinkCheckBox)
self.cursorBlinkCheckBox.Bind(wx.EVT_CHECKBOX, self.onBlinkCursorChange)
self.cursorBlinkCheckBox.SetValue(config.conf["braille"]["cursorBlink"])
if not self.showCursorCheckBox.GetValue():
self.cursorBlinkCheckBox.Disable()
# Translators: The label for a setting in braille settings to change cursor blink rate in milliseconds (1 second is 1000 milliseconds).
cursorBlinkRateLabelText = _("Cursor blink rate (ms)")
minBlinkRate = int(config.conf.getConfigValidation(
("braille", "cursorBlinkRate")
).kwargs["min"])
maxBlinkRate = int(config.conf.getConfigValidation(("braille", "cursorBlinkRate")).kwargs["max"])
self.cursorBlinkRateEdit = sHelper.addLabeledControl(
cursorBlinkRateLabelText,
nvdaControls.SelectOnFocusSpinCtrl,
min=minBlinkRate,
max=maxBlinkRate,
initial=config.conf["braille"]["cursorBlinkRate"]
)
self.bindHelpEvent("BrailleSettingsBlinkRate", self.cursorBlinkRateEdit)
if not self.showCursorCheckBox.GetValue() or not self.cursorBlinkCheckBox.GetValue():
self.cursorBlinkRateEdit.Disable()
self.cursorShapes = [s[0] for s in braille.CURSOR_SHAPES]
cursorShapeChoices = [s[1] for s in braille.CURSOR_SHAPES]
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to focus.
cursorShapeFocusLabelText = _("Cursor shape for &focus:")
self.cursorShapeFocusList = sHelper.addLabeledControl(cursorShapeFocusLabelText, wx.Choice, choices=cursorShapeChoices)
self.bindHelpEvent("BrailleSettingsCursorShapeForFocus", self.cursorShapeFocusList)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeFocus"])
self.cursorShapeFocusList.SetSelection(selection)
except (KeyError, ValueError):
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeFocusList.Disable()
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to review.
cursorShapeReviewLabelText = _("Cursor shape for &review:")
self.cursorShapeReviewList = sHelper.addLabeledControl(cursorShapeReviewLabelText, wx.Choice, choices=cursorShapeChoices)
self.bindHelpEvent("BrailleSettingsCursorShapeForReview", self.cursorShapeReviewList)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeReview"])
self.cursorShapeReviewList.SetSelection(selection)
except (KeyError, ValueError):
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeReviewList.Disable()
if gui._isDebug():
log.debug("Loading cursor settings completed, now at %.2f seconds from start"%(time.time() - startTime))
SHOW_MESSAGES_LABELS = [
# Translators: One of the show states of braille messages
# (the disabled mode turns off showing of braille messages completely).
_("Disabled"),
# Translators: One of the show states of braille messages
# (the timeout mode shows messages for the specific time).
_("Use timeout"),
# Translators: One of the show states of braille messages
# (the indefinitely mode prevents braille messages from disappearing automatically).
_("Show indefinitely"),
]
# Translators: The label for a combobox in braille settings enabling the user
# to decide whether braille messages should be shown and automatically disappear from the braille display.
showMessagesText = _("Show messages")
self.showMessagesList = sHelper.addLabeledControl(
showMessagesText,
wx.Choice,
choices=SHOW_MESSAGES_LABELS
)
self.bindHelpEvent("BrailleSettingsShowMessages", self.showMessagesList)
self.showMessagesList.Bind(wx.EVT_CHOICE, self.onShowMessagesChange)
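# Derive the three-way state from two config keys: messageTimeout == 0 means messages
# are disabled; otherwise noMessageTimeout selects between a timed display (False)
# and an indefinite one (True).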
if config.conf["braille"]["messageTimeout"] == 0:
self.showMessagesList.SetSelection(0)
elif config.conf["braille"]["noMessageTimeout"] == 0:
self.showMessagesList.SetSelection(1)
else:
self.showMessagesList.SetSelection(2)
# The minimum timeout value possible here is 1, because 0 disables the showing of braille
# messages entirely and is set via showMessagesList.
minTimeout = 1
maxTimeOut = int(config.conf.getConfigValidation(
("braille", "messageTimeout")
).kwargs["max"])
# Translators: The label for a setting in braille settings to change how long a message stays on the braille display (in seconds).
messageTimeoutText = _("Message &timeout (sec)")
self.messageTimeoutEdit = sHelper.addLabeledControl(
messageTimeoutText,
nvdaControls.SelectOnFocusSpinCtrl,
min=minTimeout,
max=maxTimeOut,
initial=config.conf["braille"]["messageTimeout"]
)
self.bindHelpEvent("BrailleSettingsMessageTimeout", self.messageTimeoutEdit)
if self.showMessagesList.GetSelection() != 1:
self.messageTimeoutEdit.Disable()
if gui._isDebug():
log.debug("Loading timeout settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to set whether braille should be tethered to focus or review cursor.
tetherListText = _("Tether B&raille:")
# Translators: The value for a setting in the braille settings, to set whether braille should be tethered to focus or review cursor.
tetherChoices = [x[1] for x in braille.handler.tetherValues]
self.tetherList = sHelper.addLabeledControl(tetherListText, wx.Choice, choices=tetherChoices)
self.bindHelpEvent("BrailleTether", self.tetherList)
tetherChoice=braille.handler.TETHER_AUTO if config.conf["braille"]["autoTether"] else config.conf["braille"]["tetherTo"]
selection = next((x for x, y in enumerate(braille.handler.tetherValues) if y[0] == tetherChoice), 0)
self.tetherList.SetSelection(selection)
if gui._isDebug():
log.debug("Loading tether settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to read by paragraph (if it is checked, the commands to move the display by lines moves the display by paragraphs instead).
readByParagraphText = _("Read by &paragraph")
self.readByParagraphCheckBox = sHelper.addItem(wx.CheckBox(self, label=readByParagraphText))
self.bindHelpEvent("BrailleSettingsReadByParagraph", self.readByParagraphCheckBox)
self.readByParagraphCheckBox.Value = config.conf["braille"]["readByParagraph"]
# Translators: The label for a setting in braille settings to enable word wrap (try to avoid splitting words at the end of the braille display).
wordWrapText = _("Avoid splitting &words when possible")
self.wordWrapCheckBox = sHelper.addItem(wx.CheckBox(self, label=wordWrapText))
self.bindHelpEvent("BrailleSettingsWordWrap", self.wordWrapCheckBox)
self.wordWrapCheckBox.Value = config.conf["braille"]["wordWrap"]
# Translators: The label for a setting in braille settings to select how the context for the focus object should be presented on a braille display.
focusContextPresentationLabelText = _("Focus context presentation:")
self.focusContextPresentationValues = [x[0] for x in braille.focusContextPresentations]
focusContextPresentationChoices = [x[1] for x in braille.focusContextPresentations]
self.focusContextPresentationList = sHelper.addLabeledControl(focusContextPresentationLabelText, wx.Choice, choices=focusContextPresentationChoices)
self.bindHelpEvent("BrailleSettingsFocusContextPresentation", self.focusContextPresentationList)
try:
index=self.focusContextPresentationValues.index(config.conf["braille"]["focusContextPresentation"])
except (KeyError, ValueError):
index=0
self.focusContextPresentationList.SetSelection(index)
if gui._isDebug():
log.debug("Finished making settings, now at %.2f seconds from start"%(time.time() - startTime))
def onSave(self):
AutoSettingsMixin.onSave(self)
config.conf["braille"]["translationTable"] = self.outTableNames[self.outTableList.GetSelection()]
brailleInput.handler.table = self.inTables[self.inTableList.GetSelection()]
config.conf["braille"]["expandAtCursor"] = self.expandAtCursorCheckBox.GetValue()
config.conf["braille"]["showCursor"] = self.showCursorCheckBox.GetValue()
config.conf["braille"]["cursorBlink"] = self.cursorBlinkCheckBox.GetValue()
config.conf["braille"]["cursorBlinkRate"] = self.cursorBlinkRateEdit.GetValue()
config.conf["braille"]["cursorShapeFocus"] = self.cursorShapes[self.cursorShapeFocusList.GetSelection()]
config.conf["braille"]["cursorShapeReview"] = self.cursorShapes[self.cursorShapeReviewList.GetSelection()]
config.conf["braille"]["noMessageTimeout"] = self.showMessagesList.GetSelection() == 2
if self.showMessagesList.GetSelection() == 0:
config.conf["braille"]["messageTimeout"] = 0
else:
config.conf["braille"]["messageTimeout"] = self.messageTimeoutEdit.GetValue()
tetherChoice = braille.handler.tetherValues[self.tetherList.GetSelection()][0]
if tetherChoice==braille.handler.TETHER_AUTO:
config.conf["braille"]["autoTether"] = True
config.conf["braille"]["tetherTo"] = braille.handler.TETHER_FOCUS
else:
config.conf["braille"]["autoTether"] = False
braille.handler.setTether(tetherChoice, auto=False)
config.conf["braille"]["readByParagraph"] = self.readByParagraphCheckBox.Value
config.conf["braille"]["wordWrap"] = self.wordWrapCheckBox.Value
config.conf["braille"]["focusContextPresentation"] = self.focusContextPresentationValues[self.focusContextPresentationList.GetSelection()]
def onShowCursorChange(self, evt):
self.cursorBlinkCheckBox.Enable(evt.IsChecked())
self.cursorBlinkRateEdit.Enable(evt.IsChecked() and self.cursorBlinkCheckBox.GetValue())
self.cursorShapeFocusList.Enable(evt.IsChecked())
self.cursorShapeReviewList.Enable(evt.IsChecked())
def onBlinkCursorChange(self, evt):
self.cursorBlinkRateEdit.Enable(evt.IsChecked())
def onShowMessagesChange(self, evt):
self.messageTimeoutEdit.Enable(evt.GetSelection() == 1)
def showStartErrorForProviders(
parent: wx.Window,
providers: List[vision.providerInfo.ProviderInfo],
) -> None:
if not providers:
return
if len(providers) == 1:
providerName = providers[0].displayName
# Translators: This message is presented when
# NVDA is unable to load a single vision enhancement provider.
message = _("Could not load the {providerName} vision enhancement provider").format(
providerName=providerName
)
else:
providerNames = ", ".join(provider.displayName for provider in providers)
# Translators: This message is presented when NVDA is unable to
# load multiple vision enhancement providers.
message = _("Could not load the following vision enhancement providers:\n{providerNames}").format(
providerNames=providerNames
)
gui.messageBox(
message,
# Translators: The title of the vision enhancement provider error message box.
_("Vision Enhancement Provider Error"),
wx.OK | wx.ICON_WARNING,
parent,
)
def showTerminationErrorForProviders(
parent: wx.Window,
providers: List[vision.providerInfo.ProviderInfo],
) -> None:
if not providers:
return
if len(providers) == 1:
providerName = providers[0].displayName
# Translators: This message is presented when
# NVDA is unable to gracefully terminate a single vision enhancement provider.
message = _("Could not gracefully terminate the {providerName} vision enhancement provider").format(
providerName=providerName
)
else:
providerNames = ", ".join(provider.displayName for provider in providers)
message = _(
# Translators: This message is presented when
# NVDA is unable to terminate multiple vision enhancement providers.
"Could not gracefully terminate the following vision enhancement providers:\n"
"{providerNames}"
).format(providerNames=providerNames)
gui.messageBox(
message,
# Translators: The title of the vision enhancement provider error message box.
_("Vision Enhancement Provider Error"),
wx.OK | wx.ICON_WARNING,
parent,
)
class VisionProviderStateControl(vision.providerBase.VisionProviderStateControl):
"""
Gives settings panels for vision enhancement providers a way to control a
single vision enhancement provider, handling any error conditions in
a UX friendly way.
"""
def __init__(
self,
parent: wx.Window,
providerInfo: vision.providerInfo.ProviderInfo
):
self._providerInfo = providerInfo
self._parent = weakref.ref(parent) # don't keep parent dialog alive with a circular reference.
def getProviderInfo(self) -> vision.providerInfo.ProviderInfo:
return self._providerInfo
def getProviderInstance(self) -> Optional[vision.providerBase.VisionEnhancementProvider]:
return vision.handler.getProviderInstance(self._providerInfo)
def startProvider(
self,
shouldPromptOnError: bool = True
) -> bool:
"""Initializes the provider, prompting user with the error if necessary.
@param shouldPromptOnError: True if the user should be presented with any errors that may occur.
@return: True on success
"""
success = self._doStartProvider()
if not success and shouldPromptOnError:
showStartErrorForProviders(self._parent(), [self._providerInfo, ])
return success
def terminateProvider(
self,
shouldPromptOnError: bool = True
) -> bool:
"""Terminate the provider, prompting user with the error if necessary.
@param shouldPromptOnError: True if the user should be presented with any errors that may occur.
@return: True on success
"""
success = self._doTerminate()
if not success and shouldPromptOnError:
showTerminationErrorForProviders(self._parent(), [self._providerInfo, ])
return success
def _doStartProvider(self) -> bool:
"""Attempt to start the provider, catching any errors.
@return True on successful termination.
"""
try:
vision.handler.initializeProvider(self._providerInfo)
return True
except Exception:
log.error(
f"Could not initialize the {self._providerInfo.providerId} vision enhancement provider",
exc_info=True
)
return False
def _doTerminate(self) -> bool:
"""Attempt to terminate the provider, catching any errors.
@return True on successful termination.
"""
try:
# Terminating a provider from the gui should never save the settings.
# This is because termination happens on the fly when unchecking check boxes.
# Saving settings would be harmful if a user opens the vision panel,
# then changes some settings and disables the provider.
vision.handler.terminateProvider(self._providerInfo, saveSettings=False)
return True
except Exception:
log.error(
f"Could not terminate the {self._providerInfo.providerId} vision enhancement provider",
exc_info=True
)
return False
class VisionSettingsPanel(SettingsPanel):
settingsSizerHelper: guiHelper.BoxSizerHelper
providerPanelInstances: List[SettingsPanel]
initialProviders: List[vision.providerInfo.ProviderInfo]
# Translators: This is the label for the vision panel
title = _("Vision")
# Translators: This is a label appearing on the vision settings panel.
panelDescription = _("Configure visual aids.")
def _createProviderSettingsPanel(
self,
providerInfo: vision.providerInfo.ProviderInfo
) -> Optional[SettingsPanel]:
settingsPanelCls = providerInfo.providerClass.getSettingsPanelClass()
if not settingsPanelCls:
if gui._isDebug():
log.debug(f"Using default panel for providerId: {providerInfo.providerId}")
settingsPanelCls = VisionProviderSubPanel_Wrapper
else:
if gui._isDebug():
log.debug(f"Using custom panel for providerId: {providerInfo.providerId}")
providerControl = VisionProviderStateControl(parent=self, providerInfo=providerInfo)
try:
return settingsPanelCls(
parent=self,
providerControl=providerControl
)
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.debug(f"Error creating providerPanel: {settingsPanelCls!r}", exc_info=True)
return None
def makeSettings(self, settingsSizer: wx.BoxSizer):
self.initialProviders = vision.handler.getActiveProviderInfos()
self.providerPanelInstances = []
self.settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.settingsSizerHelper.addItem(wx.StaticText(self, label=self.panelDescription))
for providerInfo in vision.handler.getProviderList(reloadFromSystem=True):
providerSizer = self.settingsSizerHelper.addItem(
wx.StaticBoxSizer(wx.StaticBox(self, label=providerInfo.displayName), wx.VERTICAL),
flag=wx.EXPAND
)
if len(self.providerPanelInstances) > 0:
settingsSizer.AddSpacer(guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
settingsPanel = self._createProviderSettingsPanel(providerInfo)
if not settingsPanel:
continue
providerSizer.Add(settingsPanel, flag=wx.EXPAND)
self.providerPanelInstances.append(settingsPanel)
def safeInitProviders(
self,
providers: List[vision.providerInfo.ProviderInfo]
) -> None:
"""Initializes one or more providers in a way that is gui friendly,
showing an error if appropriate.
"""
errorProviders: List[vision.providerInfo.ProviderInfo] = []
for provider in providers:
success = VisionProviderStateControl(self, provider).startProvider(shouldPromptOnError=False)
if not success:
errorProviders.append(provider)
showStartErrorForProviders(self, errorProviders)
def safeTerminateProviders(
self,
providers: List[vision.providerInfo.ProviderInfo],
verbose: bool = False
) -> None:
"""Terminates one or more providers in a way that is gui friendly,
@verbose: Whether to show a termination error.
@returns: Whether termination succeeded for all providers.
"""
errorProviders: List[vision.providerInfo.ProviderInfo] = []
for provider in providers:
success = VisionProviderStateControl(self, provider).terminateProvider(shouldPromptOnError=False)
if not success:
errorProviders.append(provider)
if verbose:
showTerminationErrorForProviders(self, errorProviders)
def refreshPanel(self):
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def onPanelActivated(self):
super().onPanelActivated()
def onDiscard(self):
for panel in self.providerPanelInstances:
try:
panel.onDiscard()
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.debug(f"Error discarding providerPanel: {panel.__class__!r}", exc_info=True)
providersToInitialize = [
provider for provider in self.initialProviders
if not bool(vision.handler.getProviderInstance(provider))
]
self.safeInitProviders(providersToInitialize)
initialProviderIds = [
providerInfo.providerId for providerInfo in self.initialProviders
]
providersToTerminate = [
provider for provider in vision.handler.getActiveProviderInfos()
if provider.providerId not in initialProviderIds
]
self.safeTerminateProviders(providersToTerminate)
def onSave(self):
for panel in self.providerPanelInstances:
try:
panel.onSave()
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.debug(f"Error saving providerPanel: {panel.__class__!r}", exc_info=True)
self.initialProviders = vision.handler.getActiveProviderInfos()
class VisionProviderSubPanel_Settings(
AutoSettingsMixin,
SettingsPanel
):
_settingsCallable: Callable[[], VisionEnhancementProviderSettings]
def __init__(
self,
parent: wx.Window,
*, # Make next argument keyword only
settingsCallable: Callable[[], vision.providerBase.VisionEnhancementProviderSettings]
):
"""
@param settingsCallable: A callable that returns an instance to a VisionEnhancementProviderSettings.
This will usually be a weakref, but could be any callable taking no arguments.
"""
self._settingsCallable = settingsCallable
super().__init__(parent=parent)
def getSettings(self) -> AutoSettings:
settings = self._settingsCallable()
return settings
def makeSettings(self, settingsSizer):
# Construct vision enhancement provider settings
self.updateDriverSettings()
class VisionProviderSubPanel_Wrapper(
SettingsPanel
):
_checkBox: wx.CheckBox
def __init__(
self,
parent: wx.Window,
providerControl: VisionProviderStateControl
):
self._providerControl = providerControl
self._providerSettings: Optional[VisionProviderSubPanel_Settings] = None
self._providerSettingsSizer = wx.BoxSizer(orient=wx.VERTICAL)
super().__init__(parent=parent)
def makeSettings(self, settingsSizer):
self._checkBox = wx.CheckBox(
self,
# Translators: Enable checkbox on a vision enhancement provider on the vision settings category panel
label=_("Enable")
)
settingsSizer.Add(self._checkBox)
self._optionsSizer = wx.BoxSizer(orient=wx.VERTICAL)
self._optionsSizer.AddSpacer(size=self.scaleSize(10))
# Translators: Options label on a vision enhancement provider on the vision settings category panel
self._optionsText = wx.StaticText(self, label=_("Options:"))
self._optionsSizer.Add(self._optionsText)
self._optionsSizer.Add(
self._providerSettingsSizer,
border=self.scaleSize(15),
flag=wx.LEFT | wx.EXPAND,
proportion=1.0
)
settingsSizer.Add(
self._optionsSizer,
flag=wx.EXPAND,
proportion=1.0
)
self._checkBox.SetValue(bool(self._providerControl.getProviderInstance()))
if self._createProviderSettings():
self._checkBox.Bind(wx.EVT_CHECKBOX, self._enableToggle)
else:
self._checkBox.Bind(wx.EVT_CHECKBOX, self._nonEnableableGUI)
self._updateOptionsVisibility()
def _updateOptionsVisibility(self):
hasProviderOptions = bool(self._providerSettings) and self._providerSettings.hasOptions
if hasProviderOptions:
self.settingsSizer.Show(self._optionsSizer, recursive=True)
else:
self.settingsSizer.Hide(self._optionsSizer, recursive=True)
self._sendLayoutUpdatedEvent()
def _createProviderSettings(self):
try:
getSettingsCallable = self._providerControl.getProviderInfo().providerClass.getSettings
self._providerSettings = VisionProviderSubPanel_Settings(
self,
settingsCallable=getSettingsCallable
)
self._providerSettingsSizer.Add(self._providerSettings, flag=wx.EXPAND, proportion=1.0)
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.error("unable to create provider settings", exc_info=True)
return False
return True
def _nonEnableableGUI(self, evt):
gui.messageBox(
# Translators: Shown when there is an error showing the GUI for a vision enhancement provider
_("Unable to configure user interface for Vision Enhancement Provider, it can not be enabled."),
# Translators: The title of the error dialog displayed when there is an error showing the GUI
# for a vision enhancement provider
_("Error"),
parent=self,
)
self._checkBox.SetValue(False)
def _enableToggle(self, evt):
shouldBeRunning = evt.IsChecked()
if shouldBeRunning and not self._providerControl.startProvider():
self._checkBox.SetValue(False)
self._updateOptionsVisibility()
return
elif not shouldBeRunning and not self._providerControl.terminateProvider():
# When there is an error on termination, don't leave the checkbox checked.
# The provider should not be left configured to startup.
self._checkBox.SetValue(False)
self._updateOptionsVisibility()
return
# Able to successfully start / terminate:
self._providerSettings.updateDriverSettings()
self._providerSettings.refreshGui()
self._updateOptionsVisibility()
def onDiscard(self):
if self._providerSettings:
self._providerSettings.onDiscard()
def onSave(self):
log.debug(f"calling VisionProviderSubPanel_Wrapper")
if self._providerSettings:
self._providerSettings.onSave()
""" The name of the config profile currently being edited, if any.
This is set when the currently edited configuration profile is determined and returned to None when the dialog is destroyed.
This can be used by an AppModule for NVDA to identify and announce
changes in the name of the edited configuration profile when categories are changed"""
NvdaSettingsDialogActiveConfigProfile = None
NvdaSettingsDialogWindowHandle = None
class NVDASettingsDialog(MultiCategorySettingsDialog):
# Translators: This is the label for the NVDA settings dialog.
title = _("NVDA Settings")
categoryClasses=[
GeneralSettingsPanel,
SpeechSettingsPanel,
BrailleSettingsPanel,
VisionSettingsPanel,
KeyboardSettingsPanel,
MouseSettingsPanel,
ReviewCursorPanel,
InputCompositionPanel,
ObjectPresentationPanel,
BrowseModePanel,
DocumentFormattingPanel,
]
if touchHandler.touchSupported():
categoryClasses.append(TouchInteractionPanel)
if winVersion.isUwpOcrAvailable():
categoryClasses.append(UwpOcrPanel)
# And finally the Advanced panel which should always be last.
# if not globalVars.appArgs.secure:
# categoryClasses.append(AdvancedPanel)
def makeSettings(self, settingsSizer):
# Ensure that after the settings dialog is created the name is set correctly
super(NVDASettingsDialog, self).makeSettings(settingsSizer)
self._doOnCategoryChange()
global NvdaSettingsDialogWindowHandle
NvdaSettingsDialogWindowHandle = self.GetHandle()
def _doOnCategoryChange(self):
global NvdaSettingsDialogActiveConfigProfile
NvdaSettingsDialogActiveConfigProfile = config.conf.profiles[-1].name
if not NvdaSettingsDialogActiveConfigProfile or isinstance(self.currentCategory, GeneralSettingsPanel):
# Translators: The profile name for normal configuration
NvdaSettingsDialogActiveConfigProfile = _("normal configuration")
self.SetTitle(self._getDialogTitle())
self.bindHelpEvent(
self.currentCategory.helpId,
self.catListCtrl
)
def _getDialogTitle(self):
return u"{dialogTitle}: {panelTitle} ({configProfile})".format(
dialogTitle=self.title,
panelTitle=self.currentCategory.title,
configProfile=NvdaSettingsDialogActiveConfigProfile
)
def onCategoryChange(self,evt):
super(NVDASettingsDialog,self).onCategoryChange(evt)
if evt.Skipped:
return
self._doOnCategoryChange()
def Destroy(self):
global NvdaSettingsDialogActiveConfigProfile, NvdaSettingsDialogWindowHandle
NvdaSettingsDialogActiveConfigProfile = None
NvdaSettingsDialogWindowHandle = None
super(NVDASettingsDialog, self).Destroy()
class AddSymbolDialog(
gui.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "SymbolPronunciation"
def __init__(self, parent):
# Translators: This is the label for the add symbol dialog.
super().__init__(parent, title=_("Add Symbol"))
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is the label for the edit field in the add symbol dialog.
symbolText = _("&Symbol:")
self.identifierTextCtrl = sHelper.addLabeledControl(symbolText, wx.TextCtrl)
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK | wx.CANCEL))
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.identifierTextCtrl.SetFocus()
self.CentreOnScreen()
class SpeechSymbolsDialog(SettingsDialog):
helpId = "SymbolPronunciation"
def __init__(self,parent):
try:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData(speech.getCurrentLanguage())
except LookupError:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData("en")
self.symbolProcessor = symbolProcessor
# Translators: This is the label for the symbol pronunciation dialog.
# %s is replaced by the language for which symbol pronunciation is being edited.
self.title = _("Symbol Pronunciation (%s)")%languageHandler.getLanguageDescription(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).__init__(
parent,
resizeable=True,
)
def makeSettings(self, settingsSizer):
self.filteredSymbols = self.symbols = [
copy.copy(symbol) for symbol in self.symbolProcessor.computedSymbols.values()
]
self.pendingRemovals = {}
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label of a text field to search for symbols in the speech symbols dialog.
filterText = pgettext("speechSymbols", "&Filter by:")
self.filterEdit = sHelper.addLabeledControl(
labelText = filterText,
wxCtrlClass=wx.TextCtrl,
size=(self.scaleSize(310), -1),
)
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
# Translators: The label for symbols list in symbol pronunciation dialog.
symbolsText = _("&Symbols")
self.symbolsList = sHelper.addLabeledControl(
symbolsText,
nvdaControls.AutoWidthColumnListCtrl,
autoSizeColumn=2, # The replacement column is likely to need the most space
itemTextCallable=self.getItemTextForList,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VIRTUAL
)
# Translators: The label for a column in symbols list used to identify a symbol.
self.symbolsList.InsertColumn(0, _("Symbol"), width=self.scaleSize(150))
# Translators: The label for a column in symbols list used to identify a replacement.
self.symbolsList.InsertColumn(1, _("Replacement"))
# Translators: The label for a column in symbols list used to identify a symbol's speech level (either none, some, most, all or character).
self.symbolsList.InsertColumn(2, _("Level"))
# Translators: The label for a column in symbols list which specifies when the actual symbol will be sent to the synthesizer (preserved).
# See the "Punctuation/Symbol Pronunciation" section of the User Guide for details.
self.symbolsList.InsertColumn(3, _("Preserve"))
self.symbolsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemFocused)
# Translators: The label for the group of controls in symbol pronunciation dialog to change the pronunciation of a symbol.
changeSymbolText = _("Change selected symbol")
changeSymbolHelper = sHelper.addItem(guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(
parent=self,
label=changeSymbolText,
orient=wx.VERTICAL,
)
))
# Used to ensure that event handlers call Skip(). Not calling skip can cause focus problems for controls. More
# generally the advice on the wx documentation is: "In general, it is recommended to skip all non-command events
# to allow the default handling to take place. The command events are, however, normally not skipped as usually
# a single command such as a button click or menu item selection must only be processed by one handler."
def skipEventAndCall(handler):
def wrapWithEventSkip(event):
if event:
event.Skip()
return handler()
return wrapWithEventSkip
# Translators: The label for the edit field in symbol pronunciation dialog to change the replacement text of a symbol.
replacementText = _("&Replacement")
self.replacementEdit = changeSymbolHelper.addLabeledControl(
labelText=replacementText,
wxCtrlClass=wx.TextCtrl,
size=(self.scaleSize(300), -1),
)
self.replacementEdit.Bind(wx.EVT_TEXT, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change the speech level of a symbol.
levelText = _("&Level")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
levelChoices = [symbolLevelLabels[level] for level in characterProcessing.SPEECH_SYMBOL_LEVELS]
self.levelList = changeSymbolHelper.addLabeledControl(levelText, wx.Choice, choices=levelChoices)
self.levelList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change when a symbol is sent to the synthesizer.
preserveText = _("&Send actual symbol to synthesizer")
symbolPreserveLabels = characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS
preserveChoices = [symbolPreserveLabels[mode] for mode in characterProcessing.SPEECH_SYMBOL_PRESERVES]
self.preserveList = changeSymbolHelper.addLabeledControl(preserveText, wx.Choice, choices=preserveChoices)
self.preserveList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
bHelper = sHelper.addItem(guiHelper.ButtonHelper(orientation=wx.HORIZONTAL))
# Translators: The label for a button in the Symbol Pronunciation dialog to add a new symbol.
addButton = bHelper.addButton(self, label=_("&Add"))
# Translators: The label for a button in the Symbol Pronunciation dialog to remove a symbol.
self.removeButton = bHelper.addButton(self, label=_("Re&move"))
self.removeButton.Disable()
addButton.Bind(wx.EVT_BUTTON, self.OnAddClick)
self.removeButton.Bind(wx.EVT_BUTTON, self.OnRemoveClick)
# Populate the unfiltered list with symbols.
self.filter()
def postInit(self):
self.symbolsList.SetFocus()
def filter(self, filterText=''):
NONE_SELECTED = -1
previousSelectionValue = None
previousIndex = self.symbolsList.GetFirstSelected() # may return NONE_SELECTED
if previousIndex != NONE_SELECTED:
previousSelectionValue = self.filteredSymbols[previousIndex]
if not filterText:
self.filteredSymbols = self.symbols
else:
# Do case-insensitive matching by lowering both filterText and each symbol's text.
filterText = filterText.lower()
self.filteredSymbols = [
symbol for symbol in self.symbols
if filterText in symbol.displayName.lower()
or filterText in symbol.replacement.lower()
]
self.symbolsList.ItemCount = len(self.filteredSymbols)
# sometimes filtering may result in an empty list.
if not self.symbolsList.ItemCount:
self.editingItem = None
# disable the "change symbol" controls, since there are no items in the list.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
self.removeButton.Disable()
return # exit early, no need to select an item.
# If there was a selection before filtering, try to preserve it
newIndex = 0 # select first item by default.
if previousSelectionValue:
try:
newIndex = self.filteredSymbols.index(previousSelectionValue)
except ValueError:
pass
# Change the selection
self.symbolsList.Select(newIndex)
self.symbolsList.Focus(newIndex)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(newIndex)
def getItemTextForList(self, item, column):
symbol = self.filteredSymbols[item]
if column == 0:
return symbol.displayName
elif column == 1:
return symbol.replacement
elif column == 2:
return characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS[symbol.level]
elif column == 3:
return characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS[symbol.preserve]
else:
raise ValueError("Unknown column: %d" % column)
def onSymbolEdited(self):
if self.editingItem is not None:
# Update the symbol the user was just editing.
item = self.editingItem
symbol = self.filteredSymbols[item]
symbol.replacement = self.replacementEdit.Value
symbol.level = characterProcessing.SPEECH_SYMBOL_LEVELS[self.levelList.Selection]
symbol.preserve = characterProcessing.SPEECH_SYMBOL_PRESERVES[self.preserveList.Selection]
def onListItemFocused(self, evt):
# Update the editing controls to reflect the newly selected symbol.
item = evt.GetIndex()
symbol = self.filteredSymbols[item]
self.editingItem = item
# ChangeValue and the Selection property are used because they do not fire change events (EVT_TEXT / EVT_CHOICE).
self.replacementEdit.ChangeValue(symbol.replacement)
self.levelList.Selection = characterProcessing.SPEECH_SYMBOL_LEVELS.index(symbol.level)
self.preserveList.Selection = characterProcessing.SPEECH_SYMBOL_PRESERVES.index(symbol.preserve)
self.removeButton.Enabled = not self.symbolProcessor.isBuiltin(symbol.identifier)
self.replacementEdit.Enable()
self.levelList.Enable()
self.preserveList.Enable()
evt.Skip()
def OnAddClick(self, evt):
with AddSymbolDialog(self) as entryDialog:
if entryDialog.ShowModal() != wx.ID_OK:
return
identifier = entryDialog.identifierTextCtrl.GetValue()
if not identifier:
return
# Clean the filter, so we can select the new entry.
self.filterEdit.Value=""
self.filter()
for index, symbol in enumerate(self.symbols):
if identifier == symbol.identifier:
# Translators: An error reported in the Symbol Pronunciation dialog when adding a symbol that is already present.
gui.messageBox(_('Symbol "%s" is already present.') % identifier,
_("Error"), wx.OK | wx.ICON_ERROR)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
self.symbolsList.SetFocus()
return
addedSymbol = characterProcessing.SpeechSymbol(identifier)
try:
del self.pendingRemovals[identifier]
except KeyError:
pass
addedSymbol.displayName = identifier
addedSymbol.replacement = ""
addedSymbol.level = characterProcessing.SYMLVL_ALL
addedSymbol.preserve = characterProcessing.SYMPRES_NEVER
self.symbols.append(addedSymbol)
self.symbolsList.ItemCount = len(self.symbols)
index = self.symbolsList.ItemCount - 1
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(index)
self.symbolsList.SetFocus()
def OnRemoveClick(self, evt):
index = self.symbolsList.GetFirstSelected()
symbol = self.filteredSymbols[index]
self.pendingRemovals[symbol.identifier] = symbol
del self.filteredSymbols[index]
if self.filteredSymbols is not self.symbols:
self.symbols.remove(symbol)
self.symbolsList.ItemCount = len(self.filteredSymbols)
# sometimes removing may result in an empty list.
if not self.symbolsList.ItemCount:
self.editingItem = None
# disable the "change symbol" controls, since there are no items in the list.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
self.removeButton.Disable()
else:
index = min(index, self.symbolsList.ItemCount - 1)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(index)
self.symbolsList.SetFocus()
def onOk(self, evt):
self.onSymbolEdited()
self.editingItem = None
for symbol in self.pendingRemovals.values():
self.symbolProcessor.deleteSymbol(symbol)
for symbol in self.symbols:
if not symbol.replacement:
continue
self.symbolProcessor.updateSymbol(symbol)
try:
self.symbolProcessor.userSymbols.save()
except IOError as e:
log.error("Error saving user symbols info: %s" % e)
characterProcessing._localeSpeechSymbolProcessors.invalidateLocaleData(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).onOk(evt)
def _refreshVisibleItems(self):
count = self.symbolsList.GetCountPerPage()
first = self.symbolsList.GetTopItem()
self.symbolsList.RefreshItems(first, first+count)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.Value)
self._refreshVisibleItems()
evt.Skip()
| 45.573716
| 262
| 0.760725
|
cc1e6826fade5f96853261c5d4ceb85931b659e6
| 1,040
|
py
|
Python
|
helpers/rpc/generator.py
|
nickpotafiy/developer.bitcoin.org
|
813ba3fb5eae85cfdfffe91d12f2df653ea8b725
|
[
"MIT-0",
"MIT"
] | 569
|
2017-07-26T23:14:31.000Z
|
2022-01-13T20:29:04.000Z
|
helpers/rpc/generator.py
|
nickpotafiy/developer.bitcoin.org
|
813ba3fb5eae85cfdfffe91d12f2df653ea8b725
|
[
"MIT-0",
"MIT"
] | 155
|
2017-07-17T15:40:36.000Z
|
2021-12-07T07:59:33.000Z
|
helpers/rpc/generator.py
|
nickpotafiy/developer.bitcoin.org
|
813ba3fb5eae85cfdfffe91d12f2df653ea8b725
|
[
"MIT-0",
"MIT"
] | 214
|
2017-07-17T15:36:08.000Z
|
2021-11-04T23:38:37.000Z
|
# Distributed under the MIT software license, see the accompanying
# file LICENSE or https://www.opensource.org/licenses/MIT.
from help_parser import HelpParser
from cli_caller import CliCaller
class Generator:
def __init__(self, cli, renderer):
self.cli = CliCaller(cli)
self.renderer = renderer
def generate_command(self, command):
print("Command %s" % command)
command_output = self.cli.help(command)
help_data = HelpParser().parse_help_command(command_output)
self.renderer.render_cmd_page(command, help_data)
def generate_overview(self):
help_output = self.cli.help()
command_list = HelpParser().parse_help_overview(help_output)
self.renderer.render_overview_page(command_list.grouped(),
render_version_info=False)
count = 0
for command in command_list.flat():
self.generate_command(command)
count += 1
print("Generated pages for %s commands." % count)
| 33.548387
| 69
| 0.664423
|
693786fdcf656b9f76487929b4f2b823a69a1dbe
| 2,305
|
py
|
Python
|
models/readouts.py
|
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs
|
57195ccab62d23dcbcac1a317f8a9811a9fd6cb5
|
[
"MIT"
] | 44
|
2020-02-07T12:44:25.000Z
|
2022-03-31T21:57:08.000Z
|
models/readouts.py
|
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs
|
57195ccab62d23dcbcac1a317f8a9811a9fd6cb5
|
[
"MIT"
] | 2
|
2020-02-07T03:54:15.000Z
|
2020-05-07T13:21:29.000Z
|
models/readouts.py
|
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs
|
57195ccab62d23dcbcac1a317f8a9811a9fd6cb5
|
[
"MIT"
] | 10
|
2020-02-23T07:34:55.000Z
|
2021-07-25T18:34:40.000Z
|
import torch.nn as nn
from dgl.nn.pytorch import AvgPooling as AP, SortPooling as SP, GlobalAttentionPooling as GAP, Set2Set as S2S, \
SetTransformerDecoder as STD
from models import activations
class AvgPooling(nn.Module):
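    """Mean of node features per graph. hidden_dim is accepted only to keep
    a uniform constructor interface across readouts; it is unused here."""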
def __init__(self, hidden_dim):
super().__init__()
self.ap = AP()
def forward(self, graph, feat):
return self.ap(graph, feat)
class SortPooling(nn.Module):
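    """DGL SortPooling: keeps the k top-sorted nodes per graph, then projects
    the flattened (k * hidden_dim) output back to hidden_dim."""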
def __init__(self, hidden_dim, k):
super().__init__()
self.sp = SP(k=k)
self.fc = nn.Linear(hidden_dim * k, hidden_dim)
def forward(self, graph, feat):
feat = self.sp(graph, feat)
return self.fc(feat)
class GlobalAttentionPooling(nn.Module):
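    """Gated attention readout: gate_nn scores each node, feat_nn transforms
    node features, and the score-weighted features are summed per graph."""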
def __init__(self, hidden_dim, n_layers, act_name):
super().__init__()
act_class = activations.__dict__[act_name]
gate_nn_layers = [l for _ in range(n_layers) for l in (nn.Linear(hidden_dim, hidden_dim), act_class())]
gate_nn_layers.append(nn.Linear(hidden_dim, 1))
gate_nn = nn.Sequential(*gate_nn_layers)
feat_nn = nn.Sequential(
*[l for _ in range(n_layers) for l in (nn.Linear(hidden_dim, hidden_dim), act_class())])
self.gap = GAP(gate_nn=gate_nn, feat_nn=feat_nn)
def forward(self, graph, feat):
return self.gap(graph, feat)
class Set2Set(nn.Module):
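    """Set2Set readout (LSTM-based attention over nodes). Its output has
    2 * hidden_dim features, so a linear layer maps it back to hidden_dim."""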
def __init__(self, hidden_dim, n_iters, n_layers):
super().__init__()
self.s2s = S2S(input_dim=hidden_dim, n_iters=n_iters, n_layers=n_layers)
self.fc = nn.Linear(hidden_dim * 2, hidden_dim)
def forward(self, graph, feat):
feat = self.s2s(graph, feat)
return self.fc(feat)
class SetTransformerDecoder(nn.Module):
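    """Set Transformer decoder readout with k seed vectors. The decoder emits
    k * hidden_dim features per graph, projected back to hidden_dim."""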
def __init__(self, hidden_dim, p_dropout, num_heads, n_layers, k):
super().__init__()
self.std = STD(d_model=hidden_dim,
num_heads=num_heads,
d_head=hidden_dim,
d_ff=hidden_dim,
n_layers=n_layers,
k=k,
dropouth=p_dropout,
dropouta=p_dropout)
self.fc = nn.Linear(hidden_dim * k, hidden_dim)
def forward(self, graph, feat):
feat = self.std(graph, feat)
return self.fc(feat)
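# Usage sketch (illustrative shapes; assumes a batched DGLGraph `g` with node
# features `h` of shape (num_nodes, hidden_dim)):
#   readout = Set2Set(hidden_dim=64, n_iters=3, n_layers=1)
#   graph_repr = readout(g, h)  # -> (batch_size, 64)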
| 32.928571
| 112
| 0.617787
|
048d81f03077a9b0eb6716bffac1f661a0d30a4c
| 531
|
py
|
Python
|
imutils/augmentation.py
|
labellson/cloud-characterization-pytorch
|
b6bbf25f5669800d9671b79ed3f93f8f34a773e6
|
[
"MIT"
] | null | null | null |
imutils/augmentation.py
|
labellson/cloud-characterization-pytorch
|
b6bbf25f5669800d9671b79ed3f93f8f34a773e6
|
[
"MIT"
] | null | null | null |
imutils/augmentation.py
|
labellson/cloud-characterization-pytorch
|
b6bbf25f5669800d9671b79ed3f93f8f34a773e6
|
[
"MIT"
] | null | null | null |
import numpy as np
def flip_img(image, axis=0):
"""
Flip a matrix vertically or horizontally.
:param image: source image
:param axis: axis which will be flipped
:return: the flipped matrix
"""
return np.flip(image, axis)
def rot90_img(image, clockwise=True):
"""
Rotate an image by 90 degrees.
:param image: source image
:param clockwise: True for clockwise rotation
:return: The rotated image
"""
# np.rot90 rotates from the first axis towards the second (counter-clockwise
# for the default axes), so swap the axes to get a clockwise rotation.
axes = (1, 0) if clockwise else (0, 1)
return np.rot90(image, axes=axes)
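# Example (illustrative): flip an image left-right, then rotate it clockwise.
#   img = np.zeros((480, 640, 3), dtype=np.uint8)
#   flipped = flip_img(img, axis=1)  # axis=1 flips horizontally
#   rotated = rot90_img(flipped)     # 90 degrees clockwise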
| 25.285714
| 48
| 0.655367
|
801e427b2ad0c4678ce19ddc0a59456734cb17c2
| 23,294
|
py
|
Python
|
util/dvsim/dvsim.py
|
mundaym/opentitan
|
b46960ac23ea155c134cdeaa57b97839540b6dfa
|
[
"Apache-2.0"
] | null | null | null |
util/dvsim/dvsim.py
|
mundaym/opentitan
|
b46960ac23ea155c134cdeaa57b97839540b6dfa
|
[
"Apache-2.0"
] | null | null | null |
util/dvsim/dvsim.py
|
mundaym/opentitan
|
b46960ac23ea155c134cdeaa57b97839540b6dfa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""dvsim is a command line tool to deploy ASIC tool flows such as regressions
for design verification (DV), formal property verification (FPV), linting and
synthesis.
It uses hjson as the format for specifying what to build and run. It is an
end-to-end regression manager that can deploy multiple builds (where some tests
might need different set of compile time options requiring a uniquely build sim
executable) in parallel followed by tests in parallel using the load balancer
of your choice.
dvsim is built to be tool-agnostic so that you can easily switch between the
tools at your disposal. dvsim uses fusesoc as the starting step to resolve all
inter-package dependencies and provide us with a filelist that will be consumed
by the sim tool.
"""
import argparse
import datetime
import logging as log
import os
import subprocess
import sys
import textwrap
from signal import SIGINT, signal
from CfgFactory import make_cfg
import Deploy
import utils
# TODO: add dvsim_cfg.hjson to retrieve this info
version = 0.1
# The different categories that can be passed to the --list argument.
_LIST_CATEGORIES = ["build_modes", "run_modes", "tests", "regressions"]
# Function to resolve the scratch root directory among the available options:
# If set on the command line, then use that as a preference.
# Else, check if $SCRATCH_ROOT env variable exists and is a directory.
# Else use the default (<cwd>/scratch)
# Try to create the directory if it does not already exist.
def resolve_scratch_root(arg_scratch_root):
scratch_root = os.environ.get('SCRATCH_ROOT')
if not arg_scratch_root:
if scratch_root is None:
arg_scratch_root = os.getcwd() + "/scratch"
else:
# Scratch space could be mounted in a filesystem (such as NFS) on a network drive.
# If the network is down, it could cause the access check to hang. So run a
# simple ls command with a timeout to prevent the hang.
(out,
status) = utils.run_cmd_with_timeout(cmd="ls -d " + scratch_root,
timeout=1,
exit_on_failure=0)
if status == 0 and out != "":
arg_scratch_root = scratch_root
else:
arg_scratch_root = os.getcwd() + "/scratch"
log.warning(
"Env variable $SCRATCH_ROOT=\"{}\" is not accessible.\n"
"Using \"{}\" instead.".format(scratch_root,
arg_scratch_root))
else:
arg_scratch_root = os.path.realpath(arg_scratch_root)
try:
os.system("mkdir -p " + arg_scratch_root)
except OSError:
log.fatal(
"Invalid --scratch-root=\"%s\" switch - failed to create directory!",
arg_scratch_root)
sys.exit(1)
return (arg_scratch_root)
def read_max_parallel(arg):
'''Take value for --max-parallel as an integer'''
try:
int_val = int(arg)
if int_val <= 0:
raise ValueError('bad value')
return int_val
except ValueError:
raise argparse.ArgumentTypeError(
'Bad argument for --max-parallel '
'({!r}): must be a positive integer.'.format(arg))
def resolve_max_parallel(arg):
'''Pick a value of max_parallel, defaulting to 16 or $DVSIM_MAX_PARALLEL'''
if arg is not None:
assert arg > 0
return arg
from_env = os.environ.get('DVSIM_MAX_PARALLEL')
if from_env is not None:
try:
return read_max_parallel(from_env)
except argparse.ArgumentTypeError:
log.warning('DVSIM_MAX_PARALLEL environment variable has value '
'{!r}, which is not a positive integer. Using default '
'value (16).'.format(from_env))
return 16
def resolve_branch(branch):
'''Choose a branch name for output files
If the --branch argument was passed on the command line, the branch
argument is the branch name to use. Otherwise it is None and we use git to
find the name of the current branch in the working directory.
'''
if branch is not None:
return branch
result = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
stdout=subprocess.PIPE)
branch = result.stdout.decode("utf-8").strip()
if not branch:
log.warning("Failed to find current git branch. "
"Setting it to \"default\"")
branch = "default"
return branch
# Get the project root directory path - this is used to construct the full paths
def get_proj_root():
cmd = ["git", "rev-parse", "--show-toplevel"]
result = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proj_root = result.stdout.decode("utf-8").strip()
if not proj_root:
log.error(
"Attempted to find the root of this GitHub repository by running:\n"
"{}\n"
"But this command has failed:\n"
"{}".format(' '.join(cmd), result.stderr.decode("utf-8")))
sys.exit(1)
return (proj_root)
def sigint_handler(signal_received, frame):
# Kill processes and background jobs.
log.debug('SIGINT or CTRL-C detected. Exiting gracefully')
cfg.kill()
log.info('Exit due to SIGINT or CTRL-C ')
exit(1)
def wrapped_docstring():
'''Return a text-wrapped version of the module docstring'''
paras = []
para = []
for line in __doc__.strip().split('\n'):
line = line.strip()
if not line:
if para:
paras.append('\n'.join(para))
para = []
else:
para.append(line)
if para:
paras.append('\n'.join(para))
return '\n\n'.join(textwrap.fill(p) for p in paras)
def parse_args():
parser = argparse.ArgumentParser(
description=wrapped_docstring(),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("cfg",
metavar="<cfg-hjson-file>",
help="""Configuration hjson file.""")
parser.add_argument("--version",
action='store_true',
help="Print version and exit")
parser.add_argument(
"--tool",
"-t",
help=("Explicitly set the tool to use. This is "
"optional for running simulations (where it can "
"be set in an .hjson file), but is required for "
"other flows. Possible tools include: vcs, "
"xcelium, ascentlint, veriblelint, verilator, dc."))
parser.add_argument("--list",
"-l",
nargs="*",
metavar='CAT',
choices=_LIST_CATEGORIES,
help=('Parse the given .hjson config file, list '
'the things that can be run, then exit. The '
'list can be filtered with a space-separated '
'list of categories from: {}.'.format(
', '.join(_LIST_CATEGORIES))))
whatg = parser.add_argument_group('Choosing what to run')
whatg.add_argument("-i",
"--items",
nargs="*",
default=["smoke"],
help=('Specify the regressions or tests to run. '
'Defaults to "smoke", but can be a '
'space separated list of test or regression '
'names.'))
whatg.add_argument("--select-cfgs",
nargs="*",
metavar="CFG",
help=('The .hjson file is a primary config. Only run '
'the given configs from it. If this argument is '
'not used, dvsim will process all configs listed '
'in a primary config.'))
disg = parser.add_argument_group('Dispatch options')
disg.add_argument("--job-prefix",
default="",
metavar="PFX",
help=('Prepend this string when running each tool '
'command.'))
disg.add_argument("--max-parallel",
"-mp",
type=read_max_parallel,
metavar="N",
help=('Run only up to N builds/tests at a time. '
'Default value 16, unless the DVSIM_MAX_PARALLEL '
'environment variable is set, in which case that '
'is used.'))
pathg = parser.add_argument_group('File management')
pathg.add_argument("--scratch-root",
"-sr",
metavar="PATH",
help=('Destination for build / run directories. If not '
'specified, uses the path in the SCRATCH_ROOT '
'environment variable, if set, or ./scratch '
'otherwise.'))
pathg.add_argument("--proj-root",
"-pr",
metavar="PATH",
help=('The root directory of the project. If not '
'specified, dvsim will search for a git '
'repository containing the current directory.'))
pathg.add_argument("--branch",
"-br",
metavar='B',
help=('By default, dvsim creates files below '
'{scratch-root}/{dut}.{flow}.{tool}/{branch}. '
'If --branch is not specified, dvsim assumes the '
'current directory is a git repository and uses '
'the name of the current branch.'))
pathg.add_argument("--max-odirs",
"-mo",
type=int,
default=5,
metavar="N",
help=('When tests are run, older runs are backed '
'up. Discard all but the N most recent (defaults '
'to 5).'))
pathg.add_argument("--purge",
action='store_true',
help="Clean the scratch directory before running.")
buildg = parser.add_argument_group('Options for building')
buildg.add_argument("--build-only",
"-bu",
action='store_true',
help=('Stop after building executables for the given '
'items.'))
buildg.add_argument("--build-unique",
action='store_true',
help=('Append a timestamp to the directory in which '
'files are built. This is suitable for the case '
'when another test is already running and you '
'want to run something else from a different '
'terminal without affecting it.'))
buildg.add_argument("--build-opts",
"-bo",
nargs="+",
default=[],
metavar="OPT",
help=('Additional options passed on the command line '
'each time a build tool is run.'))
buildg.add_argument("--build-modes",
"-bm",
nargs="+",
default=[],
metavar="MODE",
help=('The options for each build_mode in this list '
'are applied to all build and run targets.'))
rung = parser.add_argument_group('Options for running')
rung.add_argument("--run-only",
"-ru",
action='store_true',
help=('Skip the build step (assume that simulation '
'executables have already been built).'))
rung.add_argument("--run-opts",
"-ro",
nargs="+",
default=[],
metavar="OPT",
help=('Additional options passed on the command line '
'each time a test is run.'))
rung.add_argument("--run-modes",
"-rm",
nargs="+",
default=[],
metavar="MODE",
help=('The options for each run_mode in this list are '
'applied to each simulation run.'))
rung.add_argument("--profile",
"-p",
nargs="?",
choices=['time', 'mem'],
const="time",
metavar="P",
help=('Turn on simulation profiling (where P is time '
'or mem).'))
rung.add_argument("--xprop-off",
action='store_true',
help="Turn off X-propagation in simulation.")
rung.add_argument("--no-rerun",
action='store_true',
help=("Disable the default behaviour, where failing "
"tests are automatically rerun with waves "
"enabled."))
rung.add_argument("--verbosity",
"-v",
choices=['n', 'l', 'm', 'h', 'f', 'd'],
metavar='V',
help=('Set tool/simulation verbosity to none (n), low '
'(l), medium (m), high (h), full (f) or debug (d).'
' The default value is set in config files.'))
seedg = parser.add_argument_group('Test seeds')
seedg.add_argument("--seeds",
"-s",
nargs="+",
default=[],
metavar="S",
help=('A list of seeds for tests. Note that these '
'specific seeds are applied to items being run '
'in the order they are passed.'))
seedg.add_argument("--fixed-seed",
type=int,
metavar='S',
help=('Run all items with the seed S. This implies '
'--reseed 1.'))
seedg.add_argument("--reseed",
"-r",
type=int,
metavar="N",
help=('Override any reseed value in the test '
'configuration and run each test N times, with '
'a new seed each time.'))
seedg.add_argument("--reseed-multiplier",
"-rx",
type=int,
default=1,
metavar="N",
help=('Scale each reseed value in the test '
'configuration by N. This allows e.g. running '
'the tests 10 times as much as normal while '
'maintaining the ratio of numbers of runs '
'between different tests.'))
waveg = parser.add_argument_group('Dumping waves')
waveg.add_argument(
"--waves",
"-w",
nargs="?",
choices=["default", "fsdb", "shm", "vpd", "vcd", "evcd", "fst"],
const="default",
help=("Enable dumping of waves. It takes an optional "
"argument to pick the desired wave format. If "
"the optional argument is not supplied, it picks "
"whatever is the default for the chosen tool. "
"By default, dumping waves is not enabled."))
waveg.add_argument("--max-waves",
"-mw",
type=int,
default=5,
metavar="N",
help=('Only dump waves for the first N tests run. This '
'includes both tests scheduled for run and those '
'that are automatically rerun.'))
covg = parser.add_argument_group('Generating simulation coverage')
covg.add_argument("--cov",
"-c",
action='store_true',
help="Enable collection of coverage data.")
covg.add_argument("--cov-merge-previous",
action='store_true',
help=('Only applicable with --cov. Merge any previous '
'coverage database directory with the new '
'coverage database.'))
covg.add_argument("--cov-unr",
action='store_true',
help=('Run coverage UNR analysis and generate a report. '
'Currently only supported with VCS.'))
covg.add_argument("--cov-analyze",
action='store_true',
help=('Rather than building or running any tests, '
'analyze the coverage from the last run.'))
pubg = parser.add_argument_group('Generating and publishing results')
pubg.add_argument("--map-full-testplan",
action='store_true',
help=("Show complete testplan annotated results "
"at the end."))
pubg.add_argument("--publish",
action='store_true',
help="Publish results to reports.opentitan.org.")
dvg = parser.add_argument_group('Controlling DVSim itself')
dvg.add_argument("--print-interval",
"-pi",
type=int,
default=10,
metavar="N",
help="Print status every N seconds.")
dvg.add_argument("--verbose",
nargs="?",
choices=['default', 'debug'],
const="default",
metavar="D",
help=('With no argument, print verbose dvsim tool '
'messages. With --verbose=debug, the volume of '
'messages is even higher.'))
dvg.add_argument("--dry-run",
"-n",
action='store_true',
help=("Print dvsim tool messages but don't actually "
"run any command"))
args = parser.parse_args()
if args.version:
print(version)
sys.exit()
# We want the --list argument to default to "all categories", but allow
# filtering. If args.list is None, then --list wasn't supplied. If it is
# [], then --list was supplied with no further arguments and we want to
# list all categories.
if args.list == []:
args.list = _LIST_CATEGORIES
# Get max_parallel from environment if it wasn't specified on the command
# line.
args.max_parallel = resolve_max_parallel(args.max_parallel)
assert args.max_parallel > 0
return args
def main():
args = parse_args()
# Add log level 'VERBOSE' between INFO and DEBUG
log.addLevelName(utils.VERBOSE, 'VERBOSE')
log_format = '%(levelname)s: [%(module)s] %(message)s'
log_level = log.INFO
if args.verbose == "default":
log_level = utils.VERBOSE
elif args.verbose == "debug":
log_level = log.DEBUG
log.basicConfig(format=log_format, level=log_level)
if not os.path.exists(args.cfg):
log.fatal("Path to config file %s appears to be invalid.", args.cfg)
sys.exit(1)
# If publishing results, then force full testplan mapping of results.
if args.publish:
args.map_full_testplan = True
args.scratch_root = resolve_scratch_root(args.scratch_root)
args.branch = resolve_branch(args.branch)
args.cfg = os.path.abspath(args.cfg)
# Add timestamp to args that all downstream objects can use.
# Static variables - indicate timestamp.
ts_format_long = "%A %B %d %Y %I:%M:%S%p UTC"
ts_format = "%a.%m.%d.%y__%I.%M.%S%p"
curr_ts = datetime.datetime.utcnow()
timestamp_long = curr_ts.strftime(ts_format_long)
timestamp = curr_ts.strftime(ts_format)
setattr(args, "ts_format_long", ts_format_long)
setattr(args, "ts_format", ts_format)
setattr(args, "timestamp_long", timestamp_long)
setattr(args, "timestamp", timestamp)
# Register the seeds from command line with RunTest class.
Deploy.RunTest.seeds = args.seeds
# If we are fixing a seed value, no point in tests having multiple reseeds.
if args.fixed_seed:
args.reseed = 1
Deploy.RunTest.fixed_seed = args.fixed_seed
# Register the common deploy settings.
Deploy.Deploy.print_interval = args.print_interval
Deploy.Deploy.max_parallel = args.max_parallel
Deploy.Deploy.max_odirs = args.max_odirs
# Build infrastructure from hjson file and create the list of items to
# be deployed.
# Sets the project root directory: either specified from the command line
# or determined automatically by finding the root of the git repository
# enclosing the current directory.
if args.proj_root:
proj_root = args.proj_root
else:
proj_root = get_proj_root()
global cfg
cfg = make_cfg(args.cfg, args, proj_root)
# Handle Ctrl-C exit.
signal(SIGINT, sigint_handler)
# List items available for run if --list switch is passed, and exit.
if args.list is not None:
cfg.print_list()
sys.exit(0)
# Purge the scratch path if --purge option is set.
if args.purge:
cfg.purge()
# If --cov-unr is passed, run UNR to generate report for unreachable
# exclusion file.
if args.cov_unr:
cfg.cov_unr()
cfg.deploy_objects()
sys.exit(0)
# In simulation mode: if --cov-analyze switch is passed, then run the GUI
# tool.
if args.cov_analyze:
cfg.cov_analyze()
cfg.deploy_objects()
sys.exit(0)
# Deploy the builds and runs
if args.items != []:
# Create deploy objects.
cfg.create_deploy_objects()
cfg.deploy_objects()
# Generate results.
cfg.gen_results()
# Publish results
if args.publish:
cfg.publish_results()
else:
log.info("No items specified to be run.")
# Exit with non-zero status if there were errors or failures.
if cfg.has_errors():
log.error("Errors were encountered in this run.")
sys.exit(1)
if __name__ == '__main__':
main()
| 37.450161
| 94
| 0.526445
|
9fe45a84f13080f6839d182a9286eede649b3eca
| 2,751
|
py
|
Python
|
events/tasks.py
|
medfiras/Bazinga
|
2f77b70a3fe627410ddf0a5be0f074de5e0dccdd
|
[
"Apache-2.0"
] | null | null | null |
events/tasks.py
|
medfiras/Bazinga
|
2f77b70a3fe627410ddf0a5be0f074de5e0dccdd
|
[
"Apache-2.0"
] | 1
|
2015-05-31T10:42:36.000Z
|
2015-11-03T17:52:06.000Z
|
events/tasks.py
|
medfiras/Bazinga
|
2f77b70a3fe627410ddf0a5be0f074de5e0dccdd
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
try:
from django.forms.fields import email_re
except ImportError:
from django.core.validators import email_re  # location in older Django releases
from django.template.loader import render_to_string
# from celery.task import task
from django.contrib import messages
# from django.template import RequestContext
# @task
def send_mail_comment(subject, recipients_list, sender=None,
message=None, email_template=None, data=None,
headers=None):
"""Send email from /sender/ to /recipients_list/ with /subject/ and
/message/ as body.
"""
# Make sure that there is either a message or a template
if not data:
data = {}
# If there is no body for the email and not a template
# to render, then do nothing.
if not message and not email_template:
return
# If there is both an email body, submitted by the user,
# and a template to render as email body, then add
# user's input to extra_content key
elif message and email_template:
data.update({'extra_content': message})
# Make sure that there is a recipient
if not recipients_list:
return
if not headers:
headers = {}
data.update({'SITE_URL': settings.SITE_URL,
'FROM_EMAIL': settings.EMAIL_HOST_USER})
# Make sure subject is one line.
subject = subject.replace('\n', ' ')
for recipient in recipients_list:
to = ''
if User.objects.filter(pk=recipient).exists():
user = User.objects.get(pk=recipient)
# to = '%s <%s>' % (user.get_full_name(), user.email)
ctx_data = {'user': user,
'userprofile': user.profile}
data.update(ctx_data)
# elif email_re.match(recipient):
to = user.email
else:
return
if email_template:
message = render_to_string(email_template, data)
if not sender:
email = EmailMessage(subject=subject, body=message,
from_email=settings.EMAIL_HOST_USER,
to=[to], headers=headers)
else:
email = EmailMessage(subject=subject, body=message,
from_email=sender, to=[to], cc=[sender],
headers=headers)
try:
email.send()
# messages.success(request, "Comment sent to thread participants.")
except Exception:
    print("Comment saved but mail not sent. Contact your administrator.")
# messages.error(request, "Comment saved but mail not sent. Contact your administrator.")
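# Usage sketch (illustrative template path and context):
#   send_mail_comment("New comment on your event", [user.pk],
#                     email_template="emails/comment_notification.txt",
#                     data={"comment": comment})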
| 36.197368
| 101
| 0.605598
|